import numpy as np
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
import wilshire
from statsmodels.tsa.arima.model import ARIMA

### Activation functions ###

def snake(x):
    # Snake activation: x + sin(a*x)^2 / a, here with a = 30
    return x + (tf.math.sin(30 * x) ** 2) / 30

def sinus(x):
    return tf.math.sin(x)

def sinus_cosinus(x):
    return tf.math.sin(x) + tf.math.cos(x)

def swish(x):
    # Swish activation: x * sigmoid(x)
    return x * tf.math.sigmoid(x)
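
# Minimal sketch (not part of the original pipeline): the Snake activation is generally
# written as x + sin(a*x)^2 / a, where a sets the frequency; snake() above hard-codes
# a = 30. make_snake is a hypothetical helper that builds the activation for an arbitrary
# a, e.g. to retrain the "Snake<k>a" models loaded later in plot_all_a().
def make_snake(a=30.0):
    def activation(x):
        return x + (tf.math.sin(a * x) ** 2) / a
    return activation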

def prepare_data(filename="WILL5000INDFC.csv"):
    """
    Prepare the data by preprocessing and normalizing it, then split it into train and test sets.
    Return the x and y train and test sets, as well as the maximum (for later plots) and the
    index separating the two sets.
    """
    df_train, df_test, index = wilshire.preprocess(filename)
    x_train = np.arange(df_train.shape[0])
    maximum = np.max(x_train)
    x_train = x_train / maximum
    y_train = df_train["WILL5000INDFC"].to_numpy()
    x_test = np.arange(df_train.shape[0] + 1, df_train.shape[0] + df_test.shape[0] + 1)
    y_test = df_test["WILL5000INDFC"].to_numpy()
    x_test = x_test / maximum
    return x_train, x_test, y_train, y_test, maximum, index
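
# Usage sketch (assumes WILL5000INDFC.csv and the wilshire module are available):
# x_train, x_test, y_train, y_test, maximum, index = prepare_data()
# x_train/x_test are normalized time indices, y_train/y_test the preprocessed index values.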

def arima_pred(y_train, y_test, orders=[[2, 1, 1], [2, 2, 1], [3, 1, 1], [2, 1, 2]], n=5):
    """
    Compute the ARIMA test errors (MSE) for several orders, to compare with the article.
    Each order is evaluated n times; the mean and standard deviation of the MSE are returned.
    """
    mse = []
    for order in orders:
        mean_err = []
        for k in range(n):
            train = y_train
            preds = []
            # One-step-ahead forecast for each point of the test horizon
            for test in range(len(y_test)):
                model = ARIMA(train, order=(order[0], order[1], order[2]))
                model = model.fit()
                output = model.forecast()
                # print(output)
                preds.append(output[0])
                # train.append(y_test[te
            mean_err.append((np.square(np.array(preds) - np.array(y_test))).mean())
        mse.append([np.array(mean_err).mean(), np.array(mean_err).std()])
    return mse
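
# Usage sketch (slow: an ARIMA model is refit for every test point, n times per order):
# x_train, x_test, y_train, y_test, maximum, index = prepare_data()
# arima_err = arima_pred(y_train, y_test, n=1)  # n=1 keeps the runtime manageable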

def create_model(activation):
    """
    Create the neural network with the requested activation function.
    """
    # Fully connected 1 -> 1 -> 64 -> 64 -> 1 network, trained with SGD on an MSE loss
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(1, input_shape=[1, ], activation=activation))
    model.add(tf.keras.layers.Dense(64, activation=activation))
    model.add(tf.keras.layers.Dense(64, activation=activation))
    model.add(tf.keras.layers.Dense(1))
    opt = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.8)
    model.compile(optimizer=opt, loss='mse')
    model.build()
    model.summary()
    return model
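
# Usage sketch: build a Snake-activated network and run a forward pass on one
# normalized time index (purely illustrative, not part of the original script):
# model = create_model(snake)
# model.predict(np.array([[0.5]]))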

def training_testing(n=5, activations=[tf.keras.activations.relu, swish, sinus_cosinus, sinus, snake], epochs=50):
    """
    Train the models and compute the mean and std of the train/test errors over n runs
    for each requested activation function.
    """
    x_train, x_test, y_train, y_test, maximum, index = prepare_data(filename="WILL5000INDFC.csv")
    models = []
    errors_train, errors_test = [], []
    mean_y_train, mean_y_test, std_y_test = [], [], []
    for activation in activations:
        y_train_5 = []
        y_test_5 = []
        errors_train_5 = []
        errors_test_5 = []
        for k in range(n):
            model = create_model(activation)
            model.fit(x_train, y_train, batch_size=1, epochs=epochs)
            y_pred_test = model.predict(x_test)
            y_pred_train = model.predict(x_train)
            y_train_5.append(y_pred_train)
            y_test_5.append(y_pred_test)
            errors_test_5.append(model.evaluate(x_test, y_test))
            errors_train_5.append(model.evaluate(x_train, y_train))
            models.append(model)
        # Aggregate the n runs for this activation
        mean_y_train.append(np.mean(y_train_5, axis=0))
        mean_y_test.append(np.mean(y_test_5, axis=0))
        std_y_test.append(np.std(y_test_5, axis=0))
        errors_train.append([np.mean(errors_train_5), np.std(errors_train_5)])
        errors_test.append([np.mean(errors_test_5), np.std(errors_test_5)])
        # y_preds_train.append(y_pred_train)
        # y_preds_test.append(y_pred_test)
    return models, errors_train, errors_test

def final_plot(models, errors_test, arima_err, activations=["ReLU", "Swish", "Sinus Cosinus", "Sinus", "Snake"], orders_ARIMA=["[2,1,1]", "[2,2,1]", "[3,1,1]", "[2,1,2]"]):
    """
    Print the results to compare with the table of the article and reproduce the article's plot.
    """
    x_train, x_test, y_train, y_test, maximum, index = prepare_data(filename="WILL5000INDFC.csv")
    x = np.arange(9000)  # 9000 data points bring us to ~2031 to try and predict future data
    x_n = x / maximum
    future_preds = models[-1].predict(x_n)
    y_true = np.concatenate((y_train, y_test))
    x_cut = np.arange(x_train.shape[0] + x_test.shape[0])
    print("----- ARIMA Test MSE -----")
    for k in range(len(orders_ARIMA)):
        print("ARIMA" + orders_ARIMA[k] + " : " + str(arima_err[k]))
    print("----- DNN Test MSE -----")
    for k in range(len(activations)):
        print("DNN " + activations[k] + " : " + str(errors_test[k]))
    ### PLOT ###
    plt.figure()
    plt.plot(x_cut, y_true, label="True data")
    plt.plot(x, future_preds, label="Predictions")
    plt.xticks(range(0, 9000, 500), range(1995, 2031, 2))
    plt.xlabel("Years")
    plt.ylabel("Normalized Wilshire 5000 index")
    plt.vlines([index, index + 85], ymin=0, ymax=1, colors="r", label="Test Samples")
    plt.legend()
    plt.show()

def plot_all_a(a=["1", "10", "20", "30", "100"]):
    """
    Plot the predictions for several values of the Snake parameter a by loading
    pre-trained models (they are uploaded on GitHub).
    """
    models = []
    for param in a:
        models.append(tf.keras.models.load_model("Snake" + param + "a"))
    x_train, x_test, y_train, y_test, maximum, index = prepare_data(filename="WILL5000INDFC.csv")
    x = np.arange(9000)
    x_n = x / maximum
    y_true = np.concatenate((y_train, y_test))
    x_cut = np.arange(x_train.shape[0] + x_test.shape[0])
    future_preds = []
    for k in range(len(models)):
        future_preds.append(models[k].predict(x_n))
    plt.figure()
    plt.plot(x_cut, y_true, label="True data")
    for k in range(len(models)):
        plt.plot(x, future_preds[k], label="a = " + a[k])
    plt.xticks(range(0, 9000, 500), range(1995, 2031, 2))
    plt.xlabel("Years")
    plt.ylabel("Normalized Wilshire 5000 index")
    plt.legend()
    plt.show()
  147. # plot_all_a()
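
# End-to-end usage sketch (assumes WILL5000INDFC.csv and the wilshire module are
# present; training is slow since every model is fit n times with batch_size=1):
if __name__ == "__main__":
    models, errors_train, errors_test = training_testing(n=5, epochs=50)
    _, _, y_tr, y_te, _, _ = prepare_data()
    arima_err = arima_pred(y_tr, y_te, n=5)
    final_plot(models, errors_test, arima_err)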