
LSTM behavior before training

Emile Siboulet committed 4 years ago
Parent commit: aa83f7c78a
2 changed files with 35 additions and 21 deletions
1. code/RNN/Model.py (+9 −3)
2. code/RNN/training.py (+26 −18)

code/RNN/Model.py (+9 −3)

@@ ... @@
     def __init__(self, HIDDEN):
         super(MyModel, self).__init__()
         self.lstm1 = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
-        self.lstm2 = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
+        #self.lstm2 = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
         #self.lstm3 = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
         #self.lstm4 = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
-        self.lstmlast = tf.keras.layers.LSTM(1, return_sequences=True)
+        self.lstmlast = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
+        self.dense1 = tf.keras.layers.Dense(HIDDEN, activation='relu')
+        self.dense2 = tf.keras.layers.Dense(HIDDEN//2, activation='relu')
+        self.denselast = tf.keras.layers.Dense(1, activation='sigmoid')

     def call(self, inputs):
         x = self.lstm1(inputs)
-        x = self.lstm2(x)
+        #x = self.lstm2(x)
         #x = self.lstm3(x)
         #x = self.lstm4(x)
         x = self.lstmlast(x)
+        x = self.dense1(x)
+        x = self.dense2(x)
+        x = self.denselast(x)
         return x
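Taken together, the class after this commit looks roughly like the sketch below: a single active LSTM stack followed by a new dense head that squeezes each time step down to one sigmoid output. This is a reconstruction, not the full file; the import of tensorflow and the tf.keras.Model base class do not appear in the diff and are assumed.

import tensorflow as tf

class MyModel(tf.keras.Model):  # base class assumed, not shown in the diff
    def __init__(self, HIDDEN):
        super(MyModel, self).__init__()
        # One active LSTM layer; the deeper variants stay commented out.
        self.lstm1 = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
        # The last LSTM now keeps HIDDEN units; reducing each step to a
        # single value is delegated to the new dense head below.
        self.lstmlast = tf.keras.layers.LSTM(HIDDEN, return_sequences=True)
        self.dense1 = tf.keras.layers.Dense(HIDDEN, activation='relu')
        self.dense2 = tf.keras.layers.Dense(HIDDEN//2, activation='relu')
        # Sigmoid keeps predictions in [0, 1], matching the rescaled data
        # in training.py.
        self.denselast = tf.keras.layers.Dense(1, activation='sigmoid')

    def call(self, inputs):
        x = self.lstm1(inputs)       # (batch, time, HIDDEN)
        x = self.lstmlast(x)         # (batch, time, HIDDEN)
        x = self.dense1(x)
        x = self.dense2(x)
        return self.denselast(x)     # (batch, time, 1)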

code/RNN/training.py (+26 −18)

@@ ... @@
 import numpy as np
 from Model import MyModel

-LEN_TRAIN = 5000
-LEN_SEQ = 100
-PRED = 0.02
+LEN_SEQ = 64
+PRED = 0.05
 HIDDEN = 128

 model = MyModel(HIDDEN)

 dataset = np.load('dataset.npy')
-datasetp = np.roll(dataset, 1)
-datasetp[0] = dataset[0]
-data = (dataset - datasetp)/datasetp
-data = data*2/(max(data) - min(data))
+scale = (np.max(dataset) - np.min(dataset))
+data = dataset/scale
+shift = np.min(data)
+data = data - shift
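The new preprocessing drops the old day-over-day relative change in favor of a plain shift-and-scale normalization into [0, 1], which matches the sigmoid output added to the model and has an exact, trivial inverse. A minimal round-trip sketch, assuming only a 1-D NumPy series (the synthetic dataset here is illustrative):

import numpy as np

dataset = np.linspace(50.0, 150.0, 365)  # illustrative stand-in for dataset.npy

# Forward transform, as committed: divide by the range, then shift the
# minimum down to 0 so every value lands in [0, 1].
scale = np.max(dataset) - np.min(dataset)
data = dataset / scale
shift = np.min(data)
data = data - shift

# Exact inverse, used after prediction: undo the shift, then the scale.
restored = (data + shift) * scale
assert np.allclose(restored, dataset)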


@@ ... @@
 annee = np.array(list(range(len(data))))/365
 annee = annee - annee[-1]
 plt.legend()
 plt.show()


@@ ... @@
-X_train = [data[0:start_pred-1]]
-Y_train = [data[1:start_pred]]
+X_train_tot = [data[0:start_pred-1]]
+Y_train_tot = [data[1:start_pred]]
+
+X_train_tot = np.expand_dims(np.array(X_train_tot),2)
+Y_train_tot = np.expand_dims(np.array(Y_train_tot),2)

-X_train = np.expand_dims(np.array(X_train),2)
-Y_train = np.expand_dims(np.array(Y_train),2)
+X_train = X_train_tot[:,:LEN_SEQ,:]
+Y_train = Y_train_tot[:,:LEN_SEQ,:]
+
+for i in range(len(X_train_tot[0]) - LEN_SEQ) :
+    X_train = np.concatenate((X_train, X_train_tot[:,i:i+LEN_SEQ,:]),0)
+    Y_train = np.concatenate((Y_train, Y_train_tot[:,i:i+LEN_SEQ,:]),0)
+print(X_train_tot.shape)
+print(X_train.shape)
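The new training set is built by sliding a LEN_SEQ-long window over the input/target pair and stacking each offset as a separate sample along the batch axis. A self-contained sketch of the same loop on a toy series (the toy values and shapes are illustrative; note that, as in the committed loop, the i=0 iteration re-appends the seed window):

import numpy as np

LEN_SEQ = 4
series = np.arange(10.0)                 # toy stand-in for `data`

# Inputs are steps [0, n-1), targets the same steps shifted by one,
# each with a trailing feature axis: shape (1, n-1, 1).
X_tot = np.expand_dims(np.array([series[:-1]]), 2)
Y_tot = np.expand_dims(np.array([series[1:]]), 2)

# Seed with the first window, then append one window per offset.
X = X_tot[:, :LEN_SEQ, :]
Y = Y_tot[:, :LEN_SEQ, :]
for i in range(len(X_tot[0]) - LEN_SEQ):
    X = np.concatenate((X, X_tot[:, i:i+LEN_SEQ, :]), 0)
    Y = np.concatenate((Y, Y_tot[:, i:i+LEN_SEQ, :]), 0)

print(X_tot.shape)   # (1, 9, 1)
print(X.shape)       # (6, 4, 1): one sample per offset, plus the seed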




@@ ... @@
 model.compile(optimizer='adam',
@@ ... @@
 import os
 os.system("rm -rf log_dir")

-model.fit(x=X_train, y=Y_train, epochs=30)
+model.fit(x=X_train, y=Y_train, batch_size=16, epochs=5, shuffle=True)


@@ ... @@
-Pred = X_train.copy()
+Pred = X_train_tot.copy()
 while len(Pred[0]) < len(data) :
     print(len(data) - len(Pred[0]))
     Pred = np.concatenate((Pred, np.array([[model.predict(Pred)[0][-1]]])),1)
-Pred = Pred/2*(max(data) - min(data))
-data_Pred = dataset.copy()
-Pred = np.squeeze(Pred)
-for i in range(start_pred,len(data_Pred)) :
-    data_Pred[i] = data_Pred[i-1]*Pred[i] + data_Pred[i-1]
+Pred = Pred + shift
+Pred = Pred * scale
+
+data_Pred = np.squeeze(Pred)
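The prediction stage is autoregressive: the model's output for the last time step is appended to its own input until the sequence reaches the full data length, and only then is the shift-and-scale normalization inverted. A sketch of that loop as a function, assuming a model built with return_sequences=True (one output per step); the autoregress helper name is hypothetical, as the committed script does this inline:

import numpy as np

def autoregress(model, seed, target_len, shift, scale):
    # Hypothetical helper; `seed` has shape (1, t, 1) in scaled units,
    # e.g. X_train_tot from the committed script.
    Pred = seed.copy()
    while Pred.shape[1] < target_len:
        # return_sequences=True gives one output per step; keep the last.
        next_step = model.predict(Pred)[0][-1]           # shape (1,)
        Pred = np.concatenate((Pred, np.array([[next_step]])), 1)
    # Invert the normalization: undo the shift, then the scale.
    return np.squeeze((Pred + shift) * scale)

# Usage mirroring training.py:
# data_Pred = autoregress(model, X_train_tot, len(data), shift, scale)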


@@ ... @@
 plt.figure(2)
 plt.plot(annee[:start_pred],dataset[:start_pred], label="apprentissage")
