# --- Script 1: fit a snake-activation MLP to sparse sine samples (Optuna) ---
import tensorflow as tf
import tensorflow_addons as tfa
from matplotlib import pyplot as plt
import numpy as np
import optuna

densite = 10          # sample density: points per unit of x
start1 = 5            # first training window [start1, stop1]
stop1 = 15
start2 = 35           # second training window [start2, stop2]
stop2 = 45
start = -50           # dense validation range [start, stop]
stop = 150

# Training inputs: two disjoint windows of the sine curve (the model must
# interpolate/extrapolate everywhere else).
training = [np.concatenate((np.linspace(start1, stop1, (stop1 - start1) * densite),
                            np.linspace(start2, stop2, (stop2 - start2) * densite)))]
training.append(np.sin(training[0]) / 3 + 0.5)   # targets rescaled into (0, 1) for the sigmoid output
training[0] = training[0] / (stop - start)       # normalize x by the validation span

validation = [np.linspace(start, stop, (stop - start) * densite)]
validation.append(np.sin(validation[0]) / 3 + 0.5)
validation[0] = validation[0] / (stop - start)

fig = plt.figure(1)
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
ax1.plot(*validation, '.', label="validation")
ax2.plot(*training, '.', label="apprentissage")
# BUG FIX: a single plt.legend() only labels the current axes; show both.
ax1.legend()
ax2.legend()
plt.show()   # NOTE: blocks until the window is closed

# Loss/metric objects shared by the objective below.
bce = tf.keras.losses.BinaryCrossentropy()
mse = tf.keras.losses.MeanSquaredError()
def objectif(trial):
    """Optuna objective: train a snake-activation MLP on the sparse sine
    samples and score it by MSE against the dense validation curve.

    Args:
        trial: optuna.Trial used to sample hidden width, depth and dropout.

    Returns:
        float: centered MSE on the validation curve (Optuna minimizes it).
    """
    HIDDEN = trial.suggest_int('hidden', 64, 512)
    SIZE = trial.suggest_int('size', 0, 2)
    DROPOUT = trial.suggest_float('dropout', 0, 0.3)

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(HIDDEN, input_shape=(1,),
                                    activation=tfa.activations.snake))
    for _ in range(SIZE):
        model.add(tf.keras.layers.Dense(HIDDEN, activation=tfa.activations.snake))
        model.add(tf.keras.layers.Dropout(DROPOUT))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

    # BUG FIX: 'bce' is not a registered Keras loss alias (raises
    # "Unknown loss function" in tf.keras); use the BinaryCrossentropy
    # instance already defined at module level.
    model.compile(optimizer='adam', loss=bce, metrics=['mse'])

    model.fit(x=training[0], y=training[1], batch_size=4,
              epochs=3000, shuffle=True,
              # validation_data=(validation[0], validation[1]),
              verbose='auto')

    pred = model.predict(validation[0])

    plt.figure(2)
    plt.clf()
    plt.plot(*validation, label="validation")
    plt.plot(*training, label="apprentissage")
    plt.plot(validation[0], pred, label="prediction")
    plt.legend()

    # Center both curves (the 0.5 offset was added when building the
    # targets) before scoring.
    rep = mse(pred - 0.5, validation[1] - 0.5).numpy()
    plt.savefig(f"images_snake/{rep}_{trial.number}_{HIDDEN}_{SIZE}_{DROPOUT}.png")
    return rep
# Run the hyper-parameter search; Optuna minimizes the objective by default.
study = optuna.create_study()
study.optimize(objectif, n_trials=25)
# --- Script 2: one-step-ahead sine prediction with stacked LSTMs (Optuna) ---
import tensorflow as tf
import tensorflow_addons as tfa
from matplotlib import pyplot as plt
import numpy as np
import optuna

densite = 10         # sample density: points per unit of x
start1 = 0           # training range [start1, stop1]
stop1 = 30
start = 0            # validation range [start, stop]
stop = 200


def f(x):
    """Signal to learn."""
    return np.sin(x)


# Build (x, current value, next value) triples for one-step-ahead prediction.
training = [np.linspace(start1, stop1, (stop1 - start1) * densite)]
training.append(f(training[0]) / 3 + 0.5)   # rescale into (0, 1) for the sigmoid output
training = [training[0][:-1], training[1][:-1], training[1][1:]]

validation = [np.linspace(start, stop, (stop - start) * densite)]
validation.append(f(validation[0]) / 3 + 0.5)
validation = [validation[0][:-1], validation[1][:-1], validation[1][1:]]

fig = plt.figure(1)
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
ax1.plot(validation[0], validation[2], label="validation")
# BUG FIX: both training curves used the same label, producing two
# indistinguishable legend entries; differentiate target vs input.
ax2.plot(training[0], training[2], label="apprentissage (cible)")
ax2.plot(training[0], training[1], label="apprentissage (entree)")
# BUG FIX: a single plt.legend() only labels the current axes; show both.
ax1.legend()
ax2.legend()
plt.show()   # NOTE: blocks until the window is closed

# Reshape value series to (batch=1, timesteps, features=1) for the LSTM.
training[1] = np.expand_dims(training[1], [0, 2])
training[2] = np.expand_dims(training[2], [0, 2])
validation[1] = np.expand_dims(validation[1], [0, 2])
validation[2] = np.expand_dims(validation[2], [0, 2])

# Loss/metric objects shared by the objective below.
bce = tf.keras.losses.BinaryCrossentropy()
mse = tf.keras.losses.MeanSquaredError()
def objectif(trial):
    """Optuna objective: train a stacked-LSTM one-step predictor, then roll
    it out autoregressively over the longer validation range.

    Args:
        trial: optuna.Trial used to sample hidden width and LSTM depth.

    Returns:
        float: MSE of the rollout against the validation targets.
    """
    HIDDEN = trial.suggest_int('hidden', 16, 32)
    SIZE = trial.suggest_int('size', 0, 2)

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.LSTM(HIDDEN, return_sequences=True))
    for _ in range(SIZE):
        model.add(tf.keras.layers.LSTM(HIDDEN, return_sequences=True))
    model.add(tf.keras.layers.Dense(HIDDEN, activation='relu'))
    model.add(tf.keras.layers.Dense(HIDDEN // 2, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

    # BUG FIX: 'bce' is not a registered Keras loss alias (raises
    # "Unknown loss function" in tf.keras); use the BinaryCrossentropy
    # instance already defined at module level.
    model.compile(optimizer='adam', loss=bce, metrics=['mse'])

    model.fit(x=training[1], y=training[2],
              epochs=300, shuffle=True,
              # validation_data=(validation[0], validation[1]),
              verbose='auto')

    # Autoregressive rollout: seed with the training inputs, then keep
    # appending the model's prediction for the last timestep until the
    # sequence matches the validation length.
    pred = training[1]
    while len(pred[0]) < len(validation[1][0]):
        print(len(validation[1][0]) - len(pred[0]))   # remaining steps
        nxt = np.expand_dims(model.predict(pred)[0, -1], (1, 2))
        pred = np.concatenate((pred, nxt), 1)

    fig = plt.figure(1)
    plt.clf()
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
    ax1.plot(validation[0], validation[2][0, :, 0], label="objectif")
    ax1.plot(training[0], training[2][0, :, 0], label="apprentissage")
    # BUG FIX: the two plt.legend() calls both targeted ax2 (the second one
    # before ax2 had any labeled artist); attach a legend to each axes.
    ax1.legend()
    ax2.plot(validation[0], pred[0, :, 0], label="prediction")
    ax2.legend()

    # NOTE(review): pred is aligned with the inputs (validation[1]) but is
    # scored against the shifted targets (validation[2]) — a one-step
    # offset; confirm this is intended.
    rep = mse(pred, validation[2]).numpy()
    plt.savefig(f"images_lstm/{rep}_{trial.number}_{HIDDEN}_{SIZE}.png")
    return rep
# Run the hyper-parameter search; Optuna minimizes the objective by default.
study = optuna.create_study()
study.optimize(objectif, n_trials=25)
# --- Script 3: next-step prediction of a quasi-periodic signal with LSTMs ---
import tensorflow as tf
import tensorflow_addons as tfa
from matplotlib import pyplot as plt
import numpy as np
import optuna

densite = 10         # sample density: points per unit of x
start1 = 0           # training range [start1, stop1]
stop1 = 30
start = 0            # validation range [start, stop]
stop = 200


def f(x):
    """Signal to learn: a sum of three incommensurate sines."""
    # TYPO FIX: the original had "+ +" (a no-op unary plus) before the
    # last term; the value is unchanged.
    return np.sin(x) + 1/3*np.sin(2*x+1) + 1/5*np.sin(x/2+2)


# Build (x, current value, next value) triples for one-step-ahead prediction.
training = [np.linspace(start1, stop1, (stop1 - start1) * densite)]
training.append(f(training[0]) / 3 + 0.5)   # rescale toward (0, 1) for the sigmoid output
training = [training[0][:-1], training[1][:-1], training[1][1:]]

validation = [np.linspace(start, stop, (stop - start) * densite)]
validation.append(f(validation[0]) / 3 + 0.5)
validation = [validation[0][:-1], validation[1][:-1], validation[1][1:]]

fig = plt.figure(1)
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
ax1.plot(validation[0], validation[2], label="validation")
# BUG FIX: both training curves used the same label, producing two
# indistinguishable legend entries; differentiate target vs input.
ax2.plot(training[0], training[2], label="apprentissage (cible)")
ax2.plot(training[0], training[1], label="apprentissage (entree)")
# BUG FIX: a single plt.legend() only labels the current axes; show both.
ax1.legend()
ax2.legend()
plt.show()   # NOTE: blocks until the window is closed

# Reshape value series to (batch=1, timesteps, features=1) for the LSTM.
training[1] = np.expand_dims(training[1], [0, 2])
training[2] = np.expand_dims(training[2], [0, 2])
validation[1] = np.expand_dims(validation[1], [0, 2])
validation[2] = np.expand_dims(validation[2], [0, 2])

# Loss/metric objects shared by the objective below.
bce = tf.keras.losses.BinaryCrossentropy()
mse = tf.keras.losses.MeanSquaredError()
def objectif(trial):
    """Optuna objective: train a stacked-LSTM one-step predictor on the
    complex signal, then roll it out autoregressively over the longer
    validation range.

    Args:
        trial: optuna.Trial used to sample hidden width and LSTM depth.

    Returns:
        float: MSE of the rollout against the validation targets.
    """
    HIDDEN = trial.suggest_int('hidden', 16, 32)
    SIZE = trial.suggest_int('size', 0, 2)

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.LSTM(HIDDEN, return_sequences=True))
    for _ in range(SIZE):
        model.add(tf.keras.layers.LSTM(HIDDEN, return_sequences=True))
    model.add(tf.keras.layers.Dense(HIDDEN, activation='relu'))
    model.add(tf.keras.layers.Dense(HIDDEN // 2, activation='relu'))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

    # BUG FIX: 'bce' is not a registered Keras loss alias (raises
    # "Unknown loss function" in tf.keras); use the BinaryCrossentropy
    # instance already defined at module level.
    model.compile(optimizer='adam', loss=bce, metrics=['mse'])

    model.fit(x=training[1], y=training[2],
              epochs=300, shuffle=True,
              # validation_data=(validation[0], validation[1]),
              verbose='auto')

    # Autoregressive rollout: seed with the training inputs, then keep
    # appending the model's prediction for the last timestep until the
    # sequence matches the validation length.
    pred = training[1]
    while len(pred[0]) < len(validation[1][0]):
        print(len(validation[1][0]) - len(pred[0]))   # remaining steps
        nxt = np.expand_dims(model.predict(pred)[0, -1], (1, 2))
        pred = np.concatenate((pred, nxt), 1)

    fig = plt.figure(1)
    plt.clf()
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
    ax1.plot(validation[0], validation[2][0, :, 0], label="objectif")
    ax1.plot(training[0], training[2][0, :, 0], label="apprentissage")
    # BUG FIX: the two plt.legend() calls both targeted ax2 (the second one
    # before ax2 had any labeled artist); attach a legend to each axes.
    ax1.legend()
    ax2.plot(validation[0], pred[0, :, 0], label="prediction")
    ax2.legend()

    # NOTE(review): pred is aligned with the inputs (validation[1]) but is
    # scored against the shifted targets (validation[2]) — a one-step
    # offset; confirm this is intended.
    rep = mse(pred, validation[2]).numpy()
    plt.savefig(f"images_lstm_complexe/{rep}_{trial.number}_{HIDDEN}_{SIZE}.png")
    return rep
# Run the hyper-parameter search; Optuna minimizes the objective by default.
study = optuna.create_study()
study.optimize(objectif, n_trials=25)
# --- Script 4: relu-MLP baseline on the same sparse sine task (Optuna) ---
import tensorflow as tf
import tensorflow_addons as tfa
from matplotlib import pyplot as plt
import numpy as np
import optuna

densite = 10          # sample density: points per unit of x
start1 = 5            # first training window [start1, stop1]
stop1 = 15
start2 = 35           # second training window [start2, stop2]
stop2 = 45
start = -50           # dense validation range [start, stop]
stop = 150

# Training inputs: two disjoint windows of the sine curve (the model must
# interpolate/extrapolate everywhere else).
training = [np.concatenate((np.linspace(start1, stop1, (stop1 - start1) * densite),
                            np.linspace(start2, stop2, (stop2 - start2) * densite)))]
training.append(np.sin(training[0]) / 3 + 0.5)   # targets rescaled into (0, 1) for the sigmoid output
training[0] = training[0] / (stop - start)       # normalize x by the validation span

validation = [np.linspace(start, stop, (stop - start) * densite)]
validation.append(np.sin(validation[0]) / 3 + 0.5)
validation[0] = validation[0] / (stop - start)

fig = plt.figure(1)
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2, sharex=ax1)
ax1.plot(*validation, '.', label="validation")
ax2.plot(*training, '.', label="apprentissage")
# BUG FIX: a single plt.legend() only labels the current axes; show both.
ax1.legend()
ax2.legend()
plt.show()   # NOTE: blocks until the window is closed

# Loss/metric objects shared by the objective below.
bce = tf.keras.losses.BinaryCrossentropy()
mse = tf.keras.losses.MeanSquaredError()
def objectif(trial):
    """Optuna objective: train a relu MLP on the sparse sine samples and
    score it by MSE against the dense validation curve.

    Args:
        trial: optuna.Trial used to sample hidden width, depth and dropout.

    Returns:
        float: centered MSE on the validation curve (Optuna minimizes it).
    """
    HIDDEN = trial.suggest_int('hidden', 64, 512)
    SIZE = trial.suggest_int('size', 0, 2)
    DROPOUT = trial.suggest_float('dropout', 0, 0.3)

    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(HIDDEN, input_shape=(1,), activation='relu'))
    for _ in range(SIZE):
        model.add(tf.keras.layers.Dense(HIDDEN, activation='relu'))
        model.add(tf.keras.layers.Dropout(DROPOUT))
    model.add(tf.keras.layers.Dense(1, activation='sigmoid'))

    # BUG FIX: 'bce' is not a registered Keras loss alias (raises
    # "Unknown loss function" in tf.keras); use the BinaryCrossentropy
    # instance already defined at module level.
    model.compile(optimizer='adam', loss=bce, metrics=['mse'])

    model.fit(x=training[0], y=training[1], batch_size=4,
              epochs=3000, shuffle=True,
              # validation_data=(validation[0], validation[1]),
              verbose='auto')

    pred = model.predict(validation[0])

    plt.figure(2)
    plt.clf()
    plt.plot(*validation, label="validation")
    plt.plot(*training, label="apprentissage")
    plt.plot(validation[0], pred, label="prediction")
    plt.legend()

    # Center both curves (the 0.5 offset was added when building the
    # targets) before scoring.
    rep = mse(pred - 0.5, validation[1] - 0.5).numpy()
    plt.savefig(f"images_relu/{rep}_{trial.number}_{HIDDEN}_{SIZE}_{DROPOUT}.png")
    return rep
# Run the hyper-parameter search; Optuna minimizes the objective by default.
study = optuna.create_study()
study.optimize(objectif, n_trials=25)