# Network inspired by http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf


import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split

from resnet18 import ResNet18


def displayConvFilters(model, layer_name, num_filter=4, filter_size=(3, 3), num_channel=0, fig_size=(2, 2)):
    """Display the kernels of a convolutional layer for one input channel."""
    layer_dict = dict([(layer.name, layer) for layer in model.layers])

    weights, biases = layer_dict[layer_name].get_weights()
    print(weights.shape)  # (kernel_height, kernel_width, in_channels, num_filters)
    plt.figure(figsize=fig_size)
    for i in range(num_filter):
        plt.subplot(fig_size[0], fig_size[1], i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        vis = np.reshape(weights[:, :, num_channel, i], filter_size)
        plt.imshow(vis, cmap=plt.cm.binary)
    plt.show()


def snake(x):
    # Snake activation: x + sin^2(x), roughly linear in x with a periodic ripple
    return x + tf.sin(x) ** 2
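

# Illustrative sanity check (not part of the original pipeline): the snake activation
# is smooth, passes through 0, and grows roughly linearly for large |x|. The printed
# values only confirm that the function broadcasts over tensors as expected.
_grid = tf.linspace(-3.0, 3.0, 7)
print('snake on a small grid:', snake(_grid).numpy())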


## Load and normalize the data
cifar10 = tf.keras.datasets.cifar10
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')

# Hold out 20% of the training images for validation
train_images, val_images, train_labels, val_labels = train_test_split(
    train_images, train_labels, test_size=0.2, shuffle=True)
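
# Optional variant (illustrative sketch, not in the original script): a stratified split
# keeps the 10 CIFAR-10 classes balanced between the training and validation subsets.
# train_images, val_images, train_labels, val_labels = train_test_split(
#     train_images, train_labels, test_size=0.2, shuffle=True, stratify=train_labels.ravel())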


'''
Manual alternative to the train_test_split call above (kept for reference):

val_images = train_images[40000:]
val_labels = train_labels[40000:]

train_images = train_images[:40000]
train_labels = train_labels[:40000]
'''

# Scale pixel values to [0, 1]
train_images = train_images / 255.0
val_images = val_images / 255.0
test_images = test_images / 255.0

# For the CNN: make sure the images have an explicit channel dimension, i.e. shape (N, 32, 32, 3)
# (CIFAR-10 images are RGB, so this reshape is effectively a no-op kept for safety)
train_images = train_images.reshape(-1, 32, 32, 3)
val_images = val_images.reshape(-1, 32, 32, 3)
test_images = test_images.reshape(-1, 32, 32, 3)


# One-hot encoding of the labels
train_labels = tf.keras.utils.to_categorical(train_labels)
val_labels = tf.keras.utils.to_categorical(val_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)
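
# Illustrative sanity check (not part of the original script): after the split and the
# one-hot encoding, the expected shapes are train (40000, 32, 32, 3), val (10000, 32, 32, 3),
# test (10000, 32, 32, 3), each with a 10-column label matrix.
print('train:', train_images.shape, train_labels.shape)
print('val:  ', val_images.shape, val_labels.shape)
print('test: ', test_images.shape, test_labels.shape)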

filter_size_conv1 = (3, 3)

# Custom ResNet-18 with 10 output classes (defined in resnet18.py)
model = ResNet18(10)
model.build(input_shape=(None, 32, 32, 3))

'''
Alternative LeNet-style architecture (kept for reference):

filter_size_conv1 = (5, 5)
## Model architecture definition
model = tf.keras.models.Sequential()
# Explain what the numerical values defining the network layers correspond to
model.add(tf.keras.layers.Conv2D(filters=6, kernel_size=filter_size_conv1, padding="same", activation=snake, input_shape=(32, 32, 3)))
model.add(tf.keras.layers.AveragePooling2D())
model.add(tf.keras.layers.Conv2D(filters=16, kernel_size=(5, 5), padding="valid", activation=snake))
model.add(tf.keras.layers.AveragePooling2D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(120, activation=snake))
# model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(84, activation=snake))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
'''

# Explain the number of parameters of this network
print(model.summary())


optimizer = tf.keras.optimizers.Adam()

model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Visualize the filters before training

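# Illustrative sketch of the visualization announced above. The layer name 'conv2d' is an
# assumption: adjust it to whatever model.summary() reports for the first Conv2D layer of
# the custom ResNet18 (layers nested in sub-models may not appear in model.layers at all).
# displayConvFilters(model, 'conv2d', num_filter=4, filter_size=(3, 3), num_channel=0, fig_size=(2, 2))
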
history = model.fit(train_images,
                    train_labels,
                    batch_size=256,
                    epochs=50,
                    validation_data=(val_images, val_labels),
                    )

## Model evaluation
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)

fig, axs = plt.subplots(2, 1, figsize=(15, 15))

axs[0].plot(history.history['loss'])
axs[0].plot(history.history['val_loss'])
axs[0].title.set_text('Training Loss vs Validation Loss')
axs[0].legend(['Train', 'Val'])

axs[1].plot(history.history['accuracy'])
axs[1].plot(history.history['val_accuracy'])
axs[1].title.set_text('Training Accuracy vs Validation Accuracy')
axs[1].legend(['Train', 'Val'])
plt.savefig('./resnet18snake.png')
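
# Optional follow-up (illustrative sketch, not in the original script): persist the trained
# weights so the run can be reloaded later. The checkpoint path is an arbitrary choice, and
# subclassed models such as this ResNet18 are generally easier to restore from saved weights
# than from a full saved model.
# model.save_weights('./resnet18snake_ckpt')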


'''
displayConvFilters(model,
                   'conv2d',
                   num_filter=6,
                   filter_size=filter_size_conv1,
                   num_channel=0,
                   fig_size=(2, 3)
                   )
'''