@@ -1,14 +1,12 @@

# Network inspired by http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf
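# (LeCun et al., 1998, "Gradient-Based Learning Applied to Document Recognition": the LeNet-5 paper)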

from keras.callbacks import History
from resnet18 import ResNet18  # presumably a local module defining the ResNet18 model
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# For macOS users (so that plt and keras can be used at the same time)
import os
#os.environ['KMP_DUPLICATE_LIB_OK']='True'
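# (Uncommenting the line above is a common workaround for the "duplicate OpenMP runtime"
#  crash that can occur on macOS when matplotlib and TensorFlow are loaded together.)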


def displayConvFilers(model, layer_name, num_filter=4, filter_size=(3,3), num_channel=0, fig_size=(2,2)):

@@ -36,20 +34,28 @@ def snake(x):

resnet18 = tf.keras.datasets.cifar10  # CIFAR-10 dataset module (despite the variable name)
(train_images, train_labels), (test_images, test_labels) = resnet18.load_data()
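# CIFAR-10: 50,000 training and 10,000 test RGB images of shape (32, 32, 3),
# with integer class labels in the range 0-9.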

from sklearn.model_selection import train_test_split

train_images, val_images, train_labels, val_labels = train_test_split(train_images, train_labels, test_size=0.2, shuffle=True)
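# Hold out 20% of the 50,000 training images for validation: 40,000 train / 10,000 validation.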

# Earlier manual 40,000/10,000 split, left disabled for reference:
'''
val_images = train_images[40000:]
val_labels = train_labels[40000:]

train_images = train_images[:40000]
train_labels = train_labels[:40000]
'''

# Scale pixel values from [0, 255] to [0, 1]
train_images = train_images / 255.0
val_images = val_images / 255.0
test_images = test_images / 255.0

# FOR CNNs: reshape so the channel dimension is explicit (CIFAR-10 images are RGB: 32x32x3)
train_images = train_images.reshape(-1, 32, 32, 3)
val_images = val_images.reshape(-1, 32, 32, 3)
test_images = test_images.reshape(-1, 32, 32, 3)

# One hot encoding