
add resnet18

Emilien_train
emilien, 4 years ago
Parent revision: 145f769f75
2 files changed, 194 insertions and 0 deletions
  1. code/resnet18/resnet18.py (+92, -0)
  2. code/resnet18/resnet18_snake.py (+102, -0)

code/resnet18/resnet18.py (+92, -0)

@@ -0,0 +1,92 @@
from keras.callbacks import EarlyStopping
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten, GlobalAveragePooling2D, BatchNormalization, Layer, Add
from keras.models import Sequential
from keras.models import Model
import tensorflow as tf

class ResnetBlock(Model):
    """
    A standard resnet block.
    """

    def __init__(self, channels: int, down_sample=False):
        """
        channels: same as the number of convolution kernels
        """
        super().__init__()

        self.__channels = channels
        self.__down_sample = down_sample
        self.__strides = [2, 1] if down_sample else [1, 1]

        KERNEL_SIZE = (3, 3)
        # use He initialization instead of Xavier (a.k.a 'glorot_uniform' in Keras), as suggested in [2]
        INIT_SCHEME = "he_normal"

        self.conv_1 = Conv2D(self.__channels, strides=self.__strides[0],
                             kernel_size=KERNEL_SIZE, padding="same", kernel_initializer=INIT_SCHEME)
        self.bn_1 = BatchNormalization()
        self.conv_2 = Conv2D(self.__channels, strides=self.__strides[1],
                             kernel_size=KERNEL_SIZE, padding="same", kernel_initializer=INIT_SCHEME)
        self.bn_2 = BatchNormalization()
        self.merge = Add()

        if self.__down_sample:
            # perform down-sampling with a stride of 2, according to [1]
            self.res_conv = Conv2D(
                self.__channels, strides=2, kernel_size=(1, 1), kernel_initializer=INIT_SCHEME, padding="same")
            self.res_bn = BatchNormalization()

    def call(self, inputs):
        res = inputs

        x = self.conv_1(inputs)
        x = self.bn_1(x)
        x = tf.nn.relu(x)
        x = self.conv_2(x)
        x = self.bn_2(x)

        if self.__down_sample:
            # project the shortcut with a strided 1x1 convolution so shapes match
            res = self.res_conv(res)
            res = self.res_bn(res)

        # without down-sampling, the shortcut is added directly
        x = self.merge([x, res])
        out = tf.nn.relu(x)
        return out


class ResNet18(Model):

    def __init__(self, num_classes, **kwargs):
        """
        num_classes: number of classes in the specific classification task.
        """
        super().__init__(**kwargs)
        self.conv_1 = Conv2D(64, (7, 7), strides=2,
                             padding="same", kernel_initializer="he_normal")
        self.init_bn = BatchNormalization()
        self.pool_2 = MaxPool2D(pool_size=(2, 2), strides=2, padding="same")
        self.res_1_1 = ResnetBlock(64)
        self.res_1_2 = ResnetBlock(64)
        self.res_2_1 = ResnetBlock(128, down_sample=True)
        self.res_2_2 = ResnetBlock(128)
        self.res_3_1 = ResnetBlock(256, down_sample=True)
        self.res_3_2 = ResnetBlock(256)
        self.res_4_1 = ResnetBlock(512, down_sample=True)
        self.res_4_2 = ResnetBlock(512)
        self.avg_pool = GlobalAveragePooling2D()
        self.flat = Flatten()
        self.fc = Dense(num_classes, activation="softmax")

    def call(self, inputs):
        out = self.conv_1(inputs)
        out = self.init_bn(out)
        out = tf.nn.relu(out)
        out = self.pool_2(out)
        for res_block in [self.res_1_1, self.res_1_2, self.res_2_1, self.res_2_2,
                          self.res_3_1, self.res_3_2, self.res_4_1, self.res_4_2]:
            out = res_block(out)
        out = self.avg_pool(out)
        out = self.flat(out)
        out = self.fc(out)
        return out
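
Quick sanity check (not part of the committed files): with down_sample=True the first convolution uses stride 2, so the spatial resolution is halved, while the strided 1x1 projection makes the residual addition shape-compatible. A minimal sketch, assuming ResnetBlock from the file above is importable:

import tensorflow as tf
from resnet18 import ResnetBlock

block = ResnetBlock(128, down_sample=True)
x = tf.random.normal((1, 16, 16, 64))
y = block(x)
print(y.shape)  # (1, 8, 8, 128): spatial size halved, channels increased to 128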

code/resnet18/resnet18_snake.py (+102, -0)

@@ -0,0 +1,102 @@
# Network inspired by http://yann.lecun.com/exdb/publis/pdf/lecun-98.pdf


from resnet18 import ResNet18
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# For macOS users (to use plt & keras at the same time)
import os
#os.environ['KMP_DUPLICATE_LIB_OK']='True'


def displayConvFilers(model, layer_name, num_filter=4, filter_size=(3,3), num_channel=0, fig_size=(2,2)):
    # display the convolution kernels of the given layer as small grayscale images
    layer_dict = dict([(layer.name, layer) for layer in model.layers])
    weight, biais = layer_dict[layer_name].get_weights()
    print(weight.shape)
    plt.figure(figsize=fig_size)
    for i in range(num_filter):
        plt.subplot(fig_size[0], fig_size[1], i+1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        vis = np.reshape(weight[:, :, num_channel, i], filter_size)
        plt.imshow(vis, cmap=plt.cm.binary)
    plt.show()


def snake(x):
    # "snake" activation (x + sin^2(x)); defined here but not used in the rest of this script
    return x + tf.sin(x)**2


## Load and normalize the data
cifar10 = tf.keras.datasets.cifar10
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0

# For the CNN: make the input shape explicit (CIFAR-10 images are 32x32 RGB, hence 3 channels)
train_images = train_images.reshape(50000, 32, 32, 3)
test_images = test_images.reshape(10000, 32, 32, 3)


# One hot encoding
train_labels = tf.keras.utils.to_categorical(train_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)

filter_size_conv1 = (3,3)

model = ResNet18(10)
model.build(input_shape = (None,32,32,3))


# print the summary to inspect the number of parameters of this network
model.summary()


optimizer = tf.keras.optimizers.Adam()

model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy'])

# Visualize the filters before training
'''
displayConvFilers(model, 'conv2d',
                  num_filter=6,
                  filter_size=filter_size_conv1,
                  num_channel=0,
                  fig_size=(2,3)
                  )
'''


model.fit(train_images,
          train_labels,
          batch_size=64,
          epochs=4
          )

## Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)


'''
displayConvFilers(model,
                  'conv2d',
                  num_filter=6,
                  filter_size=filter_size_conv1,
                  num_channel=0,
                  fig_size=(2,3)
                  )
'''

'''
displayConvFilers(model, 'conv2d_1',
                  num_filter=16,
                  filter_size=(5,5),
                  num_channel=1,
                  fig_size=(4,4)
                  )
'''
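
Note: the snake activation defined at the top of resnet18_snake.py is never wired into the network in this commit; the ResNet18 blocks still use tf.nn.relu. A minimal sketch of how snake could be plugged in, assuming a standalone Keras layer rather than the committed ResNet18 class (hypothetical example, not part of the commit):

import tensorflow as tf

def snake(x):
    # periodic activation: x + sin^2(x)
    return x + tf.sin(x)**2

# Keras layers accept any callable as their activation
layer = tf.keras.layers.Dense(64, activation=snake)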
