mirror of https://github.com/prise6/smart-iss-posts synced 2024-05-04 14:43:11 +02:00

generalize autoencoder models + execution on floyd

Francois Vieille 2019-12-08 02:25:27 +01:00
parent 45dbfd8db7
commit 4f6fe3edc8
5 changed files with 70 additions and 30 deletions

View file

@@ -69,6 +69,20 @@ exec_clustering:
 	$(PYTHON_INTERPRETER) -m iss.exec.clustering

+#################################################################################
+# FLOYDHUB                                                                      #
+#################################################################################
+
+floyd_training:
+	floyd run --task training
+
+floyd_training_prod:
+	floyd run --task training_prod
+
+floyd_retraining:
+	floyd run --task retraining
+
 #################################################################################
 # Self Documenting Commands                                                     #
 #################################################################################
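Note: these targets are thin wrappers around the FloydHub CLI; `floyd run --task <name>` only works if the named tasks are declared in the project's floyd.yml. A minimal sketch of such a file, assuming FloydHub's task syntax; the environment, machine type and module path below are illustrative and not part of this commit:

    # floyd.yml (hypothetical sketch, not part of this commit)
    env: tensorflow-1.12              # illustrative environment
    machine: cpu                      # illustrative default machine
    task:
      training:
        command: python -m iss.exec.training
      training_prod:
        machine: gpu
        command: python -m iss.exec.training --model-type simple_conv
      retraining:
        command: python -m iss.exec.training --load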

View file

@@ -1,4 +1,5 @@
 import os
+import click

 from iss.init_config import CONFIG
 from iss.models.DataLoader import ImageDataGeneratorWrapper
@@ -8,29 +9,45 @@ from iss.models import SimpleConvAutoEncoder
 from iss.models import VarAutoEncoder
 from iss.models import VarConvAutoEncoder

-## Variables globales
-_MODEL_TYPE = 'simple_conv'
-_LOAD_NAME = None
-_LOAD = False
-
-## Data loader
-data_loader = ImageDataGeneratorWrapper(CONFIG, model = _MODEL_TYPE)
-
-## Model
-if _MODEL_TYPE in ['simple_conv']:
-    model = SimpleConvAutoEncoder(CONFIG.get('models')[_MODEL_TYPE])
-
-if _LOAD:
-    model.load(which = _LOAD_NAME)
-
-model.encoder_model.summary()
-model.decoder_model.summary()
-model.model.summary()
-
-## Entraineur
-trainer = ModelTrainer(model, data_loader, CONFIG.get('models')[_MODEL_TYPE], callbacks=[])
-
-## Entrainement
-try:
-    trainer.train()
-except KeyboardInterrupt:
-    trainer.model.save()
+
+@click.command()
+@click.option('--model-type', default='simple_conv', show_default=True, type=str)
+@click.option('--load', default=False, is_flag=True)
+@click.option('--load-name', default=None, show_default=True, type=str)
+def main(model_type, load, load_name):
+
+    ## Variables globales
+    _MODEL_TYPE = model_type
+    _LOAD_NAME = load_name
+    _LOAD = load
+
+    ## Data loader
+    data_loader = ImageDataGeneratorWrapper(CONFIG, model = _MODEL_TYPE)
+
+    ## Model
+    if _MODEL_TYPE in ['simple_conv']:
+        model = SimpleConvAutoEncoder(CONFIG.get('models')[_MODEL_TYPE])
+    elif _MODEL_TYPE in ['simple']:
+        model = SimpleAutoEncoder(CONFIG.get('models')[_MODEL_TYPE])
+    else:
+        raise Exception
+
+    if _LOAD:
+        model.load(which = _LOAD_NAME)
+
+    model.encoder_model.summary()
+    model.decoder_model.summary()
+    model.model.summary()
+
+    ## Entraineur
+    trainer = ModelTrainer(model, data_loader, CONFIG.get('models')[_MODEL_TYPE], callbacks=[])
+
+    ## Entrainement
+    try:
+        trainer.train()
+    except KeyboardInterrupt:
+        trainer.model.save()
+
+
+if __name__ == '__main__':
+    main()
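With the click wrapper, the training entry point is configurable from the command line (and therefore from the FloydHub tasks above) instead of by editing module-level globals. A hypothetical invocation, assuming the script is exposed as the module iss.exec.training (the actual file path is not shown in this view):

    python -m iss.exec.training --model-type simple --load --load-name <checkpoint-name>

--load is a boolean flag, and --load-name is forwarded to model.load(which=...), so it should name an existing checkpoint.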

View file

@@ -42,7 +42,6 @@ class TensorboardCallback(Callback):
         super(TensorboardCallback, self).__init__()

     def on_epoch_end(self, epoch, logs=None):
-        print(logs)

         image_summaries = []
         for input_pict in self.data_loader.next()[0][:self.limit_image]:
@@ -75,3 +74,10 @@ class TensorboardCallback(Callback):
             self.writer.flush()
+
+
+class FloydhubTrainigMetricsCallback(Callback):
+    """FloydHub Training Metric Integration"""
+
+    def on_epoch_end(self, epoch, logs=None):
+        """Print Training Metrics"""
+        print('{{"metric": "loss", "value": {}, "epoch": {}}}'.format(logs.get('loss'), epoch))
+        print('{{"metric": "val_loss", "value": {}, "epoch": {}}}'.format(logs.get('val_loss'), epoch))

View file

@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-

 from keras.callbacks import ModelCheckpoint, CSVLogger
-from iss.models.Callbacks import DisplayPictureCallback, TensorboardCallback
+from iss.models.Callbacks import DisplayPictureCallback, TensorboardCallback, FloydhubTrainigMetricsCallback
 from iss.tools.tools import Tools
 import keras
@@ -98,5 +98,7 @@ class ModelTrainer:
                 data_loader = self.data_loader.get_train_generator()
             )])

+        if 'floyd' in config['callbacks']:
+            self.callbacks.extend([FloydhubTrainigMetricsCallback()])
+
         return self
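The FloydHub callback is opt-in: it is attached only when the string 'floyd' appears in the callbacks list of the per-model configuration (the dict returned by CONFIG.get('models')[model_type] and passed to ModelTrainer). A hypothetical sketch of that fragment; only the callbacks key is what the check above reads, the other entries are illustrative:

    # hypothetical shape of CONFIG.get('models')['simple_conv']
    config = {
        'callbacks': ['tensorboard', 'floyd'],   # adding 'floyd' enables FloydhubTrainigMetricsCallback
        'learning_rate': 0.001,                  # illustrative; other keys omitted
    }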

View file

@@ -17,6 +17,7 @@ class SimpleAutoEncoder(AbstractAutoEncoderModel):
         self.activation = config['activation']
         self.input_shape = (config['input_height'], config['input_width'], config['input_channel'])
+        self.latent_shape = config['latent_shape']
         self.lr = config['learning_rate']

         self.build_model()
@@ -25,25 +26,25 @@ class SimpleAutoEncoder(AbstractAutoEncoderModel):

         picture = Input(shape = input_shape)

+        # encoded network
         x = Flatten()(picture)
         layer_1 = Dense(1000, activation = 'relu', name = 'enc_1')(x)
         layer_2 = Dense(100, activation = 'relu', name = 'enc_2')(layer_1)
-        layer_3 = Dense(50, activation = 'relu', name = 'enc_3')(layer_2)
-        layer_4 = Dense(100, activation = 'relu', name = 'dec_1')(layer_3)
+        encoded = Dense(self.latent_shape, activation = 'relu', name = 'enc_3')(layer_2)
+        self.encoder_model = Model(picture, encoded, name = "encoder")
+
+        # decoded netword
+        latent_input = Input(shape = (self.latent_shape,))
+        layer_4 = Dense(100, activation = 'relu', name = 'dec_1')(latent_input)
         layer_5 = Dense(1000, activation = 'relu', name = 'dec_2')(layer_4)
-
-        # encoded network
-        # x = Conv2D(1, (3, 3), activation = 'relu', padding = 'same', name = 'enc_conv_1')(picture)
-        # encoded = MaxPooling2D((2, 2))(x)
-
-        # decoded network
-        # x = Conv2D(1, (3, 3), activation = 'relu', padding = 'same', name = 'dec_conv_1')(encoded)
-        # x = UpSampling2D((2, 2))(x)
-        # x = Flatten()(x)
         x = Dense(np.prod(input_shape), activation = self.activation)(layer_5)
         decoded = Reshape((input_shape))(x)
-        self.model = Model(picture, decoded)
+        self.decoder_model = Model(latent_input, decoded, name = "decoder")
+
+        picture_dec = self.decoder_model(self.encoder_model(picture))
+        self.model = Model(picture, picture_dec)

         # optimizer = Adadelta(lr = self.lr, rho = 0.95, epsilon = None, decay = 0.0)
         optimizer = Adam(lr = 0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
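Splitting the autoencoder into named encoder_model and decoder_model sub-models, with latent_shape read from the config, makes the latent representation directly accessible (for example for the downstream clustering step) instead of only the end-to-end reconstruction. A minimal usage sketch, assuming a built SimpleAutoEncoder instance named model; the random batch is a stand-in for what the data loader yields:

    import numpy as np

    # hypothetical batch with the model's input shape
    batch = np.random.rand(8, *model.input_shape)

    codes = model.encoder_model.predict(batch)            # shape: (8, latent_shape)
    reconstructions = model.decoder_model.predict(codes)  # shape: (8, input_height, input_width, input_channel)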