Deletion of old files

Francois 2019-03-09 22:40:41 +01:00
parent 98e01b6a3d
commit bcaeb4cb1f
5 changed files with 0 additions and 253 deletions

@@ -1,98 +0,0 @@
import tensorflow as tf
import os
import sys
from dotenv import find_dotenv, load_dotenv
from data_loader import TFRecordsLoader
from trainer_model import BaseTrainer


class BaseModel:
    def __init__(self):
        self.cur_epoch = None
        self.increment_cur_epoch = None
        self.global_step = None
        self.increment_global_step = None
        self.init_global_step()
        self.init_cur_epoch()
        self.is_training = None
        self.x = None
        self.y = None
        self.cross_entropy = None
        self.accuracy = None
        self.train_step = None
        self.tmp = None
        self.build_model()
        self.init_saver()

    def save(self, sess):
        print("Saving model...")
        self.saver.save(sess, os.getenv('CHECKPOINT_DIR'), self.global_step)

    def load(self, sess):
        print("Loading model...")
        latest_checkpoint = tf.train.latest_checkpoint(os.getenv('CHECKPOINT_DIR'))
        if latest_checkpoint:
            print("Loading model checkpoint {}...".format(latest_checkpoint))
            self.saver.restore(sess, latest_checkpoint)
            print("Model loaded")

    def init_cur_epoch(self):
        # Epoch counter stored in the graph so it survives checkpointing.
        self.cur_epoch = tf.Variable(0, trainable=False)
        self.increment_cur_epoch = tf.assign(self.cur_epoch, self.cur_epoch + 1)

    def init_global_step(self):
        self.global_step = tf.Variable(0, trainable=False)
        self.increment_global_step = tf.assign(self.global_step, self.global_step + 1)

    def init_saver(self):
        self.saver = tf.train.Saver(max_to_keep=int(os.getenv('MAX_TO_KEEP')),
                                    save_relative_paths=True)

    def build_model(self):
        # 64x36 RGB frames in, two-class logits out.
        self.x = tf.placeholder(tf.float32, [None, 36, 64, 3])
        self.y = tf.placeholder(tf.int64, [int(os.getenv('BATCH_SIZE'))])
        self.is_training = tf.placeholder(tf.bool)
        conv1 = tf.layers.conv2d(self.x, 32, 5, activation=tf.nn.relu)
        conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
        conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
        conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
        fc1 = tf.contrib.layers.flatten(conv2)
        fc1 = tf.layers.dense(fc1, 98)
        fc2 = tf.layers.dense(fc1, 49)
        out = tf.layers.dense(fc2, 2)
        # Earlier single-output sigmoid variant, kept for reference:
        # labels = tf.reshape(tf.cast(self.y, tf.float32), (int(os.getenv('BATCH_SIZE')), -1))
        # self.cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=tf.nn.sigmoid(out)))
        # self.cross_entropy = tf.reduce_mean(tf.losses.sigmoid_cross_entropy(labels, logits=tf.nn.sigmoid(out)))
        # self.tmp = tf.count_nonzero(self.y)
        # self.tmp = tf.reduce_min(tf.nn.softmax(out), axis=1)
        # self.tmp = tf.nn.sigmoid(out)
        # self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(tf.greater(tf.nn.sigmoid(out), 0.8), tf.int64), self.y), tf.float32))
        # self.train_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(self.cross_entropy)
        # With a two-neuron output:
        self.cross_entropy = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.y, logits=out))
        self.tmp = tf.nn.sigmoid(out)
        correct_prediction = tf.equal(tf.argmax(out, 1), self.y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        self.train_step = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(self.cross_entropy)
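
A minimal smoke test for this graph (hypothetical, not part of the deleted files): it assumes TensorFlow 1.x, that base_model.py is importable, and it stubs BATCH_SIZE and MAX_TO_KEEP with illustrative values before the graph is built.

import os
os.environ.setdefault('BATCH_SIZE', '4')   # assumed value, for illustration only
os.environ.setdefault('MAX_TO_KEEP', '5')  # assumed value, for illustration only

import numpy as np
import tensorflow as tf
from base_model import BaseModel

model = BaseModel()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    x = np.random.rand(4, 36, 64, 3).astype(np.float32)  # one fake batch of 64x36 RGB frames
    y = np.random.randint(0, 2, size=4)                   # fake binary labels
    ent, acc = sess.run([model.cross_entropy, model.accuracy],
                        feed_dict={model.x: x, model.y: y, model.is_training: False})
    print("ent: {:.4f} -- acc: {:.4f}".format(ent, acc))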

@@ -1,56 +0,0 @@
import tensorflow as tf
import os
import sys


class TFRecordsLoader:
    """
    Dataset API - load TFRecords from disk
    """
    def __init__(self):
        self.is_train = None
        self.train_handle = None
        self.test_handle = None
        # Training pipeline: parse, shuffle, repeat indefinitely, batch.
        self.dataset = tf.data.TFRecordDataset(os.getenv('TRAIN_TFRECORD'))
        self.dataset = self.dataset.map(TFRecordsLoader.parser)
        self.dataset = self.dataset.shuffle(1000)
        self.dataset = self.dataset.repeat()
        self.dataset = self.dataset.batch(int(os.getenv('BATCH_SIZE')))
        # Test pipeline: same parsing, no shuffling.
        self.test = tf.data.TFRecordDataset(os.getenv('TEST_TFRECORDS'))
        self.test = self.test.map(TFRecordsLoader.parser)
        self.test = self.test.repeat()
        self.test = self.test.batch(int(os.getenv('BATCH_SIZE')))
        # Feedable iterator: the string handle fed at run time selects
        # whether batches come from the train or the test pipeline.
        self.train_it = self.dataset.make_one_shot_iterator().string_handle()
        self.test_it = self.test.make_one_shot_iterator().string_handle()
        self.handle = tf.placeholder(tf.string, shape=[])
        self.iterator = tf.data.Iterator.from_string_handle(
            self.handle, self.dataset.output_types, self.dataset.output_shapes)
        # Build get_next() once; calling it on every step would keep
        # adding new ops to the graph.
        self.next_element = self.iterator.get_next()

    @staticmethod
    def parser(record):
        keys_to_features = {
            'input': tf.FixedLenFeature((), tf.string),
            'label': tf.FixedLenFeature((), tf.int64)
        }
        parsed = tf.parse_single_example(record, keys_to_features)
        image = tf.decode_raw(parsed['input'], tf.float64)
        image = tf.reshape(image, [36, 64, 3])
        image = tf.cast(image, tf.float32)
        label = parsed['label']
        return image, label

    def set_is_train(self, is_train):
        self.is_train = is_train

    def initialize(self, sess):
        self.train_handle, self.test_handle = sess.run([self.train_it, self.test_it])

    def get_input(self, sess):
        return sess.run(self.next_element,
                        feed_dict={self.handle: self.train_handle if self.is_train else self.test_handle})
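
With this feedable-iterator pattern a single get_next op serves both pipelines. A usage sketch (hypothetical, assuming TensorFlow 1.x and that TRAIN_TFRECORD, TEST_TFRECORDS, and BATCH_SIZE are set in the environment):

import tensorflow as tf
from data_loader import TFRecordsLoader

loader = TFRecordsLoader()
with tf.Session() as sess:
    loader.initialize(sess)                    # materialize the two string handles
    loader.set_is_train(True)
    images, labels = loader.get_input(sess)    # training batch, images shape (BATCH_SIZE, 36, 64, 3)
    loader.set_is_train(False)
    images, labels = loader.get_input(sess)    # test batch from the same iterator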

@@ -1,22 +0,0 @@
import tensorflow as tf
import os
import sys
from dotenv import find_dotenv, load_dotenv
from data_loader import TFRecordsLoader
from trainer_model import BaseTrainer
from base_model import BaseModel


def main():
    load_dotenv(find_dotenv())
    sess = tf.Session()
    model = BaseModel()
    data_loader = TFRecordsLoader()
    trainer = BaseTrainer(sess, model, data_loader)
    trainer.train()


if __name__ == '__main__':
    main()
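
main() only works if find_dotenv() locates a .env file defining every variable the modules read. A plausible sample, where all paths and numbers are illustrative assumptions, not values from this repository:

# .env (illustrative values only)
TRAIN_TFRECORD=data/train.tfrecords
TEST_TFRECORDS=data/test.tfrecords
BATCH_SIZE=32
NUM_EPOCH=50
NUM_ITER_BATCH=100
CHECKPOINT_DIR=checkpoints/model
MAX_TO_KEEP=5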

@@ -1,77 +0,0 @@
import tensorflow as tf
import os
import sys
import numpy as np


class BaseTrainer:
    def __init__(self, sess, model, data_loader):
        self.sess = sess
        self.model = model
        self.data_loader = data_loader
        self.init = tf.global_variables_initializer()
        sess.run(self.init)
        self.model.load(self.sess)

    def train(self):
        self.data_loader.initialize(self.sess)
        for cur_epoch in range(self.model.cur_epoch.eval(self.sess), int(os.getenv('NUM_EPOCH')) + 1):
            ent_train, acc_train = self.train_epoch(cur_epoch)
            self.sess.run(self.model.increment_cur_epoch)
            ent_test, acc_test = self.test(cur_epoch)
            # train metrics | test metrics
            print("Epoch-{} ent:{:.4f} -- acc:{:.4f} | ent:{:.4f} -- acc:{:.4f}".format(
                cur_epoch, ent_train, acc_train, ent_test, acc_test))

    def train_epoch(self, epoch=None):
        self.data_loader.set_is_train(is_train=True)
        entropies = []
        accuracies = []
        for _ in range(int(os.getenv('NUM_ITER_BATCH'))):
            ent, acc = self.train_step()
            # print("acc : {}".format(acc))
            entropies.append(ent)
            accuracies.append(acc)
        ent = np.mean(entropies)
        acc = np.mean(accuracies)
        return ent, acc
        # self.model.save(self.sess)

    def train_step(self):
        batch_x, batch_y = self.data_loader.get_input(self.sess)
        feed_dict = {
            self.model.x: batch_x,
            self.model.y: batch_y,
            self.model.is_training: True
        }
        _, ent, acc, tmp = self.sess.run(
            [self.model.train_step, self.model.cross_entropy, self.model.accuracy, self.model.tmp],
            feed_dict=feed_dict)
        # print("tmp : {}".format(tmp))
        # print("selfy : {}".format(batch_y))
        return ent, acc

    def test(self, epoch):
        # Evaluate on a single test batch.
        self.data_loader.set_is_train(is_train=False)
        test_x, test_y = self.data_loader.get_input(self.sess)
        feed_dict = {
            self.model.x: test_x,
            self.model.y: test_y,
            self.model.is_training: False
        }
        ent, acc, tmp = self.sess.run(
            [self.model.cross_entropy, self.model.accuracy, self.model.tmp],
            feed_dict=feed_dict)
        # print("On the test data {} - ent:{:.4f} -- acc:{:.4f}".format(epoch, ent, acc))
        # print("selfy : {}".format(test_y))
        return ent, acc