#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:25:22 CEST

import logging
logger = logging.getLogger("bob.learn.tensorflow")
import tensorflow as tf
from ..network import SequenceNetwork
import threading
import numpy
import os
import bob.io.base
from tensorflow.core.framework import summary_pb2


class Trainer(object):

    def __init__(self,
                 architecture,
                 optimizer=tf.train.AdamOptimizer(),
                 use_gpu=False,
                 loss=None,
                 temp_dir="cnn",

                 # Learning rate
                 base_learning_rate=0.001,
                 weight_decay=0.9,  # decay rate of the exponential learning-rate schedule

                 ###### training options ##########
                 convergence_threshold=0.01,
                 iterations=5000,
                 snapshot=100):
        """

        **Parameters**

          architecture: The architecture to train. Must be an instance of :py:class:`bob.learn.tensorflow.network.SequenceNetwork`
          optimizer: One of the tensorflow optimizers https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html
          use_gpu: If True, run the training on the GPU
          loss: The loss function to be minimized
          temp_dir: Directory where checkpoints and TensorBoard summaries are written
          base_learning_rate: Initial value of the exponentially decayed learning rate
          weight_decay: Decay rate of the exponential learning-rate schedule
          convergence_threshold: Threshold on the loss for declaring convergence
          iterations: Maximum number of training iterations
          snapshot: Interval (in iterations) between validation snapshots
        """
        if not isinstance(architecture, SequenceNetwork):
            raise ValueError("`architecture` should be instance of `SequenceNetwork`")

        self.architecture = architecture
        self.optimizer = optimizer
        self.use_gpu = use_gpu
        self.loss = loss
        self.temp_dir = temp_dir

        self.base_learning_rate = base_learning_rate
        self.weight_decay = weight_decay

        self.iterations = iterations
        self.snapshot = snapshot
        self.convergence_threshold = convergence_threshold

    def train(self, train_data_shuffler, validation_data_shuffler=None):
        """
        Do the loop forward --> backward --|
                      ^--------------------|
        """

        def start_thread():
            """
            Starts the background thread that feeds the prefetch queue
            """
            threads = []
            for n in range(1):  # currently a single producer thread
                t = threading.Thread(target=load_and_enqueue)
                t.daemon = True  # thread will close when parent quits
                t.start()
                threads.append(t)
            return threads

        def load_and_enqueue():
            """
            Injects data into the placeholder queue
            """
            while not thread_pool.should_stop():
                train_data, train_labels = train_data_shuffler.get_batch()

                feed_dict = {train_placeholder_data: train_data,
                             train_placeholder_labels: train_labels}

                session.run(enqueue_op, feed_dict=feed_dict)
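        # Prefetching pattern: `load_and_enqueue` runs in a background thread
        # (see `start_thread`) and pushes batches through `enqueue_op` into the
        # FIFO queue defined below, while the main thread dequeues them for the
        # training step.  This overlaps data loading with computation and hides
        # I/O latency.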

        # TODO: find an elegant way to provide this as a parameter of the trainer
        learning_rate = tf.train.exponential_decay(
            self.base_learning_rate,         # Base learning rate
            train_data_shuffler.batch_size,  # Global step
            train_data_shuffler.n_samples,   # Decay steps
            self.weight_decay                # Decay rate
        )
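        # tf.train.exponential_decay computes
        #   lr = base_learning_rate * weight_decay ** (global_step / decay_steps)
        # Note that a constant (the batch size) is passed as `global_step`, so
        # the learning rate does not actually change over the training steps.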

        # Creating directory
        bob.io.base.create_directories_safe(self.temp_dir)

        # Defining placeholders
        train_placeholder_data, train_placeholder_labels = train_data_shuffler.get_placeholders_forprefetch(name="train")
        if validation_data_shuffler is not None:
            validation_placeholder_data, validation_placeholder_labels = \
                validation_data_shuffler.get_placeholders(name="validation")
        # Defining a placeholder queue for prefetching
        queue = tf.FIFOQueue(capacity=10,
                             dtypes=[tf.float32, tf.int64],
                             shapes=[train_placeholder_data.get_shape().as_list()[1:], []])

        # Pushing the placeholders into the queue
        enqueue_op = queue.enqueue_many([train_placeholder_data, train_placeholder_labels])
        train_feature_batch, train_label_batch = queue.dequeue_many(train_data_shuffler.batch_size)
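        # Consumer side of the prefetch queue: the training graph reads its
        # input from `dequeue_many` instead of a feed_dict, so each training
        # step consumes a batch already prepared by the background thread.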

        # Sanity check: the architecture must be a SequenceNetwork
        if not isinstance(self.architecture, SequenceNetwork):
            raise ValueError("The variable `architecture` must be an instance of "
                             "`bob.learn.tensorflow.network.SequenceNetwork`")

        # Creating graphs and defining the loss
        train_graph = self.architecture.compute_graph(train_feature_batch)
        loss_train = self.loss(train_graph, train_label_batch)

        # Preparing the optimizer
        self.optimizer._learning_rate = learning_rate
        optimizer = self.optimizer.minimize(loss_train, global_step=tf.Variable(0))
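        # NOTE: `_learning_rate` is a private attribute of the tensorflow
        # optimizers, so this assignment relies on implementation details of
        # the optimizer passed to the constructor.  The fresh tf.Variable(0)
        # used as `global_step` is incremented by `minimize` but is not
        # connected to the learning-rate schedule above.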

        # Train summary
        tf.scalar_summary('loss', loss_train, name="train")
        tf.scalar_summary('lr', learning_rate, name="train")
        merged_train = tf.merge_all_summaries()

        # Validation graph and loss (built only when a validation set is given)
        if validation_data_shuffler is not None:
            validation_graph = self.architecture.compute_graph(validation_placeholder_data)
            loss_validation = self.loss(validation_graph, validation_placeholder_labels)

        print("Initializing !!")
        # Training
        hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')

        with tf.Session() as session:

            tf.initialize_all_variables().run()

            # Start a thread to enqueue data asynchronously, and hide I/O latency.
            thread_pool = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=thread_pool)

            threads = start_thread()
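            # The Coordinator is shared between the tensorflow queue runners
            # and our custom producer thread: `thread_pool.should_stop()` is
            # what makes `load_and_enqueue` exit at the end of training.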

            # TensorBoard summary writers
            train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), session.graph)
            validation_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)
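            # The summaries written by these writers can be inspected with
            # `tensorboard --logdir=<temp_dir>`.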

160 161
            for step in range(self.iterations):

                _, l, lr, summary = session.run([optimizer, loss_train,
                                                 learning_rate, merged_train])
                train_writer.add_summary(summary, step)

                if validation_data_shuffler is not None and step % self.snapshot == 0:
                    validation_data, validation_labels = validation_data_shuffler.get_batch()

169 170
                    feed_dict = {validation_placeholder_data: validation_data,
                                 validation_placeholder_labels: validation_labels}

                    l = session.run(loss_validation, feed_dict=feed_dict)
                    summaries = []
                    summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
                    validation_writer.add_summary(summary_pb2.Summary(value=summaries), step)
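                    # The validation loss is written as a hand-built Summary
                    # protobuf because no tensorflow summary ops are attached
                    # to the validation graph.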

                    print("Step {0}. Loss = {1}".format(step, l))

            train_writer.close()

            self.architecture.save(hdf5)
            del hdf5  # closes the HDF5 file

            # now they should definitely stop
            thread_pool.request_stop()
            thread_pool.join(threads)
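
# Minimal usage sketch (hypothetical names: `MyNetwork`, `my_loss` and the
# shufflers are stand-ins for a concrete SequenceNetwork subclass, a loss
# callable and data shufflers; they are not defined in this module):
#
#   architecture = MyNetwork()                      # a SequenceNetwork subclass
#   trainer = Trainer(architecture=architecture,
#                     loss=my_loss,                 # callable(graph, labels) -> scalar loss
#                     temp_dir="./cnn",             # checkpoints + summaries go here
#                     iterations=5000,
#                     snapshot=100)
#   trainer.train(train_shuffler, validation_data_shuffler=validation_shuffler)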