#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:25:22 CEST

import tensorflow as tf
from ..network import SequenceNetwork
import threading
import os
import bob.io.base
import bob.core
from ..analyzers import SoftmaxAnalizer
from tensorflow.core.framework import summary_pb2
import time
from bob.learn.tensorflow.datashuffler.OnlineSampling import OnLineSampling


logger = bob.core.log.setup("bob.learn.tensorflow")


class Trainer(object):
    """
    One graph trainer.
    Use this trainer when your CNN is composed by one graph

    **Parameters**
      architecture: The architecture that you want to run. Should be a :py:class`bob.learn.tensorflow.network.SequenceNetwork`
      optimizer: One of the tensorflow optimizers https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html
      use_gpu: Use GPUs in the training
      loss: Loss
      temp_dir: The output directory

      base_learning_rate: Initial learning rate
      weight_decay:
      convergence_threshold:

      iterations: Maximum number of iterations
      snapshot: Will take a snapshot of the network at every `n` iterations
      prefetch: Use extra Threads to deal with the I/O
      analizer: Neural network analizer :py:mod:`bob.learn.tensorflow.analyzers`
      verbosity_level:

    """
    def __init__(self,
                 architecture,
                 optimizer=tf.train.AdamOptimizer(),
                 use_gpu=False,
                 loss=None,
                 temp_dir="cnn",

                 # Learning rate
                 base_learning_rate=0.001,
                 weight_decay=0.9,

                 ###### training options ##########
                 convergence_threshold=0.01,
                 iterations=5000,
                 snapshot=100,
                 prefetch=False,

                 ## Analizer
                 analizer=SoftmaxAnalizer(),

                 verbosity_level=2):

        if not isinstance(architecture, SequenceNetwork):
            raise ValueError("`architecture` should be instance of `SequenceNetwork`")

        self.architecture = architecture
        self.optimizer_class = optimizer
        self.use_gpu = use_gpu
        self.loss = loss
        self.temp_dir = temp_dir

        self.base_learning_rate = base_learning_rate
        self.weight_decay = weight_decay

        self.iterations = iterations
        self.snapshot = snapshot
        self.convergence_threshold = convergence_threshold
        self.prefetch = prefetch

        # Training variables used in the fit
        self.optimizer = None
        self.training_graph = None
        self.learning_rate = None
        self.train_data_shuffler = None
        self.summaries_train = None
        self.train_summary_writter = None
        self.thread_pool = None

        # Validation data
        self.validation_graph = None
        self.validation_summary_writter = None

        # Analizer
        self.analizer = analizer

        self.enqueue_op = None

        bob.core.log.set_verbosity_level(logger, verbosity_level)

    def __del__(self):
        tf.reset_default_graph()

    def compute_graph(self, data_shuffler, prefetch=False, name=""):
        """
        Computes the graph for the trainer.

        **Parameters**

            data_shuffler: Data shuffler that provides the placeholders and the data
            prefetch: If True, route the input through a FIFO prefetch queue
            name: Name of the graph
        """

        # Defining the placeholders
        if prefetch:
            [placeholder_data, placeholder_labels] = data_shuffler.get_placeholders_forprefetch(name=name)

            # Defining a placeholder queue for prefetching
            queue = tf.FIFOQueue(capacity=10,
                                 dtypes=[tf.float32, tf.int64],
                                 shapes=[placeholder_data.get_shape().as_list()[1:], []])

            # Enqueueing data through the placeholders; mini-batches are dequeued below
            self.enqueue_op = queue.enqueue_many([placeholder_data, placeholder_labels])
            feature_batch, label_batch = queue.dequeue_many(data_shuffler.batch_size)

            # Sanity check: the architecture must be a SequenceNetwork
            if not isinstance(self.architecture, SequenceNetwork):
                raise ValueError("The variable `architecture` must be an instance of "
                                 "`bob.learn.tensorflow.network.SequenceNetwork`")
        else:
            [feature_batch, label_batch] = data_shuffler.get_placeholders(name=name)

        # Creating graphs and defining the loss
        network_graph = self.architecture.compute_graph(feature_batch)
        graph = self.loss(network_graph, label_batch)

        return graph

    def get_feed_dict(self, data_shuffler):
        """
        Given a data shuffler, prepare the feed dictionary to be injected into the graph.

        **Parameters**
            data_shuffler: Data shuffler from which the next batch is drawn

        """
        [data, labels] = data_shuffler.get_batch()
        [data_placeholder, label_placeholder] = data_shuffler.get_placeholders()

        feed_dict = {data_placeholder: data,
                     label_placeholder: labels}
        return feed_dict

    def fit(self, session, step):
        """
        Run one iteration (`forward` and `backward`)

        **Parameters**
            session: Tensorflow session
            step: Iteration number

        """

        if self.prefetch:
            _, l, lr, summary = session.run([self.optimizer, self.training_graph,
                                             self.learning_rate, self.summaries_train])
        else:
            feed_dict = self.get_feed_dict(self.train_data_shuffler)
            _, l, lr, summary = session.run([self.optimizer, self.training_graph,
                                             self.learning_rate, self.summaries_train], feed_dict=feed_dict)

        logger.info("Loss training set step={0} = {1}".format(step, l))
        self.train_summary_writter.add_summary(summary, step)

    def compute_validation(self, session, data_shuffler, step):
        """
        Computes the loss in the validation set

        **Parameters**
            session: Tensorflow session
            data_shuffler: The data shuffler to be used
            step: Iteration number

        """
        # Building the validation graph and evaluating it in the current session
        self.validation_graph = self.compute_graph(data_shuffler, name="validation")
        feed_dict = self.get_feed_dict(data_shuffler)
        l = session.run(self.validation_graph, feed_dict=feed_dict)

        if self.validation_summary_writter is None:
            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

        summaries = []
        summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
        self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
        logger.info("Loss VALIDATION set step={0} = {1}".format(step, l))

    def create_general_summary(self):
        """
        Creates a simple TensorBoard summary with the values of the loss and the learning rate
        """

        # Train summary
        tf.scalar_summary('loss', self.training_graph, name="train")
        tf.scalar_summary('lr', self.learning_rate, name="train")
        return tf.merge_all_summaries()

    def start_thread(self, session):
        """
        Start pool of threads for pre-fetching

        **Parameters**
          session: Tensorflow session
        """

        threads = []
        for n in range(3):
            t = threading.Thread(target=self.load_and_enqueue, args=(session,))
            t.daemon = True  # thread will close when parent quits
            t.start()
            threads.append(t)
        return threads

    def load_and_enqueue(self, session):
        """
        Injecting data into the placeholder queue

        **Parameters**
          session: Tensorflow session
        """

        while not self.thread_pool.should_stop():
            [train_data, train_labels] = self.train_data_shuffler.get_batch()
            [train_placeholder_data, train_placeholder_labels] = self.train_data_shuffler.get_placeholders()

            feed_dict = {train_placeholder_data: train_data,
                         train_placeholder_labels: train_labels}

            session.run(self.enqueue_op, feed_dict=feed_dict)

    def train(self, train_data_shuffler, validation_data_shuffler=None):
        """
        Train the network.

        **Parameters**
            train_data_shuffler: Data shuffler that provides the training data
            validation_data_shuffler: Optional data shuffler used to compute the validation loss every `snapshot` iterations

        """

        # Creating directory
        bob.io.base.create_directories_safe(self.temp_dir)
        self.train_data_shuffler = train_data_shuffler

        # TODO: find an elegant way to provide this as a parameter of the trainer
        self.learning_rate = tf.train.exponential_decay(
            self.base_learning_rate,         # Initial learning rate
            train_data_shuffler.batch_size,  # Passed in the `global_step` slot
            train_data_shuffler.n_samples,   # Decay steps
            self.weight_decay                # Decay rate
        )
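        # Note: `tf.train.exponential_decay` evaluates to
        #     learning_rate * decay_rate ** (global_step / decay_steps).
        # As called above, the (constant) batch size fills the `global_step`
        # slot and `weight_decay` the `decay_rate` slot, so the resulting
        # learning rate is the fixed value
        #     base_learning_rate * weight_decay ** (batch_size / n_samples).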

        self.training_graph = self.compute_graph(train_data_shuffler, prefetch=self.prefetch, name="train")

        # Preparing the optimizer
        self.optimizer_class._learning_rate = self.learning_rate
        #self.optimizer = self.optimizer_class.minimize(self.training_graph, global_step=tf.Variable(0))
        self.optimizer = self.optimizer_class.minimize(self.training_graph)

        # Train summary
        self.summaries_train = self.create_general_summary()

        logger.info("Initializing !!")
        # Training
        hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')

        with tf.Session() as session:
            tf.initialize_all_variables().run()

            if isinstance(train_data_shuffler, OnLineSampling):
                train_data_shuffler.set_feature_extractor(self.architecture, session=session)

            # Start a thread to enqueue data asynchronously, and hide I/O latency.
            if self.prefetch:
                self.thread_pool = tf.train.Coordinator()
                tf.train.start_queue_runners(coord=self.thread_pool)
                threads = self.start_thread(session)

            # TensorBoard summary writer
            self.train_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), session.graph)
            for step in range(self.iterations):

                start = time.time()
                self.fit(session, step)
                end = time.time()
                summary = summary_pb2.Summary.Value(tag="elapsed_time", simple_value=float(end-start))
                self.train_summary_writter.add_summary(summary_pb2.Summary(value=[summary]), step)

                if validation_data_shuffler is not None and step % self.snapshot == 0:
                    self.compute_validation(session, validation_data_shuffler, step)

                    if self.analizer is not None:
                        self.validation_summary_writter.add_summary(self.analizer(
                             validation_data_shuffler, self.architecture, session), step)

            logger.info("Training finally finished")

            self.train_summary_writter.close()
            if validation_data_shuffler is not None:
                self.validation_summary_writter.close()

            self.architecture.save(hdf5)
            del hdf5

            if self.prefetch:
                # now they should definitely stop
                self.thread_pool.request_stop()
                self.thread_pool.join(threads)

            session.close() # For some reason the session is not closed after the context manager finishes