#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:25:22 CEST

import tensorflow as tf
from ..network import SequenceNetwork
import threading
import os
import bob.io.base
import bob.core
from ..analyzers import SoftmaxAnalizer
from tensorflow.core.framework import summary_pb2
import time

logger = bob.core.log.setup("bob.learn.tensorflow")


class Trainer(object):
    """
    One graph trainer.
    Use this trainer when your CNN is composed of a single graph.

    **Parameters**
      architecture: The architecture that you want to run. Should be an instance of :py:class:`bob.learn.tensorflow.network.SequenceNetwork`
      optimizer: One of the tensorflow optimizers https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html
      use_gpu: Use GPUs in the training
      loss: Loss function to be minimized
      temp_dir: The output directory

      base_learning_rate: Initial learning rate
      weight_decay: Decay rate applied to the learning rate
      convergence_threshold: Threshold used to assess convergence

      iterations: Maximum number of iterations
      snapshot: Will take a snapshot of the network (and run the validation) every `n` iterations
      prefetch: Use extra threads to deal with the I/O
      analizer: Neural network analyzer from :py:mod:`bob.learn.tensorflow.analyzers`
      verbosity_level: Verbosity level of the logging

    """
    def __init__(self,
                 architecture,
                 optimizer=tf.train.AdamOptimizer(),
                 use_gpu=False,
                 loss=None,
                 temp_dir="cnn",

                 # Learning rate
                 base_learning_rate=0.001,
                 weight_decay=0.9,

                 ###### training options ##########
                 convergence_threshold=0.01,
                 iterations=5000,
                 snapshot=100,
                 prefetch=False,

                 ## Analizer
                 analizer=SoftmaxAnalizer(),

                 verbosity_level=2):

        if not isinstance(architecture, SequenceNetwork):
            raise ValueError("`architecture` should be an instance of `SequenceNetwork`")

        self.architecture = architecture
        self.optimizer_class = optimizer
        self.use_gpu = use_gpu
        self.loss = loss
        self.temp_dir = temp_dir

        self.base_learning_rate = base_learning_rate
        self.weight_decay = weight_decay

        self.iterations = iterations
        self.snapshot = snapshot
        self.convergence_threshold = convergence_threshold
        self.prefetch = prefetch

        # Training variables used in the fit
        self.optimizer = None
        self.training_graph = None
        self.learning_rate = None
        self.train_data_shuffler = None
        self.summaries_train = None
        self.train_summary_writter = None
        self.thread_pool = None
        self.enqueue_op = None

        # Validation data
        self.validation_graph = None
        self.validation_summary_writter = None

        # Analizer
        self.analizer = analizer

        bob.core.log.set_verbosity_level(logger, verbosity_level)

    def compute_graph(self, data_shuffler, prefetch=False, name=""):
        """
        Computes the graph for the trainer.

        ** Parameters **

            data_shuffler: Data shuffler
            prefetch: If True, wire the graph to a prefetch queue instead of plain placeholders
            name: Name of the graph
        """

        # Defining place holders
        if prefetch:
            [placeholder_data, placeholder_labels] = data_shuffler.get_placeholders_forprefetch(name=name)

            # Defining a placeholder queue for prefetching
            queue = tf.FIFOQueue(capacity=10,
                                 dtypes=[tf.float32, tf.int64],
                                 shapes=[placeholder_data.get_shape().as_list()[1:], []])

            # Fetching the place holders from the queue
            self.enqueue_op = queue.enqueue_many([placeholder_data, placeholder_labels])
            feature_batch, label_batch = queue.dequeue_many(data_shuffler.batch_size)

            # Creating the architecture for train and validation
            if not isinstance(self.architecture, SequenceNetwork):
                raise ValueError("The variable `architecture` must be an instance of "
                                 "`bob.learn.tensorflow.network.SequenceNetwork`")
        else:
            [feature_batch, label_batch] = data_shuffler.get_placeholders(name=name)

        # Creating graphs and defining the loss
        network_graph = self.architecture.compute_graph(feature_batch)
        graph = self.loss(network_graph, label_batch)

        return graph

    def get_feed_dict(self, data_shuffler):
        """
        Given a data shuffler, prepares the feed dictionary to be injected in the graph

        ** Parameters **
            data_shuffler: The data shuffler that provides the batch and the placeholders

        """
        [data, labels] = data_shuffler.get_batch()
        [data_placeholder, label_placeholder] = data_shuffler.get_placeholders()

        feed_dict = {data_placeholder: data,
                     label_placeholder: labels}
        return feed_dict

    def fit(self, session, step):
        """
        Run one iteration (`forward` and `backward`)

        ** Parameters **
            session: Tensorflow session
            step: Iteration number

        """

        if self.prefetch:
            _, l, lr, summary = session.run([self.optimizer, self.training_graph,
                                             self.learning_rate, self.summaries_train])
        else:
            feed_dict = self.get_feed_dict(self.train_data_shuffler)
            _, l, lr, summary = session.run([self.optimizer, self.training_graph,
                                             self.learning_rate, self.summaries_train], feed_dict=feed_dict)

        logger.info("Loss training set step={0} = {1}".format(step, l))
        self.train_summary_writter.add_summary(summary, step)

    def compute_validation(self, session, data_shuffler, step):
        """
        Computes the loss in the validation set

        ** Parameters **
            session: Tensorflow session
            data_shuffler: The data shuffler to be used
            step: Iteration number

        """
        # Re-computing the graph for the validation data
        self.validation_graph = self.compute_graph(data_shuffler, name="validation")
        feed_dict = self.get_feed_dict(data_shuffler)
        l = session.run(self.validation_graph, feed_dict=feed_dict)

        if self.validation_summary_writter is None:
            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

        summaries = []
        summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
        self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
        logger.info("Loss VALIDATION set step={0} = {1}".format(step, l))

    def create_general_summary(self):
        """
        Creates a simple tensorboard summary with the value of the loss and learning rate
        """

        # Train summary
        tf.scalar_summary('loss', self.training_graph, name="train")
        tf.scalar_summary('lr', self.learning_rate, name="train")
        return tf.merge_all_summaries()

    def start_thread(self, session):
        """
        Start pool of threads for pre-fetching

        **Parameters**
          session: Tensorflow session
        """

        threads = []
        for n in range(3):
            t = threading.Thread(target=self.load_and_enqueue, args=(session,))
            t.daemon = True  # thread will close when parent quits
            t.start()
            threads.append(t)
        return threads
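    # How the pieces fit together: `train` creates a tf.train.Coordinator (kept in
    # self.thread_pool) and calls `start_thread`; each thread loops in
    # `load_and_enqueue`, pushing batches into the FIFOQueue built by
    # `compute_graph`, while `fit` dequeues batches directly (no feed_dict needed).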

    def load_and_enqueue(self, session):
        """
        Injecting data into the placeholder queue

        **Parameters**
          session: Tensorflow session
        """

        while not self.thread_pool.should_stop():
            [train_data, train_labels] = self.train_data_shuffler.get_batch()
            [train_placeholder_data, train_placeholder_labels] = self.train_data_shuffler.get_placeholders()

            feed_dict = {train_placeholder_data: train_data,
                         train_placeholder_labels: train_labels}

            session.run(self.enqueue_op, feed_dict=feed_dict)

    def train(self, train_data_shuffler, validation_data_shuffler=None):
        """
        Train the network

        ** Parameters **
            train_data_shuffler: Data shuffler that provides the training data
            validation_data_shuffler: Optional data shuffler used to compute the validation loss

        """

        # Creating directory
        bob.io.base.create_directories_safe(self.temp_dir)
        self.train_data_shuffler = train_data_shuffler

        # TODO: find an elegant way to provide this as a parameter of the trainer
        self.learning_rate = tf.train.exponential_decay(
            self.base_learning_rate,         # Learning rate
            train_data_shuffler.batch_size,  # Global step (a constant here)
            train_data_shuffler.n_samples,   # Decay steps
            self.weight_decay                # Decay rate
        )
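        # For reference, tf.train.exponential_decay evaluates to
        #   base_learning_rate * weight_decay ** (global_step / decay_steps).
        # Since a constant (batch_size) is fed into the global_step slot above, the
        # resulting rate is effectively fixed during training; wiring in a real
        # global_step variable would make it actually decay.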

        self.training_graph = self.compute_graph(train_data_shuffler, prefetch=self.prefetch, name="train")

        # Preparing the optimizer
        self.optimizer_class._learning_rate = self.learning_rate
        self.optimizer = self.optimizer_class.minimize(self.training_graph, global_step=tf.Variable(0))

        # Train summary
        self.summaries_train = self.create_general_summary()

        logger.info("Initializing !!")
        # Training
        hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')

        with tf.Session() as session:

            tf.initialize_all_variables().run()

            # Start a thread to enqueue data asynchronously, and hide I/O latency.
            if self.prefetch:
                self.thread_pool = tf.train.Coordinator()
                tf.train.start_queue_runners(coord=self.thread_pool)
                threads = self.start_thread(session)

            # TENSOR BOARD SUMMARY
            self.train_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'train'), session.graph)
            for step in range(self.iterations):

                start = time.time()
                self.fit(session, step)
                end = time.time()
                summary = summary_pb2.Summary.Value(tag="elapsed_time", simple_value=float(end-start))
                self.train_summary_writter.add_summary(summary_pb2.Summary(value=[summary]), step)

                if validation_data_shuffler is not None and step % self.snapshot == 0:
                    self.compute_validation(session, validation_data_shuffler, step)

                    if self.analizer is not None:
                        self.validation_summary_writter.add_summary(self.analizer(
                             validation_data_shuffler, self.architecture, session), step)

            logger.info("Training finally finished")

            self.train_summary_writter.close()
            if validation_data_shuffler is not None:
                self.validation_summary_writter.close()

            self.architecture.save(hdf5)
            del hdf5

            if self.prefetch:
                # now they should definitely stop
                self.thread_pool.request_stop()
                self.thread_pool.join(threads)

            session.close()  # For some reason the session is not closed after the context manager finishes