#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Tue 09 Aug 2016 15:25:22 CEST

import logging
logger = logging.getLogger("bob.learn.tensorflow")
import tensorflow as tf
from ..network import SequenceNetwork
import threading
import numpy
import os
import bob.io.base


class Trainer(object):

    def __init__(self,
                 architecture,
                 optimizer=tf.train.AdamOptimizer(),
                 use_gpu=False,
                 loss=None,
                 temp_dir="",
                 # Learning rate
                 base_learning_rate=0.001,
                 weight_decay=0.9,

                 ###### training options ##########
                 convergence_threshold=0.01,
                 iterations=5000,
                 snapshot=100):
        """
        Trainer for :py:class:`bob.learn.tensorflow.network.SequenceNetwork` models.

        **Parameters**
          architecture: The architecture to train. Must be an instance of :py:class:`bob.learn.tensorflow.network.SequenceNetwork`
          optimizer: One of the tensorflow optimizers (https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html)
          use_gpu: If True, run the training on the GPU
          loss: The loss function to minimize; a callable taking the network output and the labels
          temp_dir: Directory where the trained model (`model.hdf5`) is written
          base_learning_rate: Initial learning rate of the exponential decay schedule
          weight_decay: Decay rate of the exponential learning rate schedule
          iterations: Maximum number of training iterations
          snapshot: Run the validation step every `snapshot` iterations
          convergence_threshold: Threshold used to decide that training has converged
        """
        if not isinstance(architecture, SequenceNetwork):
            raise ValueError("`architecture` should be instance of `SequenceNetwork`")

        self.architecture = architecture
        self.optimizer = optimizer
        self.use_gpu = use_gpu
        self.loss = loss
        self.temp_dir = temp_dir

        self.base_learning_rate = base_learning_rate
        self.weight_decay = weight_decay

        self.iterations = iterations
        self.snapshot = snapshot
        self.convergence_threshold = convergence_threshold

    def train(self, train_data_shuffler, validation_data_shuffler=None):
        """
        Do the loop forward --> backward --|
                      ^--------------------|
        """

        def start_thread():
            threads = []
            for n in range(1):
                t = threading.Thread(target=load_and_enqueue)
                t.daemon = True  # thread will close when parent quits
                t.start()
                threads.append(t)
            return threads

        def load_and_enqueue():
            """
            Feed batches from the train data shuffler into the prefetching queue.
            """

            while not thread_pool.should_stop():
                train_data, train_labels = train_data_shuffler.get_batch()

                feed_dict = {train_placeholder_data: train_data,
                             train_placeholder_labels: train_labels}

                session.run(enqueue_op, feed_dict=feed_dict)

        # TODO: find an elegant way to provide this as a parameter of the trainer
        learning_rate = tf.train.exponential_decay(
            self.base_learning_rate,          # Initial learning rate
            train_data_shuffler.batch_size,   # Global step (a constant here)
            train_data_shuffler.n_samples,    # Decay steps
            self.weight_decay                 # Decay rate
        )
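
        # With tf.train.exponential_decay the effective rate is
        #   base_learning_rate * weight_decay ** (global_step / decay_steps).
        # Since a constant (the batch size) is passed as global_step, the value
        # computed here stays fixed across iterations.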

        # Defining the placeholders
        train_placeholder_data, train_placeholder_labels = train_data_shuffler.get_placeholders_forprefetch(name="train")
        if validation_data_shuffler is not None:
            validation_placeholder_data, validation_placeholder_labels = \
                validation_data_shuffler.get_placeholders(name="validation")
        # Defining a placeholder queue for prefetching
        queue = tf.FIFOQueue(capacity=10,
                             dtypes=[tf.float32, tf.int64],
                             shapes=[train_placeholder_data.get_shape().as_list()[1:], []])

        # Enqueueing the placeholders into the prefetching queue
        enqueue_op = queue.enqueue_many([train_placeholder_data, train_placeholder_labels])
        train_feature_batch, train_label_batch = queue.dequeue_many(train_data_shuffler.batch_size)
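
        # enqueue_many splits the fed batch along its first dimension into
        # individual (sample, label) elements; dequeue_many groups batch_size of
        # them back into the batched tensors consumed by the training graph below.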

        # Sanity check before building the train and validation graphs
        if not isinstance(self.architecture, SequenceNetwork):
            raise ValueError("The variable `architecture` must be an instance of "
                             "`bob.learn.tensorflow.network.SequenceNetwork`")

        # Creating graphs and defining the loss
        train_graph = self.architecture.compute_graph(train_feature_batch)
        loss_train = self.loss(train_graph, train_label_batch)
        train_prediction = tf.nn.softmax(train_graph)
        if validation_data_shuffler is not None:
            validation_graph = self.architecture.compute_graph(validation_placeholder_data)
            loss_validation = self.loss(validation_graph, validation_placeholder_labels)
            validation_prediction = tf.nn.softmax(validation_graph)

        # Preparing the optimizer
        self.optimizer._learning_rate = learning_rate
        optimizer = self.optimizer.minimize(loss_train, global_step=tf.Variable(0))
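
        # Note: the decayed learning rate is injected by overriding the optimizer's
        # private `_learning_rate` attribute (see the TODO above), and the fresh
        # tf.Variable(0) passed as global_step is incremented by minimize() at
        # every training step.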
        print("Initializing !!")
        # Training
        hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')
        with tf.Session() as session:
            tf.initialize_all_variables().run()

            # Start a thread to enqueue data asynchronously, and hide I/O latency.
            thread_pool = tf.train.Coordinator()
            tf.train.start_queue_runners(coord=thread_pool)

            threads = start_thread()
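
            # The Coordinator above is what load_and_enqueue() polls via
            # should_stop(); request_stop()/join() at the end of this method let
            # the enqueue thread exit cleanly.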

            train_writer = tf.train.SummaryWriter('./LOGS/train', session.graph)

            for step in range(self.iterations):

                _, l, lr, _ = session.run([optimizer, loss_train,
                                           learning_rate, train_prediction])

                if validation_data_shuffler is not None and step % self.snapshot == 0:
                    validation_data, validation_labels = validation_data_shuffler.get_batch()
                    feed_dict = {validation_placeholder_data: validation_data,
                                 validation_placeholder_labels: validation_labels}

                    l, predictions = session.run([loss_validation, validation_prediction], feed_dict=feed_dict)
                    accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
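                    # (the accuracy above is the percentage of argmax predictions
                    # that match the validation labels)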
                    print "Step {0}. Loss = {1}, Acc Validation={2}".format(step, l, accuracy)
            train_writer.close()
            # now they should definitely stop
            thread_pool.request_stop()
            thread_pool.join(threads)
            self.architecture.save(hdf5)
            del hdf5