#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Thu 13 Oct 2016 13:35 CEST

import numpy
import bob.io.base
import os
from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation, TripletMemory, SiameseMemory
from bob.learn.tensorflow.loss import BaseLoss, TripletLoss, ContrastiveLoss
from bob.learn.tensorflow.trainers import Trainer, constant, TripletTrainer, SiameseTrainer
from bob.learn.tensorflow.utils import load_mnist
from bob.learn.tensorflow.layers import Conv2D, FullyConnected
from bob.learn.tensorflow.network import Embedding
from .test_cnn import dummy_experiment
from .test_cnn_scratch import validate_network


import tensorflow as tf
import shutil

"""
Some unit tests that create networks on the fly and load variables
"""

batch_size = 16
validation_batch_size = 400
iterations = 300
seed = 10


def scratch_network(input_pl):
    # Creating a random network
    slim = tf.contrib.slim

    initializer = tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32, seed=10)

    scratch = slim.conv2d(input_pl, 10, 3, activation_fn=tf.nn.tanh,
                          stride=1,
                          weights_initializer=initializer,
                          scope='conv1')
    scratch = slim.flatten(scratch, scope='flatten1')
    scratch = slim.fully_connected(scratch, 10,
                                   weights_initializer=initializer,
                                   activation_fn=None,
                                   scope='fc1')

    return scratch


def test_cnn_pretrained():
    # Preparing input data
    train_data, train_labels, validation_data, validation_labels = load_mnist()
    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))

    # Creating datashufflers
    data_augmentation = ImageAugmentation()
    train_data_shuffler = Memory(train_data, train_labels,
                                 input_shape=[None, 28, 28, 1],
                                 batch_size=batch_size,
                                 data_augmentation=data_augmentation)
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    directory = "./temp/cnn"
    directory2 = "./temp/cnn2"

    # Creating a random network
    input_pl = train_data_shuffler("data", from_queue=True)
    graph = scratch_network(input_pl)
    embedding = Embedding(train_data_shuffler("data", from_queue=False), graph)
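    # The Embedding ties the feed placeholder to the network output; validate_network uses it to score the validation set.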

    # Loss for the softmax
    loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)

    # One graph trainer
    trainer = Trainer(train_data_shuffler,
                      iterations=iterations,
                      analizer=None,
                      temp_dir=directory
                      )
    trainer.create_network_from_scratch(graph=graph,
                                        loss=loss,
                                        learning_rate=constant(0.01, name="regular_lr"),
                                        optimizer=tf.train.GradientDescentOptimizer(0.01),
                                        )
    trainer.train()
    accuracy = validate_network(embedding, validation_data, validation_labels)
    assert accuracy > 80
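    # The first run leaves a checkpoint (model.ckp) under `directory`; the second phase below restores it and keeps training.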

    del graph
    del loss
    del trainer
    # Training the network using a pre-trained model
    loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean, name="loss")
    graph = scratch_network(input_pl)

    # One graph trainer
    trainer = Trainer(train_data_shuffler,
                      iterations=iterations,
                      analizer=None,
                      temp_dir=directory2
                      )
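    # Instead of building the network from scratch, restore the graph and its variables from the checkpoint saved by the first trainer.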
    trainer.create_network_from_file(os.path.join(directory, "model.ckp"))

    trainer.train()

    accuracy = validate_network(embedding, validation_data, validation_labels)
    assert accuracy > 90
    shutil.rmtree(directory)
    shutil.rmtree(directory2)

    del graph
    del loss
    del trainer


def test_triplet_cnn_pretrained():
    train_data, train_labels, validation_data, validation_labels = load_mnist()
    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))

    # Creating datashufflers
    data_augmentation = ImageAugmentation()
    train_data_shuffler = TripletMemory(train_data, train_labels,
                                        input_shape=[28, 28, 1],
                                        batch_size=batch_size,
                                        data_augmentation=data_augmentation)
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    validation_data_shuffler = TripletMemory(validation_data, validation_labels,
                                             input_shape=[28, 28, 1],
                                             batch_size=validation_batch_size)
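    # The triplet shufflers feed batches of (anchor, positive, negative) samples to the triplet loss below.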

    directory = "./temp/cnn"
    directory2 = "./temp/cnn2"

    # Creating a random network
    scratch = scratch_network()

    # Triplet loss
    loss = TripletLoss(margin=4.)
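    # The triplet loss drives the anchor-negative distance to exceed the anchor-positive distance by at least `margin`.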

    # One graph trainer
    trainer = TripletTrainer(architecture=scratch,
                             loss=loss,
                             iterations=iterations,
                             analizer=None,
                             prefetch=False,
                             learning_rate=constant(0.05, name="regular_lr"),
                             optimizer=tf.train.AdamOptimizer(name="adam_pretrained_model"),
                             temp_dir=directory
                             )
    trainer.train(train_data_shuffler)
    # Testing
    eer = dummy_experiment(validation_data_shuffler, scratch)
    # The result is not so good
    assert eer < 0.25

    del scratch
    del loss
    del trainer

    # Training the network using a pre-trained model
    loss = TripletLoss(margin=4.)
    scratch = scratch_network()
    trainer = TripletTrainer(architecture=scratch,
                             loss=loss,
                             iterations=iterations + 200,
                             analizer=None,
                             prefetch=False,
                             learning_rate=None,
                             temp_dir=directory2,
                             model_from_file=os.path.join(directory, "model.ckp")
                             )
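    # `model_from_file` points at the checkpoint written by the first trainer, so training resumes from those weights instead of a fresh random initialization.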

    trainer.train(train_data_shuffler)

    eer = dummy_experiment(validation_data_shuffler, scratch)
    # Now it is better
    assert eer < 0.15
    shutil.rmtree(directory)
    shutil.rmtree(directory2)

    del scratch
    del loss
    del trainer


def test_siamese_cnn_pretrained():
    train_data, train_labels, validation_data, validation_labels = load_mnist()
    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))

    # Creating datashufflers
    data_augmentation = ImageAugmentation()
    train_data_shuffler = SiameseMemory(train_data, train_labels,
                                        input_shape=[28, 28, 1],
                                        batch_size=batch_size,
                                        data_augmentation=data_augmentation)
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    validation_data_shuffler = SiameseMemory(validation_data, validation_labels,
                                             input_shape=[28, 28, 1],
                                             batch_size=validation_batch_size)

    directory = "./temp/cnn"
    directory2 = "./temp/cnn2"

    # Creating a random network
    scratch = scratch_network()

    # Contrastive loss
    loss = ContrastiveLoss(contrastive_margin=4.)
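    # The contrastive loss pulls embeddings of matching pairs together and pushes non-matching pairs at least `contrastive_margin` apart.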
    # One graph trainer
    trainer = SiameseTrainer(architecture=scratch,
                             loss=loss,
                             iterations=iterations,
                             analizer=None,
                             prefetch=False,
                             learning_rate=constant(0.05, name="regular_lr"),
                             optimizer=tf.train.AdamOptimizer(name="adam_pretrained_model"),
                             temp_dir=directory
                             )
    trainer.train(train_data_shuffler)

    # Testing
    eer = dummy_experiment(validation_data_shuffler, scratch)
    # The result is not so good
    assert eer < 0.28

    del scratch
    del loss
    del trainer

    # Training the network using a pre-trained model
    loss = ContrastiveLoss(contrastive_margin=4.)
    scratch = scratch_network()
    trainer = SiameseTrainer(architecture=scratch,
                             loss=loss,
                             iterations=iterations + 1000,
                             analizer=None,
                             prefetch=False,
                             learning_rate=None,
                             temp_dir=directory2,
                             model_from_file=os.path.join(directory, "model.ckp")
                             )

    trainer.train(train_data_shuffler)

    eer = dummy_experiment(validation_data_shuffler, scratch)
    # Now it is better
    assert eer < 0.27
    shutil.rmtree(directory)
    shutil.rmtree(directory2)

    del scratch
    del loss
    del trainer