#!/usr/bin/env python
# encoding: utf-8


""" Unit tests

"""

import numpy
import torch

def test_architectures():
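  """Run a forward pass through each network architecture and check the output shapes."""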

  a = numpy.random.rand(1, 3, 128, 128).astype("float32")
  t = torch.from_numpy(a)
  number_of_classes = 20
  output_dimension = number_of_classes

  # CASIANet
  from ..architectures import CASIANet
  net = CASIANet(number_of_classes)
  embedding_dimension = 320
  # forward() returns the class scores and the embedding
  output, embedding = net.forward(t)
  assert output.shape == torch.Size([1, number_of_classes])
  assert embedding.shape == torch.Size([1, embedding_dimension])

  # CNN8
  from ..architectures import CNN8
  net = CNN8(number_of_classes)
  embedding_dimension = 512
  output, embedding = net.forward(t)
  assert output.shape == torch.Size([1, number_of_classes])
  assert embedding.shape == torch.Size([1, embedding_dimension])

  # LightCNN9
  # the LightCNN models expect 1x128x128 grayscale inputs
  a = numpy.random.rand(1, 1, 128, 128).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import LightCNN9
  net = LightCNN9()
  output, embedding = net.forward(t)
  assert output.shape == torch.Size([1, 79077])
  assert embedding.shape == torch.Size([1, 256])

  # LightCNN29
  a = numpy.random.rand(1, 1, 128, 128).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import LightCNN29
  net = LightCNN29()
  output, embedding = net.forward(t)
  assert output.shape == torch.Size([1, 79077])
  assert embedding.shape == torch.Size([1, 256])

  # LightCNN29v2
  a = numpy.random.rand(1, 1, 128, 128).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import LightCNN29v2
  net = LightCNN29v2()
  output, embedding = net.forward(t)
  assert output.shape == torch.Size([1, 79077])
  assert embedding.shape == torch.Size([1, 256])

  # MCCNN
  a = numpy.random.rand(1, 4, 128, 128).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import MCCNN
  net = MCCNN(num_channels=4)
  output = net.forward(t)
  assert output.shape == torch.Size([1, 1])

  # MCCNNv2
  a = numpy.random.rand(1, 4, 128, 128).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import MCCNNv2
  net = MCCNNv2(num_channels=4)
  output = net.forward(t)
  assert output.shape == torch.Size([1, 1])

  # FASNet
  a = numpy.random.rand(1, 3, 224, 224).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import FASNet
  net = FASNet(pretrained=False)
  output = net.forward(t)
  assert output.shape == torch.Size([1, 1])

  # DeepMSPAD
  a = numpy.random.rand(1, 8, 224, 224).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import DeepMSPAD
  net = DeepMSPAD(pretrained=False, num_channels=8)
  output = net.forward(t)
  assert output.shape == torch.Size([1, 1])

  # DeepPixBiS
  a = numpy.random.rand(1, 3, 224, 224).astype("float32")
  t = torch.from_numpy(a)
  from ..architectures import DeepPixBiS
  net = DeepPixBiS(pretrained=False)
  output = net.forward(t)
  assert output[0].shape == torch.Size([1, 1, 14, 14])
  assert output[1].shape == torch.Size([1, 1])

  # DCGAN
  d = numpy.random.rand(1, 3, 64, 64).astype("float32")
  t = torch.from_numpy(d)
  from ..architectures import DCGAN_discriminator
  discriminator = DCGAN_discriminator(1)
  output = discriminator.forward(t)
  assert output.shape == torch.Size([1])

  g = numpy.random.rand(1, 100, 1, 1).astype("float32")
  t = torch.from_numpy(g)
  from ..architectures import DCGAN_generator
  generator = DCGAN_generator(1)
  output = generator.forward(t)
  assert output.shape == torch.Size([1, 3, 64, 64])

  # Conditional GAN
  d = numpy.random.rand(1, 3, 64, 64).astype("float32")
  t = torch.from_numpy(d)
  cfm = numpy.zeros((1, 13, 64, 64), dtype="float32")
  cfm[:, 0, :, :] = 1
  cfmt = torch.from_numpy(cfm)
  from ..architectures import ConditionalGAN_discriminator
  discriminator = ConditionalGAN_discriminator(13)
  output = discriminator.forward(t, cfmt)
  assert output.shape == torch.Size([1])

  g = numpy.random.rand(1, 100, 1, 1).astype("float32")
  t = torch.from_numpy(g)
  oh = numpy.zeros((1, 13, 1, 1), dtype="float32")
  oh[0] = 1
  oht = torch.from_numpy(oh)
  from ..architectures import ConditionalGAN_generator
  generator = ConditionalGAN_generator(100, 13)
  output = generator.forward(t, oht)
  assert output.shape == torch.Size([1, 3, 64, 64])


def test_transforms():
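  """Check the dataset transforms: ChannelSelect, RandomHorizontalFlipImage, RollChannels, ToTensor and Normalize."""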

  image = numpy.random.rand(3, 128, 128).astype("uint8")

  img = numpy.random.rand(128, 128, 4).astype("uint8")

  from ..datasets import ChannelSelect
  cs = ChannelSelect(selected_channels=[0, 1, 2])
  assert cs(img).shape == (128, 128, 3)

  from ..datasets import RandomHorizontalFlipImage
  rh = RandomHorizontalFlipImage(p=0.5)
  assert numpy.allclose(rh(rh(img)), img)

  from ..datasets import RollChannels
  sample = {'image': image}
  rc = RollChannels()
  rc(sample)
  assert sample['image'].shape == (128, 128, 3)

  from ..datasets import ToTensor
  tt = ToTensor()
  tt(sample)
  assert isinstance(sample['image'], torch.Tensor)
  # grayscale
  image_gray = numpy.random.rand(128, 128).astype("uint8")
  sample_gray = {'image': image_gray}
  tt(sample_gray)
  assert isinstance(sample_gray['image'], torch.Tensor)

  from ..datasets import Normalize
  image_copy = torch.Tensor(sample['image'])
  norm = Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
  norm(sample)
  for c in range(3):
    for h in range(sample['image'].shape[1]):
      for w in range(sample['image'].shape[2]):
        assert abs(sample['image'][c, h, w] - (image_copy[c, h, w] - 0.5) / 0.5) < 1e-10


def test_map_labels():
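  """Check that map_labels remaps string labels to consecutive indices (as strings), optionally starting at start_index."""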

  labels = ['1', '4', '7']
  from ..datasets import map_labels
  new_labels = map_labels(labels)
  assert '0' in new_labels, "new_labels = {}".format(new_labels)
  assert '1' in new_labels, "new_labels = {}".format(new_labels)
  assert '2' in new_labels, "new_labels = {}".format(new_labels)
  # new_labels = sorted(new_labels)
  # assert new_labels == ['0', '1', '2']

  new_labels = map_labels(labels, start_index=5)
  # new_labels = sorted(new_labels)
  assert '5' in new_labels, "new_labels = {}".format(new_labels)
  assert '6' in new_labels, "new_labels = {}".format(new_labels)
  assert '7' in new_labels, "new_labels = {}".format(new_labels)
  # assert new_labels == ['5', '6', '7']


from torch.utils.data import Dataset
class DummyDataSet(Dataset):
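  """Dummy dataset yielding {'image', 'label'} dictionaries of random 1x128x128 images with labels in [0, 20)."""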
  def __init__(self):
    pass
  def __len__(self):
    return 100
  def __getitem__(self, idx):
    data = numpy.random.rand(1, 128, 128).astype("float32")
    label = numpy.random.randint(20)
    sample = {'image': torch.from_numpy(data), 'label': label}
    return sample


def test_CNNtrainer():
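  """Train a LightCNN9 for one epoch on the dummy dataset and check that a model file is saved."""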

  from ..architectures import LightCNN9
  net = LightCNN9(20)

  dataloader = torch.utils.data.DataLoader(DummyDataSet(), batch_size=32, shuffle=True)

  from ..trainers import CNNTrainer
  trainer = CNNTrainer(net, verbosity_level=3)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('model_1_0.pth')

  os.remove('model_1_0.pth')

class DummyDataSetMCCNN(Dataset):
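  """Dummy dataset yielding (data, label) tuples of random 4x128x128 images with binary labels."""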
  def __init__(self):
    pass
  def __len__(self):
    return 100
  def __getitem__(self, idx):
    data = numpy.random.rand(4, 128, 128).astype("float32")
    label = numpy.random.randint(2)
    sample = data, label
    return sample


def test_MCCNNtrainer():
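  """Train an MCCNN for one epoch without cross-validation and check that a model file is saved."""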

  from ..architectures import MCCNN
  net = MCCNN(num_channels=4)

  dataloader = {}
  dataloader['train'] = torch.utils.data.DataLoader(DummyDataSetMCCNN(), batch_size=32, shuffle=True)

  from ..trainers import MCCNNTrainer
  trainer = MCCNNTrainer(net, verbosity_level=3, do_crossvalidation=False)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('model_1_0.pth')

  os.remove('model_1_0.pth')


def test_MCCNNtrainer_cv():
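  """Train an MCCNN for one epoch with cross-validation and check that both the epoch and best models are saved."""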

  from ..architectures import MCCNN
  net = MCCNN(num_channels=4)

  dataloader = {}
  dataloader['train'] = torch.utils.data.DataLoader(DummyDataSetMCCNN(), batch_size=32, shuffle=True)
  dataloader['val'] = torch.utils.data.DataLoader(DummyDataSetMCCNN(), batch_size=32, shuffle=True)

  from ..trainers import MCCNNTrainer
  trainer = MCCNNTrainer(net, verbosity_level=3, do_crossvalidation=True)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('model_1_0.pth')
  assert os.path.isfile('model_100_0.pth') # the best model

  os.remove('model_1_0.pth')
  os.remove('model_100_0.pth')


class DummyDataSetFASNet(Dataset):
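  """Dummy dataset yielding (data, label) tuples of random 3x224x224 images with binary labels."""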
  def __init__(self):
    pass
  def __len__(self):
    return 100
  def __getitem__(self, idx):
    data = numpy.random.rand(3, 224, 224).astype("float32")
    label = numpy.random.randint(2)
    sample = data, label
    return sample

def test_FASNettrainer():
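  """Train a FASNet for one epoch without cross-validation and check that a model file is saved."""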

  from ..architectures import FASNet
  net = FASNet(pretrained=False)

  dataloader = {}
  dataloader['train'] = torch.utils.data.DataLoader(DummyDataSetFASNet(), batch_size=32, shuffle=True)

  from ..trainers import FASNetTrainer
  trainer = FASNetTrainer(net, verbosity_level=3, do_crossvalidation=False)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('model_1_0.pth')

  os.remove('model_1_0.pth')


def test_FASNettrainer_cv():
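  """Train a FASNet for one epoch with cross-validation and check that both the epoch and best models are saved."""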

  from ..architectures import FASNet
  net = FASNet(pretrained=False)

  dataloader = {}
  dataloader['train'] = torch.utils.data.DataLoader(DummyDataSetFASNet(), batch_size=32, shuffle=True)
  dataloader['val'] = torch.utils.data.DataLoader(DummyDataSetFASNet(), batch_size=32, shuffle=True)

  from ..trainers import FASNetTrainer
  trainer = FASNetTrainer(net, verbosity_level=3, do_crossvalidation=True)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('model_1_0.pth')
  assert os.path.isfile('model_100_0.pth')

  os.remove('model_1_0.pth')
  os.remove('model_100_0.pth')

class DummyDataSetGAN(Dataset):
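  """Dummy dataset yielding {'image'} samples of random 3x64x64 images."""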
  def __init__(self):
    pass
  def __len__(self):
    return 100
  def __getitem__(self, idx):
    data = numpy.random.rand(3, 64, 64).astype("float32")
    sample = {'image': torch.from_numpy(data)}
    return sample

def test_DCGANtrainer():
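  """Train a DCGAN for one epoch and check that the sample image and network snapshots are saved."""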

  from ..architectures import DCGAN_generator
  from ..architectures import DCGAN_discriminator
  g = DCGAN_generator(1)
  d = DCGAN_discriminator(1)

  dataloader = torch.utils.data.DataLoader(DummyDataSetGAN(), batch_size=32, shuffle=True)

  from ..trainers import DCGANTrainer
  trainer = DCGANTrainer(g, d, batch_size=32, noise_dim=100, use_gpu=False, verbosity_level=2)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('fake_samples_epoch_000.png')
  assert os.path.isfile('netD_epoch_0.pth')
  assert os.path.isfile('netG_epoch_0.pth')

  os.remove('fake_samples_epoch_000.png')
  os.remove('netD_epoch_0.pth')
  os.remove('netG_epoch_0.pth')

class DummyDataSetConditionalGAN(Dataset):
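  """Dummy dataset yielding {'image', 'pose'} samples of random 3x64x64 images with a pose index in [0, 13)."""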
  def __init__(self):
    pass
  def __len__(self):
    return 100
  def __getitem__(self, idx):
    data = numpy.random.rand(3, 64, 64).astype("float32")
    sample = {'image': torch.from_numpy(data), 'pose': numpy.random.randint(0, 13)}
    return sample

def test_ConditionalGANTrainer():
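  """Train a conditional GAN for one epoch and check that the sample image and network snapshots are saved."""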

  from ..architectures import ConditionalGAN_generator
  from ..architectures import ConditionalGAN_discriminator
  g = ConditionalGAN_generator(100, 13)
  d = ConditionalGAN_discriminator(13)

  dataloader = torch.utils.data.DataLoader(DummyDataSetConditionalGAN(), batch_size=32, shuffle=True)

  from ..trainers import ConditionalGANTrainer
  trainer = ConditionalGANTrainer(g, d, [3, 64, 64], batch_size=32, noise_dim=100, conditional_dim=13)
  trainer.train(dataloader, n_epochs=1, output_dir='.')

  import os
  assert os.path.isfile('fake_samples_epoch_000.png')
  assert os.path.isfile('netD_epoch_0.pth')
  assert os.path.isfile('netG_epoch_0.pth')
  os.remove('fake_samples_epoch_000.png')
  os.remove('netD_epoch_0.pth')
  os.remove('netG_epoch_0.pth')


def test_conv_autoencoder():
    """
    Test the ConvAutoencoder class.
    """
    from bob.learn.pytorch.architectures import ConvAutoencoder
398

399
400
401
402
403
    batch = torch.randn(1, 3, 64, 64)
    model = ConvAutoencoder()
    output = model(batch)
    assert batch.shape == output.shape

404
405
406
407
408
    model_embeddings = ConvAutoencoder(return_latent_embedding = True)
    embedding = model_embeddings(batch)
    assert list(embedding.shape) == [1, 16, 5, 5]


def test_extractors():
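  """Instantiate each image extractor and check the shape of the extracted features."""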

  # lightCNN9
  from bob.learn.pytorch.extractor.image import LightCNN9Extractor
  extractor = LightCNN9Extractor()
  # this architecture expects 128x128 grayscale images
  data = numpy.random.rand(128, 128).astype("float32")
  output = extractor(data)
  assert output.shape[0] == 256

  # lightCNN29
  from bob.learn.pytorch.extractor.image import LightCNN29Extractor
  extractor = LightCNN29Extractor()
  # this architecture expects 128x128 grayscale images
  data = numpy.random.rand(128, 128).astype("float32")
  output = extractor(data)
  assert output.shape[0] == 256

  # lightCNN29v2
  from bob.learn.pytorch.extractor.image import LightCNN29v2Extractor
  extractor = LightCNN29v2Extractor()
  # this architecture expects 128x128 grayscale images
  data = numpy.random.rand(128, 128).astype("float32")
  output = extractor(data)
  assert output.shape[0] == 256

  # MCCNN
  from ..extractor.image import MCCNNExtractor
  extractor = MCCNNExtractor(num_channels_used=4)
  # this architecture expects num_channels x 128 x 128 multi-channel images
  data = numpy.random.rand(4, 128, 128).astype("float32")
  output = extractor(data)
  assert output.shape[0] == 1

  # MCCNNv2
  from ..extractor.image import MCCNNv2Extractor
  extractor = MCCNNv2Extractor(num_channels_used=4)
  # this architecture expects num_channels x 128 x 128 multi-channel images
  data = numpy.random.rand(4, 128, 128).astype("float32")
  output = extractor(data)
  assert output.shape[0] == 1

  # FASNet
  from ..extractor.image import FASNetExtractor
  extractor = FASNetExtractor()
  # this architecture expects RGB images of size 3x224x224
  data = numpy.random.rand(3, 224, 224).astype("uint8")
  output = extractor(data)
  assert output.shape[0] == 1