Skip to content
GitLab
Projects
Groups
Snippets
/
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
bob
bob.learn.tensorflow
Commits
f6a946f0
Commit
f6a946f0
authored
Sep 22, 2017
by
Tiago de Freitas Pereira
Browse files
Added end points
parent
8ecba2f8
Pipeline
#12376
failed with stages
in 22 minutes and 12 seconds
Changes
12
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
bob/learn/tensorflow/loss/BaseLoss.py
View file @
f6a946f0
...
...
@@ -7,6 +7,8 @@ import logging
import
tensorflow
as
tf
logger
=
logging
.
getLogger
(
"bob.learn.tensorflow"
)
slim
=
tf
.
contrib
.
slim
class
BaseLoss
(
object
):
"""
...
...
@@ -26,12 +28,23 @@ class BaseLoss(object):
class
MeanSoftMaxLoss
(
object
):
"""
Mean softmax loss
Mean softmax loss
. Basically it wrapps the function tf.nn.sparse_softmax_cross_entropy_with_logits.
"""
def
__init__
(
self
,
name
=
"loss"
):
"""
Constructor
**Parameters**
name:
Scope name
"""
self
.
name
=
name
def
__call__
(
self
,
graph
,
label
):
return
tf
.
reduce_mean
(
tf
.
nn
.
sparse_softmax_cross_entropy_with_logits
(
logits
=
graph
,
labels
=
label
),
name
=
self
.
name
)
bob/learn/tensorflow/network/Chopra.py
View file @
f6a946f0
...
...
@@ -4,6 +4,7 @@
# @date: Wed 11 May 2016 09:39:36 CEST
import
tensorflow
as
tf
from
.utils
import
append_logits
class
Chopra
(
object
):
...
...
@@ -47,6 +48,8 @@ class Chopra(object):
pooling2_size
fc1_output:
n_classes: If None, no Fully COnnected layer with class output will be created
seed:
"""
...
...
@@ -63,9 +66,8 @@ class Chopra(object):
pooling2_size
=
[
4
,
3
],
fc1_output
=
250
,
seed
=
10
,
device
=
"/cpu:0"
,
batch_norm
=
False
):
n_classes
=
None
,
seed
=
10
):
self
.
conv1_kernel_size
=
conv1_kernel_size
self
.
conv1_output
=
conv1_output
...
...
@@ -78,34 +80,48 @@ class Chopra(object):
self
.
fc1_output
=
fc1_output
self
.
seed
=
seed
self
.
device
=
device
self
.
batch_norm
=
batch_norm
self
.
n_classes
=
n_classes
def
__call__
(
self
,
inputs
,
reuse
=
False
):
slim
=
tf
.
contrib
.
slim
with
tf
.
device
(
self
.
device
):
initializer
=
tf
.
contrib
.
layers
.
xavier_initializer
(
uniform
=
False
,
dtype
=
tf
.
float32
,
seed
=
self
.
seed
)
graph
=
slim
.
conv2d
(
inputs
,
self
.
conv1_output
,
self
.
conv1_kernel_size
,
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'conv1'
,
reuse
=
reuse
)
graph
=
slim
.
max_pool2d
(
graph
,
self
.
pooling1_size
,
scope
=
'pool1'
)
graph
=
slim
.
conv2d
(
graph
,
self
.
conv2_output
,
self
.
conv2_kernel_size
,
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'conv2'
,
reuse
=
reuse
)
graph
=
slim
.
max_pool2d
(
graph
,
self
.
pooling2_size
,
scope
=
'pool2'
)
def
__call__
(
self
,
inputs
,
reuse
=
False
,
end_point
=
'logits'
):
slim
=
tf
.
contrib
.
slim
graph
=
slim
.
flatten
(
graph
,
scope
=
'flatten1'
)
end_points
=
dict
()
initializer
=
tf
.
contrib
.
layers
.
xavier_initializer
(
uniform
=
False
,
dtype
=
tf
.
float32
,
seed
=
self
.
seed
)
graph
=
slim
.
conv2d
(
inputs
,
self
.
conv1_output
,
self
.
conv1_kernel_size
,
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'conv1'
,
reuse
=
reuse
)
end_points
[
'conv1'
]
=
graph
graph
=
slim
.
max_pool2d
(
graph
,
self
.
pooling1_size
,
scope
=
'pool1'
)
end_points
[
'pool1'
]
=
graph
graph
=
slim
.
conv2d
(
graph
,
self
.
conv2_output
,
self
.
conv2_kernel_size
,
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'conv2'
,
reuse
=
reuse
)
end_points
[
'conv2'
]
=
graph
graph
=
slim
.
max_pool2d
(
graph
,
self
.
pooling2_size
,
scope
=
'pool2'
)
end_points
[
'pool2'
]
=
graph
graph
=
slim
.
flatten
(
graph
,
scope
=
'flatten1'
)
end_points
[
'flatten1'
]
=
graph
graph
=
slim
.
fully_connected
(
graph
,
self
.
fc1_output
,
weights_initializer
=
initializer
,
activation_fn
=
None
,
scope
=
'fc1'
,
reuse
=
reuse
)
end_points
[
'fc1'
]
=
graph
if
self
.
n_classes
is
not
None
:
# Appending the logits layer
graph
=
append_logits
(
graph
,
self
.
n_classes
,
reuse
)
end_points
[
'logits'
]
=
graph
return
end_points
[
end_point
]
graph
=
slim
.
fully_connected
(
graph
,
self
.
fc1_output
,
weights_initializer
=
initializer
,
activation_fn
=
None
,
scope
=
'fc1'
,
reuse
=
reuse
)
return
graph
bob/learn/tensorflow/network/Dummy.py
View file @
f6a946f0
...
...
@@ -17,10 +17,8 @@ class Dummy(object):
conv1_output
=
1
,
fc1_output
=
2
,
seed
=
10
,
device
=
"/cpu:0"
,
use_gpu
=
False
):
n_classes
=
None
):
"""
Create all the necessary variables for this CNN
...
...
@@ -36,23 +34,33 @@ class Dummy(object):
self
.
conv1_kernel_size
=
conv1_kernel_size
self
.
fc1_output
=
fc1_output
self
.
seed
=
seed
self
.
device
=
device
self
.
n_classes
=
n_classes
def
__call__
(
self
,
inputs
):
def
__call__
(
self
,
inputs
,
reuse
=
False
,
end_point
=
"logits"
):
slim
=
tf
.
contrib
.
slim
with
tf
.
device
(
self
.
device
):
end_points
=
dict
()
initializer
=
tf
.
contrib
.
layers
.
xavier_initializer
(
uniform
=
False
,
dtype
=
tf
.
float32
,
seed
=
self
.
seed
)
graph
=
slim
.
conv2d
(
inputs
,
self
.
conv1_output
,
self
.
conv1_kernel_size
,
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'conv1'
)
end_points
[
'conv1'
]
=
graph
initializer
=
tf
.
contrib
.
layers
.
xavier_initializer
(
uniform
=
False
,
dtype
=
tf
.
float32
,
seed
=
self
.
seed
)
graph
=
slim
.
flatten
(
graph
,
scope
=
'flatten1'
)
end_points
[
'flatten1'
]
=
graph
graph
=
slim
.
conv2d
(
inputs
,
self
.
conv1_output
,
self
.
conv1_kernel_size
,
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'conv1'
)
graph
=
slim
.
flatten
(
graph
,
scope
=
'flatten1'
)
graph
=
slim
.
fully_connected
(
graph
,
self
.
fc1_output
,
weights_initializer
=
initializer
,
activation_fn
=
None
,
scope
=
'fc1'
)
end_points
[
'fc1'
]
=
graph
if
self
.
n_classes
is
not
None
:
# Appending the logits layer
graph
=
append_logits
(
graph
,
self
.
n_classes
,
reuse
)
end_points
[
'logits'
]
=
graph
graph
=
slim
.
fully_connected
(
graph
,
self
.
fc1_output
,
weights_initializer
=
initializer
,
activation_fn
=
None
,
scope
=
'fc1'
)
return
graph
return
end_points
[
end_point
]
bob/learn/tensorflow/network/InceptionResnetV2.py
View file @
f6a946f0
...
...
@@ -115,15 +115,24 @@ def inception_resnet_v2(inputs, is_training=True,
reuse
=
None
,
scope
=
'InceptionResnetV2'
):
"""Creates the Inception Resnet V2 model.
Args:
**Parameters**:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: float, the fraction to keep before final layer.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
**Returns**:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
"""
...
...
bob/learn/tensorflow/network/LightCNN29.py
View file @
f6a946f0
...
...
@@ -5,6 +5,7 @@
import
tensorflow
as
tf
from
bob.learn.tensorflow.layers
import
maxout
from
.utils
import
append_logits
class
LightCNN29
(
object
):
"""Creates the graph for the Light CNN-9 in
...
...
@@ -13,20 +14,15 @@ class LightCNN29(object):
"""
def
__init__
(
self
,
seed
=
10
,
n_classes
=
10
,
device
=
"/cpu:0"
,
batch_norm
=
False
):
n_classes
=
10
):
self
.
seed
=
seed
self
.
device
=
device
self
.
batch_norm
=
batch_norm
self
.
n_classes
=
n_classes
def
__call__
(
self
,
inputs
,
reuse
=
False
):
def
__call__
(
self
,
inputs
,
reuse
=
False
,
end_point
=
"logits"
):
slim
=
tf
.
contrib
.
slim
#with tf.device(self.device):
end_points
=
dict
()
initializer
=
tf
.
contrib
.
layers
.
xavier_initializer
(
uniform
=
False
,
dtype
=
tf
.
float32
,
seed
=
self
.
seed
)
graph
=
slim
.
conv2d
(
inputs
,
96
,
[
5
,
5
],
activation_fn
=
tf
.
nn
.
relu
,
...
...
@@ -34,7 +30,8 @@ class LightCNN29(object):
weights_initializer
=
initializer
,
scope
=
'Conv1'
,
reuse
=
reuse
)
end_points
[
'conv1'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
48
,
name
=
'Maxout1'
)
...
...
@@ -58,7 +55,8 @@ class LightCNN29(object):
weights_initializer
=
initializer
,
scope
=
'Conv2'
,
reuse
=
reuse
)
end_points
[
'conv2'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
96
,
name
=
'Maxout2'
)
...
...
@@ -82,7 +80,8 @@ class LightCNN29(object):
weights_initializer
=
initializer
,
scope
=
'Conv3'
,
reuse
=
reuse
)
end_points
[
'conv3'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
192
,
name
=
'Maxout3'
)
...
...
@@ -106,7 +105,8 @@ class LightCNN29(object):
weights_initializer
=
initializer
,
scope
=
'Conv4'
,
reuse
=
reuse
)
end_points
[
'conv4'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
128
,
name
=
'Maxout4'
)
...
...
@@ -128,6 +128,7 @@ class LightCNN29(object):
weights_initializer
=
initializer
,
scope
=
'Conv5'
,
reuse
=
reuse
)
end_points
[
'conv5'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
128
,
...
...
@@ -144,16 +145,18 @@ class LightCNN29(object):
activation_fn
=
tf
.
nn
.
relu
,
scope
=
'fc1'
,
reuse
=
reuse
)
end_points
[
'fc1'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
256
,
name
=
'Maxoutfc1'
)
graph
=
slim
.
dropout
(
graph
,
keep_prob
=
0.3
,
scope
=
'dropout1'
)
graph
=
slim
.
fully_connected
(
graph
,
self
.
n_classes
,
weights_initializer
=
initializ
er
,
activation_fn
=
None
,
scope
=
'fc2'
,
reuse
=
reuse
)
if
self
.
n_classes
is
not
None
:
# Appending the logits lay
er
graph
=
append_logits
(
graph
,
self
.
n_classes
,
reuse
)
end_points
[
'logits'
]
=
graph
return
graph
return
end_points
[
end_point
]
bob/learn/tensorflow/network/LightCNN9.py
View file @
f6a946f0
...
...
@@ -5,6 +5,7 @@
import
tensorflow
as
tf
from
bob.learn.tensorflow.layers
import
maxout
from
.utils
import
append_logits
class
LightCNN9
(
object
):
"""Creates the graph for the Light CNN-9 in
...
...
@@ -13,28 +14,26 @@ class LightCNN9(object):
"""
def
__init__
(
self
,
seed
=
10
,
n_classes
=
10
,
device
=
"/cpu:0"
,
batch_norm
=
False
):
n_classes
=
10
):
self
.
seed
=
seed
self
.
device
=
device
self
.
batch_norm
=
batch_norm
self
.
n_classes
=
n_classes
def
__call__
(
self
,
inputs
,
reuse
=
False
,
get_class_layer
=
True
):
def
__call__
(
self
,
inputs
,
reuse
=
False
,
get_class_layer
=
True
,
end_point
=
"logits"
):
slim
=
tf
.
contrib
.
slim
#with tf.device(self.device):
initializer
=
tf
.
contrib
.
layers
.
xavier_initializer
(
uniform
=
False
,
dtype
=
tf
.
float32
,
seed
=
self
.
seed
)
end_points
=
dict
()
graph
=
slim
.
conv2d
(
inputs
,
96
,
[
5
,
5
],
activation_fn
=
tf
.
nn
.
relu
,
stride
=
1
,
weights_initializer
=
initializer
,
scope
=
'Conv1'
,
reuse
=
reuse
)
end_points
[
'conv1'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
48
,
name
=
'Maxout1'
)
...
...
@@ -58,7 +57,8 @@ class LightCNN9(object):
weights_initializer
=
initializer
,
scope
=
'Conv2'
,
reuse
=
reuse
)
end_points
[
'conv2'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
96
,
name
=
'Maxout2'
)
...
...
@@ -82,6 +82,7 @@ class LightCNN9(object):
weights_initializer
=
initializer
,
scope
=
'Conv3'
,
reuse
=
reuse
)
end_points
[
'conv3'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
192
,
...
...
@@ -106,6 +107,7 @@ class LightCNN9(object):
weights_initializer
=
initializer
,
scope
=
'Conv4'
,
reuse
=
reuse
)
end_points
[
'conv4'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
128
,
...
...
@@ -128,14 +130,16 @@ class LightCNN9(object):
weights_initializer
=
initializer
,
scope
=
'Conv5'
,
reuse
=
reuse
)
end_points
[
'conv5'
]
=
graph
graph
=
maxout
(
graph
,
num_units
=
128
,
name
=
'Maxout5'
)
name
=
'Maxout5'
)
graph
=
slim
.
max_pool2d
(
graph
,
[
2
,
2
],
stride
=
2
,
padding
=
"SAME"
,
scope
=
'Pool4'
)
graph
=
slim
.
flatten
(
graph
,
scope
=
'flatten1'
)
end_points
[
'flatten1'
]
=
graph
graph
=
slim
.
dropout
(
graph
,
keep_prob
=
0.3
,
scope
=
'dropout1'
)
...
...
@@ -144,18 +148,17 @@ class LightCNN9(object):
activation_fn
=
tf
.
nn
.
relu
,
scope
=
'fc1'
,
reuse
=
reuse
)
end_points
[
'fc1'
]
=
graph
#graph = maxout(graph,
# num_units=256,
# name='Maxoutfc1')
graph
=
slim
.
dropout
(
graph
,
keep_prob
=
0.3
,
scope
=
'dropout2'
)
if
self
.
n_classes
is
not
None
:
# Appending the logits layer
graph
=
append_logits
(
graph
,
self
.
n_classes
,
reuse
)
end_points
[
'logits'
]
=
graph
if
get_class_layer
:
graph
=
slim
.
fully_connected
(
graph
,
self
.
n_classes
,
weights_initializer
=
initializer
,
activation_fn
=
None
,
scope
=
'fc2'
,
reuse
=
reuse
)
return
end_points
[
end_point
]
return
graph
bob/learn/tensorflow/network/__init__.py
View file @
f6a946f0
...
...
@@ -6,6 +6,7 @@ from .MLP import MLP
from
.Embedding
import
Embedding
from
.InceptionResnetV2
import
inception_resnet_v2
# gets sphinx autodoc done right - don't remove it
def
__appropriate__
(
*
args
):
"""Says object was actually declared here, an not on the import module.
...
...
bob/learn/tensorflow/network/utils.py
0 → 100644
View file @
f6a946f0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
import
tensorflow
as
tf
slim
=
tf
.
contrib
.
slim
def
append_logits
(
graph
,
n_classes
,
reuse
):
graph
=
slim
.
fully_connected
(
graph
,
n_classes
,
activation_fn
=
None
,
weights_initializer
=
tf
.
truncated_normal_initializer
(
stddev
=
0.1
),
weights_regularizer
=
slim
.
l2_regularizer
(
0.1
),
scope
=
'Logits'
,
reuse
=
reuse
)
return
graph
bob/learn/tensorflow/script/compute_statistics.py
View file @
f6a946f0
...
...
@@ -8,7 +8,7 @@
Script that computes statistics for image
Usage:
compute_statistics.py <base_path> --extension=<arg>
compute_statistics.py <base_path>
<output_file>
--extension=<arg>
compute_statistics.py -h | --help
Options:
-h --help Show this screen.
...
...
@@ -47,7 +47,9 @@ def main():
BASE_PATH
=
args
[
'<base_path>'
]
EXTENSION
=
args
[
'--extension'
]
SHAPE
=
[
3
,
224
,
224
]
OUTPUT_FILE
=
args
[
'<output_file>'
]
#SHAPE = [3, 224, 224]
SHAPE
=
[
1
,
64
,
64
]
count
,
sum_data
=
process_images
(
BASE_PATH
,
EXTENSION
,
SHAPE
)
...
...
@@ -55,5 +57,5 @@ def main():
for
s
in
range
(
SHAPE
[
0
]):
means
[
s
,
...]
=
sum_data
[
s
,
...]
/
float
(
count
)
bob
.
io
.
base
.
save
(
means
,
"means_casia.hdf5"
)
bob
.
io
.
base
.
save
(
means
[
0
,
:,
:].
astype
(
"uint8"
),
"
means_casi
a.png"
)
bob
.
io
.
base
.
save
(
means
,
OUTPUT_FILE
)
bob
.
io
.
base
.
save
(
means
[
0
,
:,
:].
astype
(
"uint8"
),
"
xux
a.png"
)
bob/learn/tensorflow/test/test_cnn.py
View file @
f6a946f0
...
...
@@ -6,7 +6,7 @@
import
numpy
from
bob.learn.tensorflow.datashuffler
import
Memory
,
SiameseMemory
,
TripletMemory
,
ImageAugmentation
,
ScaleFactor
,
Linear
from
bob.learn.tensorflow.network
import
Chopra
from
bob.learn.tensorflow.loss
import
Base
Loss
,
ContrastiveLoss
,
TripletLoss
from
bob.learn.tensorflow.loss
import
MeanSoftMax
Loss
,
ContrastiveLoss
,
TripletLoss
from
bob.learn.tensorflow.trainers
import
Trainer
,
SiameseTrainer
,
TripletTrainer
,
constant
from
.test_cnn_scratch
import
validate_network
from
bob.learn.tensorflow.network
import
Embedding
,
LightCNN9
...
...
@@ -93,12 +93,12 @@ def test_cnn_trainer():
directory
=
"./temp/cnn"
# Loss for the softmax
loss
=
BaseLoss
(
tf
.
nn
.
sparse_s
oft
m
ax
_cross_entropy_with_logits
,
tf
.
reduce_mean
)
loss
=
MeanS
oft
M
ax
Loss
(
)
# Preparing the architecture
architecture
=
Chopra
(
seed
=
seed
,
fc1_output
=
10
)
architecture
=
Chopra
(
seed
=
seed
,
n_classes
=
10
)
input_pl
=
train_data_shuffler
(
"data"
,
from_queue
=
True
)
graph
=
architecture
(
input_pl
)
embedding
=
Embedding
(
train_data_shuffler
(
"data"
,
from_queue
=
False
),
graph
)
...
...
@@ -151,13 +151,13 @@ def test_lightcnn_trainer():
directory
=
"./temp/cnn"
# Loss for the softmax
loss
=
BaseLoss
(
tf
.
nn
.
sparse_s
oft
m
ax
_cross_entropy_with_logits
,
tf
.
reduce_mean
)
loss
=
MeanS
oft
M
ax
Loss
(
)
# Preparing the architecture
architecture
=
LightCNN9
(
seed
=
seed
,
n_classes
=
2
)
input_pl
=
train_data_shuffler
(
"data"
,
from_queue
=
True
)
graph
=
architecture
(
input_pl
)
graph
=
architecture
(
input_pl
,
end_point
=
"logits"
)
embedding
=
Embedding
(
train_data_shuffler
(
"data"
,
from_queue
=
False
),
graph
)
# One graph trainer
...
...
@@ -203,15 +203,15 @@ def test_siamesecnn_trainer():
directory
=
"./temp/siamesecnn"
# Preparing the architecture
architecture
=
Chopra
(
seed
=
seed
,
fc1_output
=
10
)
architecture
=
Chopra
(
seed
=
seed
)
# Loss for the Siamese
loss
=
ContrastiveLoss
(
contrastive_margin
=
4.
)
input_pl
=
train_data_shuffler
(
"data"
)
graph
=
dict
()
graph
[
'left'
]
=
architecture
(
input_pl
[
'left'
])
graph
[
'right'
]
=
architecture
(
input_pl
[
'right'
],
reuse
=
True
)
graph
[
'left'
]
=
architecture
(
input_pl
[
'left'
]
,
end_point
=
"fc1"
)
graph
[
'right'
]
=
architecture
(
input_pl
[
'right'
],
reuse
=
True
,
end_point
=
"fc1"
)
trainer
=
SiameseTrainer
(
train_data_shuffler
,
iterations
=
iterations
,
...
...
@@ -261,9 +261,9 @@ def test_tripletcnn_trainer():
input_pl
=
train_data_shuffler
(
"data"
)
graph
=
dict
()
graph
[
'anchor'
]
=
architecture
(
input_pl
[
'anchor'
])
graph
[
'positive'
]
=
architecture
(
input_pl
[
'positive'
],
reuse
=
True
)
graph
[
'negative'
]
=
architecture
(
input_pl
[
'negative'
],
reuse
=
True
)
graph
[
'anchor'
]
=
architecture
(
input_pl
[
'anchor'
]
,
end_point
=
"fc1"
)
graph
[
'positive'
]
=
architecture
(
input_pl
[
'positive'
],
reuse
=
True
,
end_point
=
"fc1"
)
graph
[
'negative'
]
=
architecture
(
input_pl
[
'negative'
],
reuse
=
True
,
end_point
=
"fc1"
)
# One graph trainer
trainer
=
TripletTrainer
(
train_data_shuffler
,
...
...
bob/learn/tensorflow/test/test_cnn_prefetch.py
View file @
f6a946f0
...
...
@@ -54,7 +54,7 @@ def test_cnn_trainer():
# Preparing the architecture
architecture
=
Chopra
(
seed
=
seed
,
fc1_output
=
10
)
n_classes
=
10
)
input_pl
=
train_data_shuffler
(
"data"
,
from_queue
=
True
)
graph
=
architecture
(
input_pl
)
embedding
=
Embedding
(
train_data_shuffler
(
"data"
,
from_queue
=
False
),
architecture
(
train_data_shuffler
(
"data"
,
from_queue
=
False
),
reuse
=
True
))
...
...
bob/learn/tensorflow/test/test_cnn_pretrained_model.py
View file @
f6a946f0
...
...
@@ -233,7 +233,7 @@ def test_siamese_cnn_pretrained():
#embedding = Embedding(train_data_shuffler("data", from_queue=False)['left'], graph['left'])
embedding
=
Embedding
(
trainer
.
data_ph
[
'left'
],
trainer
.
graph
[
'left'
])
eer
=
dummy_experiment
(
validation_data_shuffler
,
embedding
)
assert
eer
<
0.1
5
assert
eer
<
0.1
8
del
graph
del
loss
...
...
@@ -250,7 +250,7 @@ def test_siamese_cnn_pretrained():
#embedding = Embedding(train_data_shuffler("data", from_queue=False)['left'], trainer.graph['left'])
embedding
=
Embedding
(
trainer
.
data_ph
[
'left'
],
trainer
.
graph
[
'left'
])
eer
=
dummy_experiment
(
validation_data_shuffler
,
embedding
)
assert
eer
<
0.1
4
assert
eer
<
0.1
8
shutil
.
rmtree
(
directory
)
...
...
Write
Preview
Supports
Markdown
0%
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment