bob.learn.tensorflow
Commit 8bc92f21 authored Sep 21, 2016 by Tiago de Freitas Pereira

    Finished some milestones

parent 271e12b9

Showing 11 changed files with 568 additions and 41 deletions (+568 / -41)
    bob/learn/tensorflow/analyzers/ExperimentAnalizer.py   +17   -7
    bob/learn/tensorflow/data/BaseDataShuffler.py            +2   -0
    bob/learn/tensorflow/data/TextDataShuffler.py           +32   -7
    bob/learn/tensorflow/network/Chopra.py                  +93   -0
    bob/learn/tensorflow/network/Dummy.py                   +52   -0
    bob/learn/tensorflow/network/SequenceNetwork.py         +61   -1
    bob/learn/tensorflow/network/VGG.py                    +254   -0
    bob/learn/tensorflow/network/__init__.py                 +3   -0
    bob/learn/tensorflow/script/train_mnist.py               +4   -3
    bob/learn/tensorflow/script/train_mnist_siamese.py      +30  -14
    bob/learn/tensorflow/trainers/SiameseTrainer.py         +20   -9
bob/learn/tensorflow/analyzers/Analizer.py → bob/learn/tensorflow/analyzers/ExperimentAnalizer.py
@@ -8,9 +8,11 @@ Neural net work error rates analizer
 """

 import numpy
 import bob.measure
 from tensorflow.core.framework import summary_pb2
 from scipy.spatial.distance import cosine

-class Analizer:
+class ExperimentAnalizer:
     """
     Analizer.

@@ -21,7 +23,7 @@ class Analizer:
     """

-    def __init__(self, data_shuffler, machine, session):
+    def __init__(self, data_shuffler, machine, session, convergence_threshold=0.01, convergence_reference='eer'):
         """
         Use the CNN as feature extractor for a n-class classification

@@ -29,6 +31,10 @@ class Analizer:
         data_shuffler:
         graph:
         session:
+        convergence_threshold:
+        convergence_reference: Reference used to analyze the convergence. Possible values are `eer`, `far10`, `far100`
         """

@@ -49,8 +55,6 @@ class Analizer:
         enroll_features = self.machine(enroll_data, session=self.session)
         del enroll_data
-        #import ipdb; ipdb.set_trace();

         # Extracting features for probing
         probe_data, probe_labels = self.data_shuffler.get_batch()
         probe_features = self.machine(probe_data, session=self.session)

@@ -79,9 +83,9 @@ class Analizer:
             n = [cosine(models[i], negative_data[j]) for j in range(negative_data.shape[0])]
             negative_scores = numpy.hstack((negative_scores, n))

-        self.compute_stats((-1) * negative_scores, (-1) * positive_scores)
+        return self.__compute_tensorflow_summary((-1) * negative_scores, (-1) * positive_scores)

-    def compute_stats(self, negative_scores, positive_scores):
+    def __compute_tensorflow_summary(self, negative_scores, positive_scores):
         """
         Compute some stats with the scores, such as:
         - EER

@@ -96,25 +100,31 @@ class Analizer:
         positive_scores:
         """
+        summaries = []

         # Compute EER
         threshold = bob.measure.eer_threshold(negative_scores, positive_scores)
         far, frr = bob.measure.farfrr(negative_scores, positive_scores, threshold)
         eer = (far + frr) / 2.
+        summaries.append(summary_pb2.Summary.Value(tag="EER", simple_value=eer))
         self.eer.append(eer)

         # Computing FAR 10
         threshold = bob.measure.far_threshold(negative_scores, positive_scores, far_value=0.1)
         far, frr = bob.measure.farfrr(negative_scores, positive_scores, threshold)
+        summaries.append(summary_pb2.Summary.Value(tag="FAR 10", simple_value=frr))
         self.far10.append(frr)

         # Computing FAR 100
         threshold = bob.measure.far_threshold(negative_scores, positive_scores, far_value=0.01)
         far, frr = bob.measure.farfrr(negative_scores, positive_scores, threshold)
+        summaries.append(summary_pb2.Summary.Value(tag="FAR 100", simple_value=frr))
         self.far100.append(frr)

         # Computing FAR 1000
         threshold = bob.measure.far_threshold(negative_scores, positive_scores, far_value=0.001)
         far, frr = bob.measure.farfrr(negative_scores, positive_scores, threshold)
+        summaries.append(summary_pb2.Summary.Value(tag="FAR 1000", simple_value=frr))
         self.far1000.append(frr)

-        return
+        return summary_pb2.Summary(value=summaries)
\ No newline at end of file
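The analyzer call now returns a `summary_pb2.Summary` protobuf instead of only storing statistics. A minimal sketch of how that return value can be consumed, assuming the TensorFlow 0.x `tf.train.SummaryWriter` used elsewhere in this commit (`analizer`, `train_writer`, and `step` are illustrative names):

    # The analyzer computes EER/FAR statistics and hands back a Summary proto;
    # SummaryWriter.add_summary accepts such a protobuf directly.
    summary = analizer()
    train_writer.add_summary(summary, step)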
bob/learn/tensorflow/data/BaseDataShuffler.py
@@ -34,6 +34,8 @@ class BaseDataShuffler(object):
         self.data = data
         self.shape = tuple([batch_size] + input_shape)
+        self.input_shape = tuple(input_shape)
         self.labels = labels
+        self.possible_labels = list(set(self.labels))
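For context, the new `possible_labels` attribute is just the de-duplicated label set; a small illustration (values are made up):

    labels = [0, 1, 1, 2, 0, 2]
    possible_labels = list(set(labels))   # e.g. [0, 1, 2]; set order is not guaranteed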
bob/learn/tensorflow/data/TextDataShuffler.py
@@ -6,6 +6,7 @@
 import numpy
 import bob.io.base
 import bob.io.image
+import bob.ip.base
 import tensorflow as tf

 from .BaseDataShuffler import BaseDataShuffler

@@ -27,12 +28,12 @@ class TextDataShuffler(BaseDataShuffler):
     Shuffler that deal with file list

     **Parameters**
-      data:
-      labels:
-      perc_train:
-      scale:
-      train_batch_size:
-      validation_batch_size:
+      data:
+      labels:
+      input_shape: Shape of the input. If `input_shape != data.shape`, the data will be reshaped
+      input_dtype="float64":
+      scale=True:
+      batch_size=1:
     """

     if isinstance(data, list):
@@ -52,12 +53,15 @@ class TextDataShuffler(BaseDataShuffler):
     def load_from_file(self, file_name, shape):
         d = bob.io.base.load(file_name)
-        #import ipdb; ipdb.set_trace();
         if len(d.shape) == 2:
-            data = numpy.zeros(shape=tuple(shape[1:]))
+            data = numpy.zeros(shape=(d.shape[0], d.shape[1], 1))
             data[:, :, 0] = d
         else:
             data = d

+        data = self.rescale(data)

         return data

     def get_batch(self):
@@ -80,6 +84,27 @@ class TextDataShuffler(BaseDataShuffler):
         return selected_data.astype("float32"), selected_labels

+    def rescale(self, data):
+        """
+        Reescale a single sample with input_shape
+        """
+        if self.input_shape != data.shape:
+            # TODO: Implement a better way to do this reescaling
+            # If it is gray scale
+            if self.input_shape[2] == 1:
+                copy = data[:, :, 0].copy()
+                dst = numpy.zeros(shape=self.input_shape[0:2])
+                bob.ip.base.scale(copy, dst)
+                dst = numpy.reshape(dst, self.input_shape)
+            else:
+                dst = numpy.resize(data, self.input_shape)
+                # Scaling with numpy, because bob is c,w,d instead of w,h,c
+                #bob.ip.base.scale(data, dst)
+
+            return dst
+        else:
+            return data

     def get_pair(self, zero_one_labels=True):
         """
         Get a random pair of samples
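A self-contained sketch of the gray-scale branch of the new `rescale` method (assumes `bob.ip.base` is installed; the 80x64 to 56x46 shapes mirror the change in train_mnist_siamese.py below):

    import numpy
    import bob.ip.base

    data = numpy.random.rand(80, 64, 1)        # one sample, h x w x 1
    input_shape = (56, 46, 1)                  # target shape

    copy = data[:, :, 0].copy()                # drop the channel axis
    dst = numpy.zeros(shape=input_shape[0:2])
    bob.ip.base.scale(copy, dst)               # resample into the 56x46 buffer
    dst = numpy.reshape(dst, input_shape)      # restore the channel axis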
bob/learn/tensorflow/network/Chopra.py 0 → 100644
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Class that creates the architecture presented in the paper:

Chopra, Sumit, Raia Hadsell, and Yann LeCun. "Learning a similarity metric discriminatively, with application to
face verification." 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05). Vol. 1. IEEE, 2005.
"""

import tensorflow as tf
from .SequenceNetwork import SequenceNetwork
from ..layers import Conv2D, FullyConnected, MaxPooling
import bob.learn.tensorflow
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant


class Chopra(SequenceNetwork):

    def __init__(self,
                 conv1_kernel_size=7,
                 conv1_output=15,

                 conv2_kernel_size=6,
                 conv2_output=45,

                 conv3_kernel_size=5,
                 conv3_output=250,

                 fc6_output=50,
                 n_classes=40,
                 default_feature_layer="fc7",

                 seed=10,
                 use_gpu=False):
        """
        Create all the necessary variables for this CNN

        **Parameters**
            conv1_kernel_size=7,
            conv1_output=15,

            conv2_kernel_size=6,
            conv2_output=45,

            conv3_kernel_size=5,
            conv3_output=250,

            fc6_output=50,
            n_classes=40
            seed=10
        """
        super(Chopra, self).__init__(default_feature_layer=default_feature_layer,
                                     use_gpu=use_gpu)

        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size,
                        filters=conv1_output,
                        activation=tf.nn.tanh,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
        self.add(MaxPooling(name="pooling1"))
        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size,
                        filters=conv2_output,
                        activation=tf.nn.tanh,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
        self.add(MaxPooling(name="pooling2"))
        self.add(Conv2D(name="conv3", kernel_size=conv3_kernel_size,
                        filters=conv3_output,
                        activation=tf.nn.tanh,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
        self.add(FullyConnected(name="fc6", output_dim=fc6_output,
                                activation=tf.nn.tanh,
                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                                bias_initialization=Constant(use_gpu=self.use_gpu)
                                ))
        self.add(FullyConnected(name="fc7", output_dim=n_classes,
                                activation=None,
                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                                bias_initialization=Constant(use_gpu=self.use_gpu)))
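A hedged usage sketch for the new network (defaults taken from the constructor above; the export from network/__init__.py is added later in this commit):

    from bob.learn.tensorflow.network import Chopra

    # conv1 7x7/15 -> pool -> conv2 6x6/45 -> pool -> conv3 5x5/250 -> fc6 (50) -> fc7 (n_classes)
    architecture = Chopra(n_classes=40, default_feature_layer="fc7", seed=10)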
bob/learn/tensorflow/network/Dummy.py 0 → 100644
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Dummy architecture
"""

import tensorflow as tf
from .SequenceNetwork import SequenceNetwork
from ..layers import Conv2D, FullyConnected, MaxPooling
import bob.learn.tensorflow
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant


class Dummy(SequenceNetwork):

    def __init__(self,
                 conv1_kernel_size=3,
                 conv1_output=1,

                 n_classes=2,
                 default_feature_layer="fc1",

                 seed=10,
                 use_gpu=False):
        """
        Create all the necessary variables for this CNN

        **Parameters**
            conv1_kernel_size=3,
            conv1_output=1,
            n_classes=2
            seed=10
        """
        super(Dummy, self).__init__(default_feature_layer=default_feature_layer,
                                    use_gpu=use_gpu)

        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size,
                        filters=conv1_output,
                        activation=tf.nn.tanh,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
        self.add(FullyConnected(name="fc1", output_dim=n_classes,
                                activation=None,
                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                                bias_initialization=Constant(use_gpu=self.use_gpu)))
bob/learn/tensorflow/network/SequenceNetwork.py
@@ -10,10 +10,11 @@ Class that creates the lenet architecture
 import tensorflow as tf
 import abc
 import six
+import numpy
 import os

 from collections import OrderedDict
-from bob.learn.tensorflow.layers import Layer, MaxPooling, Dropout
+from bob.learn.tensorflow.layers import Layer, MaxPooling, Dropout, Conv2D, FullyConnected


 class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
@@ -146,6 +147,65 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
                 self.sequence_net[k].b.assign(hdf5.read(self.sequence_net[k].b.name)).eval(session=session)
                 session.run(self.sequence_net[k].b)

+    def variable_summaries(self, var, name):
+        """Attach a lot of summaries to a Tensor."""
+        with tf.name_scope('summaries'):
+            mean = tf.reduce_mean(var)
+            tf.scalar_summary('mean/' + name, mean)
+            with tf.name_scope('stddev'):
+                stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))
+            tf.scalar_summary('sttdev/' + name, stddev)
+            tf.scalar_summary('max/' + name, tf.reduce_max(var))
+            tf.scalar_summary('min/' + name, tf.reduce_min(var))
+            tf.histogram_summary(name, var)
+
+    def generate_summaries(self):
+        for k in self.sequence_net.keys():
+            current_layer = self.sequence_net[k]
+
+            if not isinstance(self.sequence_net[k], MaxPooling) and not isinstance(self.sequence_net[k], Dropout):
+                self.variable_summaries(current_layer.W, current_layer.name + '/weights')
+                self.variable_summaries(current_layer.b, current_layer.name + '/bias')
+
+    def compute_magic_number(self, hypothetic_image_dimensions=(28, 28, 1)):
+        """
+        Here it is done an estimative of the capacity of CNN.
+
+        :param hypothetic_sample_number: an ar
+        :param hypothetic_image_dimensions:
+        :return:
+        """
+        stride = 1  # ALWAYS EQUALS TO ONE
+        current_image_dimensions = list(hypothetic_image_dimensions)
+
+        samples_per_sample = 0
+        flatten_dimension = numpy.prod(current_image_dimensions)
+        for k in self.sequence_net.keys():
+            current_layer = self.sequence_net[k]
+
+            if isinstance(current_layer, Conv2D):
+                #samples_per_sample += current_layer.filters * current_layer.kernel_size * current_image_dimensions[0] + current_layer.filters
+                #samples_per_sample += current_layer.filters * current_layer.kernel_size * current_image_dimensions[1] + current_layer.filters
+                samples_per_sample += current_layer.filters * current_image_dimensions[0] * current_image_dimensions[1] + current_layer.filters
+                current_image_dimensions[2] = current_layer.filters
+                flatten_dimension = numpy.prod(current_image_dimensions)
+
+            if isinstance(current_layer, MaxPooling):
+                current_image_dimensions[0] /= 2
+                current_image_dimensions[1] /= 2
+                flatten_dimension = current_image_dimensions[0] * current_image_dimensions[1] * current_image_dimensions[2]
+
+            if isinstance(current_layer, FullyConnected):
+                samples_per_sample += flatten_dimension * current_layer.output_dim + current_layer.output_dim
+                flatten_dimension = current_layer.output_dim
+
+        return samples_per_sample

     #if self.saver is None:
     #    variables = self.dump_variables()
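A worked example of `compute_magic_number` with its default (28, 28, 1) input, using the Dummy network from this commit (conv1_output=1, n_classes=2); note that the Conv2D term multiplies the filter count by the image area, not the kernel area, exactly as the code above does:

    net = Dummy()
    # Conv2D "conv1":       1 * 28 * 28 + 1       = 785
    # FullyConnected "fc1": (28 * 28 * 1) * 2 + 2 = 1570
    print net.compute_magic_number()   # 785 + 1570 = 2355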
bob/learn/tensorflow/network/VGG.py 0 → 100644 (diff collapsed in this view: +254 -0)
bob/learn/tensorflow/network/__init__.py

@@ -4,6 +4,9 @@ __path__ = extend_path(__path__, __name__)
 from .SequenceNetwork import SequenceNetwork
 from .Lenet import Lenet
+from .Chopra import Chopra
+from .Dummy import Dummy
+from .VGG import VGG
 from .LenetDropout import LenetDropout
 from .MLP import MLP
bob/learn/tensorflow/script/train_mnist.py

@@ -23,7 +23,7 @@ import tensorflow as tf
 from .. import util
 SEED = 10
 from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
-from bob.learn.tensorflow.network import Lenet, MLP
+from bob.learn.tensorflow.network import Lenet, MLP, Dummy
 from bob.learn.tensorflow.trainers import Trainer
 from bob.learn.tensorflow.loss import BaseLoss
@@ -89,9 +89,10 @@ def main():
     # Preparing the architecture
     cnn = True
     if cnn:
-        lenet = Lenet(seed=SEED)
+        #architecture = Lenet(seed=SEED)
+        architecture = Dummy(seed=SEED)
         loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
-        trainer = Trainer(architecture=lenet, loss=loss, iterations=ITERATIONS)
+        trainer = Trainer(architecture=architecture, loss=loss, iterations=ITERATIONS)
         trainer.train(train_data_shuffler, validation_data_shuffler)
     else:
         mlp = MLP(10, hidden_layers=[15, 20])
bob/learn/tensorflow/script/train_mnist_siamese.py

@@ -23,7 +23,7 @@ import tensorflow as tf
 from .. import util
 SEED = 10
 from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
-from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout
+from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra
 from bob.learn.tensorflow.trainers import SiameseTrainer
 from bob.learn.tensorflow.loss import ContrastiveLoss
 import numpy
@@ -58,32 +58,43 @@ def main():
                                                  batch_size=VALIDATION_BATCH_SIZE)
     else:
-        import bob.db.mobio
-        db = bob.db.mobio.Database()
+        import bob.db.atnt
+        db = bob.db.atnt.Database()
+
+        #import bob.db.mobio
+        #db = bob.db.mobio.Database()

         # Preparing train set
-        train_objects = db.objects(protocol="male", groups="world")
+        #train_objects = db.objects(protocol="male", groups="world")
+        train_objects = db.objects(groups="world")
         train_labels = [o.client_id for o in train_objects]

-        train_file_names = [o.make_path(directory="/idiap/user/tpereira/face/baselines/eigenface/preprocessed", extension=".hdf5")
+        #directory = "/idiap/user/tpereira/face/baselines/eigenface/preprocessed",
+        train_file_names = [o.make_path(directory="/idiap/group/biometric/databases/orl", extension=".pgm")
                             for o in train_objects]

+        #train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
+        #                                       input_shape=[80, 64, 1],
+        #                                       batch_size=BATCH_SIZE)
         train_data_shuffler = TextDataShuffler(train_file_names, train_labels,
-                                               input_shape=[80, 64, 1],
+                                               input_shape=[56, 46, 1],
                                                batch_size=BATCH_SIZE)

         # Preparing train set
-        validation_objects = db.objects(protocol="male", groups="dev")
+        #validation_objects = db.objects(protocol="male", groups="dev")
+        validation_objects = db.objects(groups="dev")
         validation_labels = [o.client_id for o in validation_objects]

-        validation_file_names = [o.make_path(directory="/idiap/user/tpereira/face/baselines/eigenface/preprocessed", extension=".hdf5")
+        validation_file_names = [o.make_path(directory="/idiap/group/biometric/databases/orl", extension=".pgm")
                                  for o in validation_objects]

+        #validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
+        #                                            input_shape=[80, 64, 1],
+        #                                            batch_size=VALIDATION_BATCH_SIZE)
         validation_data_shuffler = TextDataShuffler(validation_file_names, validation_labels,
-                                                    input_shape=[80, 64, 1],
+                                                    input_shape=[56, 46, 1],
                                                     batch_size=VALIDATION_BATCH_SIZE)

     # Preparing the architecture
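The new [56, 46, 1] shape is half the 92x112 pixel size of the AT&T/ORL face images (a reading of the numbers, not stated in the commit). Combined with the BaseDataShuffler change above, the shuffler then exposes, assuming a hypothetical BATCH_SIZE of 16:

    train_data_shuffler.shape        # (16, 56, 46, 1) == [batch_size] + input_shape
    train_data_shuffler.input_shape  # (56, 46, 1)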
@@ -92,11 +103,16 @@ def main():
     cnn = True
     if cnn:
-        lenet = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)
-        #lenet = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)
+        # LENET PAPER CHOPRA
+        #architecture = Chopra(default_feature_layer="fc7")
+        architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)
+        #architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
+        #architecture = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)

         loss = ContrastiveLoss()
-        trainer = SiameseTrainer(architecture=lenet,
+        #optimizer = tf.train.GradientDescentOptimizer(0.0001)
+        trainer = SiameseTrainer(architecture=architecture,
                                  loss=loss,
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST)
bob/learn/tensorflow/trainers/SiameseTrainer.py
@@ -7,7 +7,7 @@ import logging
 logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
 import threading
-from ..analyzers import Analizer
+from ..analyzers import ExperimentAnalizer
 from ..network import SequenceNetwork
 import bob.io.base
 from .Trainer import Trainer
@@ -21,7 +21,7 @@ class SiameseTrainer(Trainer):
                  optimizer=tf.train.AdamOptimizer(),
                  use_gpu=False,
                  loss=None,
-                 temp_dir="",
+                 temp_dir="cnn",

                  # Learning rate
                  base_learning_rate=0.001,
@@ -81,7 +81,8 @@ class SiameseTrainer(Trainer):
                                                        self.weight_decay  # Decay step
                                                        )

-        bob.io.base.create_directories_safe(os.path.join(self.temp_dir, 'OUTPUT'))
+        # Creating directory
+        bob.io.base.create_directories_safe(self.temp_dir)

         # Creating two graphs
         train_placeholder_left_data, train_placeholder_labels = train_data_shuffler.\
@@ -127,7 +128,7 @@ class SiameseTrainer(Trainer):
         with tf.Session() as session:

             if validation_data_shuffler is not None:
-                analizer = Analizer(validation_data_shuffler, self.architecture, session)
+                analizer = ExperimentAnalizer(validation_data_shuffler, self.architecture, session)

             tf.initialize_all_variables().run()
@@ -136,15 +137,22 @@ class SiameseTrainer(Trainer):
             tf.train.start_queue_runners(coord=thread_pool)
             threads = start_thread()

             # TENSOR BOARD SUMMARY
             train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'LOGS'), session.graph)

-            # Tensorboard data
+            # Siamese specific summary
             tf.scalar_summary('loss', loss_train)
             tf.scalar_summary('between_class', between_class)
             tf.scalar_summary('within_class', within_class)
             tf.scalar_summary('lr', learning_rate)
             merged = tf.merge_all_summaries()

+            # Architecture summary
+            self.architecture.generate_summaries()
+            merged_validation = tf.merge_all_summaries()

             for step in range(self.iterations):
                 _, l, lr, summary = session.run([optimizer, loss_train, learning_rate, merged])
@@ -152,10 +160,13 @@ class SiameseTrainer(Trainer):
                 train_writer.add_summary(summary, step)

                 if validation_data_shuffler is not None and step % self.snapshot == 0:
-                    analizer()
+                    summary = session.run(merged_validation)
+                    train_writer.add_summary(summary, step)
+
+                    summary = analizer()
+                    train_writer.add_summary(summary, step)

                     #if self.save_intermediate:
                     #    self.architecture.save(hdf5, step)
-                    print str(step) + " - " + str(analizer.eer[-1])
+                    print str(step)

             self.architecture.save(hdf5)
             del hdf5
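One subtlety worth flagging (an observation about the TensorFlow 0.x API, not part of the commit): `tf.merge_all_summaries()` merges every summary registered in the graph so far, so `merged_validation` above also re-evaluates the four training scalars. A hedged sketch of a stricter split using `tf.merge_summary`, which takes an explicit list of summary ops:

    loss_sum = tf.scalar_summary('loss', loss_train)
    lr_sum = tf.scalar_summary('lr', learning_rate)
    merged = tf.merge_summary([loss_sum, lr_sum])   # training scalars only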