bob / bob.learn.tensorflow / Commits

Commit 5879fa2a, authored Jun 26, 2017 by Tiago Pereira

    Handling the pretrained models

parent 754ef62d | Pipeline #10715 failed with stages in 6 minutes and 42 seconds | Changes 7 | Pipelines 1
bob/learn/tensorflow/datashuffler/Siamese.py

@@ -28,7 +28,7 @@ class Siamese(Base):
         :return:
         """
         with tf.name_scope("Input"):
-            self.data_ph = {}
+            self.data_ph = dict()
             self.data_ph['left'] = tf.placeholder(tf.float32, shape=self.input_shape, name="left")
             self.data_ph['right'] = tf.placeholder(tf.float32, shape=self.input_shape, name="right")
             self.label_ph = tf.placeholder(tf.int64, shape=[None], name="label")
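The only functional line in this hunk swaps the dict literal `{}` for `dict()`; the two are equivalent in Python, so the placeholder layout (float32 `left`/`right` data placeholders plus an int64 `label` placeholder) is unchanged. The same cosmetic swap recurs throughout this commit.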
bob/learn/tensorflow/datashuffler/Triplet.py

@@ -28,7 +28,7 @@ class Triplet(Base):
         :return:
         """
         with tf.name_scope("Input"):
-            self.data_ph = {}
+            self.data_ph = dict()
             self.data_ph['anchor'] = tf.placeholder(tf.float32, shape=self.input_shape, name="anchor")
             self.data_ph['positive'] = tf.placeholder(tf.float32, shape=self.input_shape, name="positive")
             self.data_ph['negative'] = tf.placeholder(tf.float32, shape=self.input_shape, name="negative")
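As context for how these placeholder dicts are consumed, below is a minimal TF1-style sketch. It is not code from this commit; the batch arrays and the fixed [None, 28, 28, 1] shape are illustrative assumptions.

    import numpy
    import tensorflow as tf

    # Same placeholder structure as the Siamese hunk above
    data_ph = dict()
    data_ph['left'] = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name="left")
    data_ph['right'] = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name="right")
    label_ph = tf.placeholder(tf.int64, shape=[None], name="label")

    # Hypothetical batch of 16 image pairs
    left_batch = numpy.zeros((16, 28, 28, 1), dtype="float32")
    right_batch = numpy.zeros((16, 28, 28, 1), dtype="float32")
    labels = numpy.zeros(16, dtype="int64")

    with tf.Session() as session:
        # Any op built on top of the placeholders is evaluated through feed_dict
        session.run(tf.shape(data_ph['left']),
                    feed_dict={data_ph['left']: left_batch,
                               data_ph['right']: right_batch,
                               label_ph: labels})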
bob/learn/tensorflow/test/test_cnn.py

@@ -147,7 +147,7 @@ def test_siamesecnn_trainer():
     loss = ContrastiveLoss(contrastive_margin=4.)
     input_pl = train_data_shuffler("data")
-    graph = {}
+    graph = dict()
     graph['left'] = architecture(input_pl['left'])
     graph['right'] = architecture(input_pl['right'])
@@ -160,7 +160,7 @@ def test_siamesecnn_trainer():
                                         loss=loss,
                                         learning_rate=constant(0.01, name="regular_lr"),
                                         optimizer=tf.train.GradientDescentOptimizer(0.01),)
-    trainer.train(train_data_shuffler)
+    trainer.train()
     embedding = Embedding(train_data_shuffler("data", from_queue=False)['left'], graph['left'])
     eer = dummy_experiment(validation_data_shuffler, embedding)
     assert eer < 0.15
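Besides the `{}` to `dict()` swap, this hunk shows the new trainer contract: the data shuffler is bound at construction time, so `trainer.train()` now takes no arguments, and evaluation goes through an `Embedding` built from the non-queue placeholder (`from_queue=False`) and the matching graph endpoint.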
@@ -194,7 +194,7 @@ def test_tripletcnn_trainer():
     loss = TripletLoss(margin=4.)
     input_pl = train_data_shuffler("data")
-    graph = {}
+    graph = dict()
     graph['anchor'] = architecture(input_pl['anchor'])
     graph['positive'] = architecture(input_pl['positive'])
     graph['negative'] = architecture(input_pl['negative'])
bob/learn/tensorflow/test/test_cnn_pretrained_model.py

@@ -73,18 +73,15 @@ def test_cnn_pretrained():
     # Loss for the softmax
     loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)

     # One graph trainer
     trainer = Trainer(train_data_shuffler,
                       iterations=iterations,
                       analizer=None,
                       temp_dir=directory)
     trainer.create_network_from_scratch(graph=graph,
                                         loss=loss,
                                         learning_rate=constant(0.1, name="regular_lr"),
-                                        optimizer=tf.train.GradientDescentOptimizer(0.1),)
+                                        optimizer=tf.train.GradientDescentOptimizer(0.1))
     trainer.train()
     accuracy = validate_network(embedding, validation_data, validation_labels)
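The hunk above is the pattern this commit standardizes on. As a hedged summary (a sketch reusing only names that appear in the diff; shuffler, graph, and loss construction are elided), the lifecycle is:

    # One-graph trainer lifecycle, as exercised by test_cnn_pretrained():
    # 1. bind the data shuffler at construction time,
    # 2. build the network, loss, and optimizer on top of it,
    # 3. train without further arguments.
    trainer = Trainer(train_data_shuffler,
                      iterations=iterations,
                      analizer=None,
                      temp_dir=directory)
    trainer.create_network_from_scratch(graph=graph,
                                        loss=loss,
                                        learning_rate=constant(0.1, name="regular_lr"),
                                        optimizer=tf.train.GradientDescentOptimizer(0.1))
    trainer.train()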
@@ -193,65 +190,65 @@ def test_siamese_cnn_pretrained():
     # Creating datashufflers
     data_augmentation = ImageAugmentation()
     train_data_shuffler = SiameseMemory(train_data, train_labels,
-                                        input_shape=[28, 28, 1],
+                                        input_shape=[None, 28, 28, 1],
                                         batch_size=batch_size,
-                                        data_augmentation=data_augmentation)
+                                        data_augmentation=data_augmentation,
+                                        normalizer=ScaleFactor())
     validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
     validation_data_shuffler = SiameseMemory(validation_data, validation_labels,
-                                             input_shape=[28, 28, 1],
-                                             batch_size=validation_batch_size)
+                                             input_shape=[None, 28, 28, 1],
+                                             batch_size=validation_batch_size,
+                                             normalizer=ScaleFactor())

     directory = "./temp/cnn"
     directory2 = "./temp/cnn2"

-    # Creating a random network
-    scratch = scratch_network()
+    # Creating graph
+    input_pl = train_data_shuffler("data")
+    graph = dict()
+    graph['left'] = scratch_network(input_pl['left'])
+    graph['right'] = scratch_network(input_pl['right'])

     # Loss for the softmax
     loss = ContrastiveLoss(contrastive_margin=4.)

     # One graph trainer
-    trainer = SiameseTrainer(architecture=scratch,
-                             loss=loss,
+    trainer = SiameseTrainer(train_data_shuffler,
                              iterations=iterations,
                              analizer=None,
-                             prefetch=False,
-                             learning_rate=constant(0.05, name="regular_lr"),
-                             optimizer=tf.train.AdamOptimizer(name="adam_pretrained_model"),
                              temp_dir=directory)
-    trainer.train(train_data_shuffler)
+    trainer.create_network_from_scratch(graph=graph,
+                                        loss=loss,
+                                        learning_rate=constant(0.01, name="regular_lr"),
+                                        optimizer=tf.train.GradientDescentOptimizer(0.01))
+    trainer.train()

-    # Testing
-    eer = dummy_experiment(validation_data_shuffler, scratch)
-    # The result is not so good
-    assert eer < 0.28
+    embedding = Embedding(train_data_shuffler("data", from_queue=False)['left'], graph['left'])
+    eer = dummy_experiment(validation_data_shuffler, embedding)
+    assert eer < 0.10

-    del scratch
+    del graph
     del loss
     del trainer

     # Training the network using a pre trained model
     loss = ContrastiveLoss(contrastive_margin=4.)
-    scratch = scratch_network()
-    trainer = SiameseTrainer(architecture=scratch,
-                             loss=loss,
-                             iterations=iterations + 1000,
+    trainer = SiameseTrainer(train_data_shuffler,
+                             iterations=iterations * 2,
                              analizer=None,
                              prefetch=False,
                              learning_rate=None,
-                             temp_dir=directory2,
-                             model_from_file=os.path.join(directory, "model.ckp"))
-    trainer.train(train_data_shuffler)
+                             temp_dir=directory)
+    trainer.create_network_from_file(os.path.join(directory, "model.ckp"))
+    trainer.train()

-    eer = dummy_experiment(validation_data_shuffler, scratch)
-    # Now it is better
-    assert eer < 0.27
+    #import ipdb; ipdb.set_trace()
+    embedding = Embedding(train_data_shuffler("data", from_queue=False)['left'], trainer.graph['left'])
+    eer = dummy_experiment(validation_data_shuffler, embedding)
+    assert eer < 0.10

     shutil.rmtree(directory)
     shutil.rmtree(directory2)

-    del scratch
+    del graph
     del loss
     del trainer
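The second half of this test is the heart of the commit message ("Handling the pretrained models"): instead of passing model_from_file to the constructor, a trainer is now pointed at an existing checkpoint explicitly. A condensed sketch of that resume path, using only calls visible in the diff:

    # Resume training from the checkpoint written by the first trainer;
    # "model.ckp" is the checkpoint name the test itself uses.
    trainer = SiameseTrainer(train_data_shuffler,
                             iterations=iterations * 2,
                             analizer=None,
                             temp_dir=directory)
    trainer.create_network_from_file(os.path.join(directory, "model.ckp"))
    trainer.train()

After the restore, the graph endpoints live on the trainer itself, e.g. trainer.graph['left'], which is what the Embedding above is built from.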
bob/learn/tensorflow/test/test_cnn_scratch.py

@@ -4,14 +4,11 @@
 # @date: Thu 13 Oct 2016 13:35 CEST

 import numpy
 import bob.io.base
 import os
 from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation, ScaleFactor
 from bob.learn.tensorflow.network import Embedding
 from bob.learn.tensorflow.loss import BaseLoss
-from bob.learn.tensorflow.trainers import Trainer, learning_rate
+from bob.learn.tensorflow.trainers import Trainer, constant
 from bob.learn.tensorflow.utils import load_mnist
-from bob.learn.tensorflow.layers import Conv2D, FullyConnected
 import tensorflow as tf
 import shutil
@@ -30,20 +27,20 @@ slim = tf.contrib.slim

 def scratch_network():
     # Creating a random network
-    inputs = {}
+    inputs = dict()
     inputs['data'] = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name="train_data")
     inputs['label'] = tf.placeholder(tf.int64, shape=[None], name="train_label")

     initializer = tf.contrib.layers.xavier_initializer(seed=seed)
-    graph = slim.conv2d(inputs['data'], 10, [3, 3], activation_fn=tf.nn.relu,
-                        stride=1, scope='conv1', weights_initializer=initializer)
-    graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
-    graph = slim.flatten(graph, scope='flatten1')
-    graph = slim.fully_connected(graph, 10, activation_fn=None,
-                                 scope='fc1', weights_initializer=initializer)
-    return inputs, graph
+    scratch = slim.conv2d(inputs['data'], 10, [3, 3], activation_fn=tf.nn.relu,
+                          stride=1, scope='conv1', weights_initializer=initializer)
+    scratch = slim.max_pool2d(scratch, [4, 4], scope='pool1')
+    scratch = slim.flatten(scratch, scope='flatten1')
+    scratch = slim.fully_connected(scratch, 10, activation_fn=None,
+                                   scope='fc1', weights_initializer=initializer)
+    return inputs, scratch


 def validate_network(embedding, validation_data, validation_labels):
     # Testing
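In both versions scratch_network() builds its own placeholders and returns an (inputs, endpoint) pair; the hunk only renames the endpoint variable and swaps `{}` for `dict()`. The returned dict is what lets the test below attach inputs['data'] and inputs['label'] directly onto the data shuffler.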
@@ -67,41 +64,43 @@ def test_cnn_trainer_scratch():
     # Creating datashufflers
     data_augmentation = ImageAugmentation()
     train_data_shuffler = Memory(train_data, train_labels,
-                                 input_shape=[28, 28, 1],
+                                 input_shape=[None, 28, 28, 1],
                                  batch_size=batch_size,
                                  data_augmentation=data_augmentation,
                                  normalizer=ScaleFactor())
-    validation_data_shuffler = Memory(train_data, train_labels,
-                                      input_shape=[28, 28, 1],
-                                      batch_size=batch_size,
-                                      data_augmentation=data_augmentation,
-                                      normalizer=ScaleFactor())
+    #validation_data_shuffler = Memory(train_data, train_labels,
+    #                                  input_shape=[28, 28, 1],
+    #                                  batch_size=batch_size,
+    #                                  data_augmentation=data_augmentation,
+    #                                  normalizer=ScaleFactor())
     validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

     # Create scratch network
     inputs, scratch = scratch_network()

     # Setting the placeholders
     train_data_shuffler.data_ph = inputs['data']
     train_data_shuffler.label_ph = inputs['label']
     embedding = Embedding(inputs['data'], scratch)

     # Loss for the softmax
     loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)

     # One graph trainer
-    trainer = Trainer(inputs=inputs,
-                      graph=scratch,
+    trainer = Trainer(train_data_shuffler,
                       iterations=iterations,
-                      loss=loss,
                       analizer=None,
-                      prefetch=False,
-                      temp_dir=directory,
-                      optimizer=tf.train.GradientDescentOptimizer(0.01),
-                      learning_rate=learning_rate.constant(base_learning_rate=0.01, name="constant_learning_rate"),
-                      validation_snapshot=20)
-    trainer.train(train_data_shuffler, validation_data_shuffler)
+                      temp_dir=directory)
+    trainer.create_network_from_scratch(graph=scratch,
+                                        loss=loss,
+                                        learning_rate=constant(0.01, name="regular_lr"),
+                                        optimizer=tf.train.GradientDescentOptimizer(0.01),)
+    trainer.train()

     accuracy = validate_network(embedding, validation_data, validation_labels)
-    assert accuracy > 80
+    assert accuracy > 70
     shutil.rmtree(directory)
     del trainer
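The net effect in this test: the keyword-heavy Trainer constructor (loss, optimizer, learning rate, validation_snapshot) is reduced to shuffler-plus-bookkeeping arguments, graph and optimization are supplied via create_network_from_scratch(), and validation moves out of train() into an explicit validate_network() call on the reshaped numpy arrays, with the accuracy bound relaxed from 80 to 70.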
bob/learn/tensorflow/trainers/SiameseTrainer.py

@@ -192,19 +192,19 @@ class SiameseTrainer(Trainer):
         self.graph['left'] = tf.get_collection("graph_left")[0]
         self.graph['right'] = tf.get_collection("graph_right")[0]

-        # Loading the place holders by the pointer
+        # Loading the placeholders from the pointers
         self.data_ph = dict()
         self.data_ph['left'] = tf.get_collection("data_ph_left")[0]
         self.data_ph['right'] = tf.get_collection("data_ph_right")[0]
         self.label_ph = tf.get_collection("label_ph")[0]

-        self.predictor = []
-        self.predictor = tf.get_collection("predictor")[0]
+        # Loading loss from the pointers
+        self.predictor = dict()
+        self.predictor['loss'] = tf.get_collection("predictor_loss")[0]
+        self.predictor['between_class'] = tf.get_collection("predictor_between_class_loss")[0]
+        self.predictor['within_class'] = tf.get_collection("predictor_within_class_loss")[0]

-        # Loding other elements
+        # Loading other elements
         self.optimizer = tf.get_collection("optimizer")[0]
         self.learning_rate = tf.get_collection("learning_rate")[0]
         self.summaries_train = tf.get_collection("summaries_train")[0]
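For the `tf.get_collection(key)[0]` reads above to succeed, each object must have been registered under the same key when the model was written. The save-side counterpart is not part of this hunk; a sketch of what it presumably looks like (an assumption, using the standard tf.add_to_collection API):

    # Presumed save-side registration (not shown in this commit):
    tf.add_to_collection("graph_left", self.graph['left'])
    tf.add_to_collection("graph_right", self.graph['right'])
    tf.add_to_collection("data_ph_left", self.data_ph['left'])
    tf.add_to_collection("data_ph_right", self.data_ph['right'])
    tf.add_to_collection("label_ph", self.label_ph)
    tf.add_to_collection("predictor_loss", self.predictor['loss'])
    tf.add_to_collection("predictor_between_class_loss", self.predictor['between_class'])
    tf.add_to_collection("predictor_within_class_loss", self.predictor['within_class'])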
bob/learn/tensorflow/trainers/Trainer.py

@@ -135,7 +135,8 @@ class Trainer(object):
         self.label_ph = self.train_data_shuffler("label")

         self.graph = graph
         self.loss = loss
-        self.predictor = self.loss(self.graph, self.train_data_shuffler("label", from_queue=True))
+        #self.predictor = self.loss(self.graph, self.train_data_shuffler("label", from_queue=True))
+        self.predictor = self.loss(self.graph, self.train_data_shuffler("label", from_queue=False))
         self.optimizer_class = optimizer
         self.learning_rate = learning_rate
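Here the predictor (the loss op) is rewired from the prefetch-queue label path (from_queue=True) to the placeholder path (from_queue=False), with the old line kept as a comment. This is consistent with the tests above, which construct trainers with prefetch=False and feed data through placeholders.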