bob / bob.learn.tensorflow · Commits
Commit f9965fd8, authored Oct 06, 2016 by Tiago de Freitas Pereira
Several updates. Need to control better these commits
parent e22b1822
Showing 10 changed files with 103 additions and 74 deletions (+103 −74):
bob/learn/tensorflow/data/BaseDataShuffler.py (+2 −2)
bob/learn/tensorflow/data/MemoryDataShuffler.py (+2 −2)
bob/learn/tensorflow/data/TextDataShuffler.py (+27 −0)
bob/learn/tensorflow/layers/MaxPooling.py (+3 −2)
bob/learn/tensorflow/loss/TripletLoss.py (+2 −2)
bob/learn/tensorflow/network/Chopra.py (+26 −25)
bob/learn/tensorflow/script/train_mnist.py (+3 −2)
bob/learn/tensorflow/script/train_mnist_siamese.py (+3 −3)
bob/learn/tensorflow/script/train_mnist_triplet.py (+6 −6)
bob/learn/tensorflow/trainers/TripletTrainer.py (+29 −30)
bob/learn/tensorflow/data/BaseDataShuffler.py

@@ -101,8 +101,8 @@ class BaseDataShuffler(object):

    def get_one_triplet(self, input_data, input_labels):
        # Getting a pair of clients
        index = numpy.random.choice(len(self.possible_labels), 2, replace=False)
        label_positive = index[0]
        label_negative = index[1]
        index[0] = self.possible_labels[index[0]]
        index[1] = self.possible_labels[index[1]]

        # Getting the indexes of the data from a particular client
        indexes = numpy.where(input_labels == index[0])[0]
        ...
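The hunk is truncated here, so the committed continuation of get_one_triplet is not visible. As orientation only, a minimal sketch of how the two sampled labels are typically turned into one anchor/positive/negative triple (the helper name finish_one_triplet and its body are assumptions, not the committed code):

import numpy

def finish_one_triplet(input_data, input_labels, index):
    # index[0] is the client used for anchor and positive; index[1] the negative one.
    indexes = numpy.where(input_labels == index[0])[0]
    numpy.random.shuffle(indexes)
    data_anchor = input_data[indexes[0], ...]     # anchor sample
    data_positive = input_data[indexes[1], ...]   # different sample, same client
    indexes = numpy.where(input_labels == index[1])[0]
    numpy.random.shuffle(indexes)
    data_negative = input_data[indexes[0], ...]   # sample from the other client
    return data_anchor, data_positive, data_negative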
bob/learn/tensorflow/data/MemoryDataShuffler.py

@@ -82,7 +82,7 @@ class MemoryDataShuffler(BaseDataShuffler):

        return data, data_p, labels_siamese

-    def get_random_triplet(self, n_triplets=1):
+    def get_random_triplet(self):
        """
        Get a random triplet
        ...

@@ -96,7 +96,7 @@ class MemoryDataShuffler(BaseDataShuffler):

        data_p = numpy.zeros(shape=self.shape, dtype='float32')
        data_n = numpy.zeros(shape=self.shape, dtype='float32')

-        for i in range(n_triplets):
+        for i in range(self.shape[0]):
            data_a[i, ...], data_p[i, ...], data_n[i, ...] = self.get_one_triplet(self.data, self.labels)

        return data_a, data_p, data_n
        ...
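With this change the number of triplets returned is no longer a caller-chosen n_triplets; it is pinned to the shuffler's batch dimension, self.shape[0]. A hedged usage sketch (the constructor arguments below are illustrative assumptions, not the class's documented signature):

import numpy
from bob.learn.tensorflow.data import MemoryDataShuffler

train_data = numpy.random.rand(100, 28, 28, 1).astype('float32')  # toy data
train_labels = numpy.random.randint(0, 10, size=100)              # toy labels

# Hypothetical constructor arguments -- the real signature is not shown in this diff.
shuffler = MemoryDataShuffler(train_data, train_labels, input_shape=[28, 28, 1], batch_size=16)

# One triplet per batch slot; each returned array would have shape (16, 28, 28, 1) here.
data_a, data_p, data_n = shuffler.get_random_triplet()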
bob/learn/tensorflow/data/TextDataShuffler.py

@@ -150,3 +150,30 @@ class TextDataShuffler(BaseDataShuffler):

        return data, data_p, labels_siamese

+    def get_random_triplet(self):
+        """
+        Get a random triplet of samples
+
+        **Parameters**
+            is_target_set_train: Defining the target set to get the batch
+
+        **Return**
+        """
+
+        data_a = numpy.zeros(shape=self.shape, dtype='float32')
+        data_p = numpy.zeros(shape=self.shape, dtype='float32')
+        data_n = numpy.zeros(shape=self.shape, dtype='float32')
+
+        for i in range(self.shape[0]):
+            file_name_a, file_name_p, file_name_n = self.get_one_triplet(self.data, self.labels)
+            data_a[i, ...] = self.load_from_file(str(file_name_a), self.shape)
+            data_p[i, ...] = self.load_from_file(str(file_name_p), self.shape)
+            data_n[i, ...] = self.load_from_file(str(file_name_n), self.shape)
+
+        if self.scale:
+            data_a *= self.scale_value
+            data_p *= self.scale_value
+            data_n *= self.scale_value
+
+        return data_a, data_p, data_n
bob/learn/tensorflow/layers/MaxPooling.py

@@ -10,11 +10,12 @@ from .Layer import Layer

class MaxPooling(Layer):

-    def __init__(self, name):
+    def __init__(self, name, shape=[1, 2, 2, 1]):
        """
        Constructor
        """
        super(MaxPooling, self).__init__(name, use_gpu=False)
+        self.shape = shape

    def create_variables(self, input_layer):
        self.input_layer = input_layer
        ...

@@ -22,6 +23,6 @@ class MaxPooling(Layer):

    def get_graph(self):
        with tf.name_scope(str(self.name)):
-            output = tf.nn.max_pool(self.input_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
+            output = tf.nn.max_pool(self.input_layer, ksize=self.shape, strides=[1, 1, 1, 1], padding='SAME')

        return output
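The pooling window is now configurable through the constructor, and the strides change from [1, 2, 2, 1] to [1, 1, 1, 1], so with 'SAME' padding the layer no longer downsamples spatially. A minimal standalone sketch of the call get_graph() now emits (TF 0.x-era API, as used throughout this repository):

import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
# Equivalent of MaxPooling(name="pool", shape=[1, 2, 2, 1]) after this commit:
output = tf.nn.max_pool(images, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='SAME')
# output keeps the 28x28 spatial size; the previous strides=[1, 2, 2, 1] halved it.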
bob/learn/tensorflow/loss/TripletLoss.py

@@ -30,7 +30,7 @@ class TripletLoss(BaseLoss):

    """

-    def __init__(self, margin=2.0):
+    def __init__(self, margin=5.0):
        self.margin = margin

    def __call__(self, anchor_feature, positive_feature, negative_feature):
        ...

@@ -41,5 +41,5 @@ class TripletLoss(BaseLoss):

        d_negative = tf.square(compute_euclidean_distance(anchor_feature, negative_feature))

        loss = tf.maximum(0., d_positive - d_negative + self.margin)

        return tf.reduce_mean(loss), tf.reduce_mean(d_positive), tf.reduce_mean(d_negative)
        #return loss, d_positive, d_negative
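The quantity built here is the standard triplet margin loss, L = max(0, ||f_a - f_p||^2 - ||f_a - f_n||^2 + m); this commit raises the default margin m from 2.0 to 5.0 and returns the mean positive and negative distances alongside the loss. A minimal numpy sketch of the same computation (illustrative, not the repository's API):

import numpy

def triplet_loss(anchor, positive, negative, margin=5.0):
    # Squared Euclidean distances between embedding rows.
    d_pos = numpy.sum((anchor - positive) ** 2, axis=1)
    d_neg = numpy.sum((anchor - negative) ** 2, axis=1)
    # Hinge at the margin, averaged over the batch, mirroring the
    # tf.maximum / tf.reduce_mean pair in the committed code.
    return numpy.mean(numpy.maximum(0., d_pos - d_neg + margin))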
bob/learn/tensorflow/network/Chopra.py

@@ -9,6 +9,18 @@ Class that creates the architecture presented in the paper:

Chopra, Sumit, Raia Hadsell, and Yann LeCun. "Learning a similarity metric discriminatively, with application to
face verification." 2005 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'05). Vol. 1. IEEE, 2005.

+This is a modified version of the original architecture.
+It is inspired by https://gitlab.idiap.ch/bob/xfacereclib.cnn/blob/master/lua/network.lua
+
+-- C1 : Convolutional, kernel = 7x7 pixels, 15 feature maps
+-- M2 : MaxPooling, 2x2
+-- HT : Hard Hyperbolic Tangent
+-- C3 : Convolutional, kernel = 6x6 pixels, 45 feature maps
+-- M4 : MaxPooling, 4x3
+-- HT : Hard Hyperbolic Tangent
+-- R  : Reshaping layer HT 5x5 => 25 (45 times; once for each feature map)
+-- L5 : Linear 25 => 250
"""
...

@@ -26,15 +38,16 @@ class Chopra(SequenceNetwork):

                 conv1_kernel_size=7,
                 conv1_output=15,
+                pooling1_size=[1, 2, 2, 1],

                 conv2_kernel_size=6,
                 conv2_output=45,

                 conv3_kernel_size=5,
                 conv3_output=250,
+                pooling2_size=[1, 4, 3, 1],

-                fc6_output=50,
-                n_classes=40,
-                default_feature_layer="fc7",
+                fc1_output=250,
+                default_feature_layer="fc1",

                 seed=10,
                 use_gpu=False):
...

@@ -45,19 +58,19 @@ class Chopra(SequenceNetwork):

        conv1_kernel_size=5,
        conv1_output=32,
        pooling1_size=[1, 2, 2, 1],

        conv2_kernel_size=5,
        conv2_output=64,

        conv3_kernel_size=5,
        conv3_output=250,
        pooling2_size=[1, 4, 3, 1],

-        fc6_output=50,
-        n_classes=10
+        fc1_output=50,
+        seed = 10
        """
        super(Chopra, self).__init__(default_feature_layer=default_feature_layer,
                                     use_gpu=use_gpu)

        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size, filters=conv1_output,
...

@@ -65,7 +78,7 @@ class Chopra(SequenceNetwork):

                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
-        self.add(MaxPooling(name="pooling1"))
+        self.add(MaxPooling(name="pooling1", shape=pooling1_size))
        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size, filters=conv2_output,
...

@@ -73,21 +86,9 @@ class Chopra(SequenceNetwork):

                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
-        self.add(MaxPooling(name="pooling2"))
+        self.add(MaxPooling(name="pooling2", shape=pooling2_size))
-        self.add(Conv2D(name="conv3", kernel_size=conv3_kernel_size, filters=conv3_output,
-                        activation=tf.nn.tanh,
-                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
-                        bias_initialization=Constant(use_gpu=self.use_gpu)
-                        ))
-        self.add(FullyConnected(name="fc6", output_dim=fc6_output,
-                                activation=tf.nn.tanh,
-                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
-                                bias_initialization=Constant(use_gpu=self.use_gpu)
-                                ))
-        self.add(FullyConnected(name="fc7", output_dim=n_classes,
+        self.add(FullyConnected(name="fc1", output_dim=fc1_output,
                                activation=None,
                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                                bias_initialization=Constant(use_gpu=self.use_gpu)))
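Read end to end, the stack the constructor now builds appears to be: conv1 (7x7, 15 maps) -> pooling1 (shape [1, 2, 2, 1]) -> conv2 (6x6, 45 maps) -> pooling2 (shape [1, 4, 3, 1]) -> fc1 (linear, 250 outputs, no activation), with "fc1" as the new default feature layer; the old conv3/fc6/fc7 tail is removed, although the conv3_* keyword arguments survive in the signature. A hedged usage sketch (input_placeholder is an illustrative assumption; compute_graph is the method the trainers use, as seen in TripletTrainer.py below):

import tensorflow as tf
from bob.learn.tensorflow.network import Chopra

architecture = Chopra(seed=10)  # conv1 -> pooling1 -> conv2 -> pooling2 -> fc1
input_placeholder = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])  # assumed input shape
graph = architecture.compute_graph(input_placeholder)  # features taken from "fc1" by default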
bob/learn/tensorflow/script/train_mnist.py

@@ -23,7 +23,7 @@ import tensorflow as tf

from .. import util

SEED = 10
from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
-from bob.learn.tensorflow.network import Lenet, MLP, Dummy
+from bob.learn.tensorflow.network import Lenet, MLP, Dummy, Chopra
from bob.learn.tensorflow.trainers import Trainer
from bob.learn.tensorflow.loss import BaseLoss
...

@@ -89,7 +89,8 @@ def main():

    # Preparing the architecture
    cnn = True
    if cnn:
-        architecture = Lenet(seed=SEED)
+        architecture = Chopra(seed=SEED)
+        #architecture = Lenet(seed=SEED)
        #architecture = Dummy(seed=SEED)

        loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
        trainer = Trainer(architecture=architecture, loss=loss, iterations=ITERATIONS)
...
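The script now trains the Chopra network on MNIST; the BaseLoss pair above combines a per-example cross-entropy with a batch reduction. For reference, a sketch of the raw TF 0.x computation this reduces to (logits and labels are illustrative placeholders, not names from the script):

import tensorflow as tf

logits = tf.placeholder(tf.float32, shape=[None, 10])  # network outputs, 10 MNIST classes
labels = tf.placeholder(tf.int64, shape=[None])        # integer class labels
# BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean) amounts to:
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, labels))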
bob/learn/tensorflow/script/train_mnist_siamese.py

@@ -39,7 +39,7 @@ def main():

    perc_train = 0.9

    # Loading data
-    mnist = False
+    mnist = True

    if mnist:
        train_data, train_labels, validation_data, validation_labels = \
...

@@ -118,8 +118,8 @@ def main():

    if cnn:
        # LENET PAPER CHOPRA
-        #architecture = Chopra(default_feature_layer="fc7")
-        architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16, use_gpu=USE_GPU)
+        architecture = Chopra(seed=SEED)
+        # architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16, use_gpu=USE_GPU)
        #architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
        #architecture = Dummy(seed=SEED)
...
bob/learn/tensorflow/script/train_mnist_triplet.py

@@ -39,7 +39,7 @@ def main():

    perc_train = 0.9

    # Loading data
-    mnist = True
+    mnist = False

    if mnist:
        train_data, train_labels, validation_data, validation_labels = \
...

@@ -125,15 +125,15 @@ def main():

        #architecture = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)

        loss = TripletLoss()
-        optimizer = tf.train.GradientDescentOptimizer(0.0001)
+        optimizer = tf.train.GradientDescentOptimizer(0.000001)

        trainer = TripletTrainer(architecture=architecture,
                                 loss=loss,
                                 iterations=ITERATIONS,
                                 snapshot=VALIDATION_TEST,
                                 optimizer=optimizer)
-        #trainer.train(train_data_shuffler, validation_data_shuffler)
-        trainer.train(train_data_shuffler)
+        trainer.train(train_data_shuffler, validation_data_shuffler)
+        #trainer.train(train_data_shuffler)
    else:
        mlp = MLP(n_classes, hidden_layers=[15, 20])
...
bob/learn/tensorflow/trainers/TripletTrainer.py

@@ -87,36 +87,29 @@ class TripletTrainer(Trainer):

        bob.io.base.create_directories_safe(self.temp_dir)

        # Creating two graphs
-        #train_placeholder_anchor_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_anchor")
-        #train_placeholder_positive_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_positive")
-        #train_placeholder_negative_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_negative")
+        train_placeholder_anchor_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_anchor")
+        train_placeholder_positive_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_positive")
+        train_placeholder_negative_data, _ = train_data_shuffler.get_placeholders_forprefetch(name="train_negative")

        # Defining a placeholder queue for prefetching
-        #queue = tf.FIFOQueue(capacity=100,
-        #                     dtypes=[tf.float32, tf.float32, tf.float32],
-        #                     shapes=[train_placeholder_anchor_data.get_shape().as_list()[1:],
-        #                             train_placeholder_positive_data.get_shape().as_list()[1:],
-        #                             train_placeholder_negative_data.get_shape().as_list()[1:]])
+        queue = tf.FIFOQueue(capacity=100,
+                             dtypes=[tf.float32, tf.float32, tf.float32],
+                             shapes=[train_placeholder_anchor_data.get_shape().as_list()[1:],
+                                     train_placeholder_positive_data.get_shape().as_list()[1:],
+                                     train_placeholder_negative_data.get_shape().as_list()[1:]])

        # Fetching the place holders from the queue
-        #enqueue_op = queue.enqueue_many([train_placeholder_anchor_data,
-        #                                 train_placeholder_positive_data,
-        #                                 train_placeholder_negative_data])
-        #train_anchor_feature_batch, train_positive_label_batch, train_negative_label_batch = \
-        #    queue.dequeue_many(train_data_shuffler.batch_size)
+        enqueue_op = queue.enqueue_many([train_placeholder_anchor_data,
+                                         train_placeholder_positive_data,
+                                         train_placeholder_negative_data])
+        train_anchor_feature_batch, train_positive_feature_batch, train_negative_feature_batch = \
+            queue.dequeue_many(train_data_shuffler.batch_size)

        # Creating the architecture for train and validation
        if not isinstance(self.architecture, SequenceNetwork):
            raise ValueError("The variable `architecture` must be an instance of "
                             "`bob.learn.tensorflow.network.SequenceNetwork`")

-        #############
-        train_anchor_feature_batch, _ = train_data_shuffler.get_placeholders(name="train_anchor")
-        train_positive_feature_batch, _ = train_data_shuffler.get_placeholders(name="train_positive")
-        train_negative_feature_batch, _ = train_data_shuffler.get_placeholders(name="train_negative")
-        #############

        # Creating the siamese graph
        #import ipdb; ipdb.set_trace();
        train_anchor_graph = self.architecture.compute_graph(train_anchor_feature_batch)
...

@@ -145,9 +138,9 @@ class TripletTrainer(Trainer):

            tf.initialize_all_variables().run()

            # Start a thread to enqueue data asynchronously, and hide I/O latency.
-            #thread_pool = tf.train.Coordinator()
-            #tf.train.start_queue_runners(coord=thread_pool)
-            #threads = start_thread()
+            thread_pool = tf.train.Coordinator()
+            tf.train.start_queue_runners(coord=thread_pool)
+            threads = start_thread()

            # TENSOR BOARD SUMMARY
            train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'LOGS'), session.graph)
...

@@ -165,18 +158,24 @@ class TripletTrainer(Trainer):

            for step in range(self.iterations):

-                batch_anchor, batch_positive, batch_negative = train_data_shuffler.get_random_triplet()
+                #batch_anchor, batch_positive, batch_negative = train_data_shuffler.get_random_triplet()
+                #feed_dict = {train_anchor_feature_batch: batch_anchor,
+                #             train_positive_feature_batch: batch_positive,
+                #             train_negative_feature_batch: batch_negative}
-                feed_dict = {train_anchor_feature_batch: batch_anchor,
-                             train_positive_feature_batch: batch_positive,
-                             train_negative_feature_batch: batch_negative}

                #_, l, lr, summary, pos, neg = session.run([optimizer, loss_train, learning_rate, merged, within_class, between_class], feed_dict=feed_dict)
                #_, l, lr, pos, neg, f_anchor, f_positive, f_negative = session.run(
                #    [optimizer, loss_train, learning_rate, within_class, between_class, train_anchor_feature_batch, train_positive_feature_batch, train_negative_feature_batch], feed_dict=feed_dict)
                #import ipdb; ipdb.set_trace();
-                _, l, lr, summary = session.run([optimizer, loss_train, learning_rate, merged], feed_dict=feed_dict)
+                _, l, lr, summary = session.run([optimizer, loss_train, learning_rate, merged])
                #_, l, lr= session.run([optimizer, loss_train, learning_rate])

                train_writer.add_summary(summary, step)
                print str(step) + " -- loss: " + str(l)
                #print str(step) + " -- loss: " + str(l)
                #print str(step) + " -- loss: {0}; pos: {1}; neg: {2}".format(l, pos, neg)
                sys.stdout.flush()

                if validation_data_shuffler is not None and step % self.snapshot == 0:
...
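This commit moves the trainer from feed_dict-fed placeholders to a tf.FIFOQueue prefetch: the training session.run now dequeues batches instead of being fed directly, and the enqueue side runs in a background thread (the threads = start_thread() call above). The committed start_thread is not part of this diff; a hedged sketch of the TF 0.x enqueue-thread pattern it implies (the function name and parameters below are assumptions):

import threading

def start_enqueue_thread(session, enqueue_op, shuffler, placeholders):
    # Sketch of the enqueue side implied by `threads = start_thread()` above.
    anchor_ph, positive_ph, negative_ph = placeholders

    def enqueue_loop():
        while True:
            # Sample one batch of triplets and push it into the FIFO queue.
            batch_a, batch_p, batch_n = shuffler.get_random_triplet()
            session.run(enqueue_op, feed_dict={anchor_ph: batch_a,
                                               positive_ph: batch_p,
                                               negative_ph: batch_n})

    thread = threading.Thread(target=enqueue_loop)
    thread.daemon = True  # do not block interpreter exit
    thread.start()
    return [thread]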