bob / bob.learn.tensorflow · Commits

Commit b16aa7c5
Authored Nov 03, 2016 by Tiago de Freitas Pereira
Fixed issues with the siamese

Parent: b85a344a

Showing 9 changed files with 12 additions and 526 deletions (+12 -526)
bob/learn/tensorflow/script/train_mnist.py                   +0  -98
bob/learn/tensorflow/script/train_mnist_siamese.py           +0  -86
bob/learn/tensorflow/script/train_mnist_triplet.py           +0  -107
bob/learn/tensorflow/script/train_mobio.py                   +0  -114
bob/learn/tensorflow/script/train_siamese_casia_webface.py   +0  -98
bob/learn/tensorflow/trainers/SiameseTrainer.py              +5  -5
bob/learn/tensorflow/trainers/Trainer.py                     +0  -4
bob/learn/tensorflow/trainers/TripletTrainer.py              +6  -6
setup.py                                                     +1  -8

bob/learn/tensorflow/script/train_mnist.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Simple script that trains MNIST with LENET using Tensor flow

Usage:
  train_mnist.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
  train_mnist.py -h | --help

Options:
  -h --help     Show this screen.
  --batch-size=<arg>  [default: 1]
  --validation-batch-size=<arg>  [default:128]
  --iterations=<arg>  [default: 30000]
  --validation-interval=<arg>  [default: 100]
"""

from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory
from bob.learn.tensorflow.network import Lenet, MLP, Dummy, Chopra
from bob.learn.tensorflow.trainers import Trainer
from bob.learn.tensorflow.loss import BaseLoss
import bob.io.base
from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
import numpy


def main():
    args = docopt(__doc__, version='Mnist training with TensorFlow')

    BATCH_SIZE = int(args['--batch-size'])
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
    ITERATIONS = int(args['--iterations'])
    VALIDATION_TEST = int(args['--validation-interval'])
    USE_GPU = args['--use-gpu']
    perc_train = 0.9
    mnist = True

    train_data, train_labels, validation_data, validation_labels = \
        util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    # Creating datashufflers
    train_data_shuffler = Memory(train_data, train_labels,
                                 input_shape=[28, 28, 1],
                                 batch_size=BATCH_SIZE)

    validation_data_shuffler = Memory(validation_data, validation_labels,
                                      input_shape=[28, 28, 1],
                                      batch_size=VALIDATION_BATCH_SIZE)

    # Preparing the architecture
    cnn = True
    if cnn:
        architecture = Chopra(seed=SEED, fc1_output=10, batch_norm=False)
        loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
        trainer = Trainer(architecture=architecture, loss=loss,
                          iterations=ITERATIONS,
                          prefetch=False, temp_dir="./temp/cnn/no-batch-norm")
        #prefetch = False, temp_dir = "./temp/cnn/batch-norm-2convs-all-relu")
        trainer.train(train_data_shuffler, validation_data_shuffler)
        #trainer.train(train_data_shuffler)
    else:
        mlp = MLP(10, hidden_layers=[15, 20])
        loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
        trainer = Trainer(architecture=mlp, loss=loss,
                          iterations=ITERATIONS,
                          temp_dir="./LOGS/dnn")
        trainer.train(train_data_shuffler, validation_data_shuffler)

    # Loading
    #test_data_shuffler = Memory(validation_data, validation_labels,
    #                            input_shape=[28, 28, 1],
    #                            batch_size=400)
    #with tf.Session() as session:
    #new_net = Chopra(seed=SEED, fc1_output=10)
    #new_net.load(bob.io.base.HDF5File("./temp/cnn/model.hdf5"), shape=[400, 28, 28, 1], session=session)
    #[data, labels] = test_data_shuffler.get_batch()
    #print new_net(data, session)
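
All five deleted scripts share the docopt pattern used above: the module docstring doubles as the CLI grammar, and docopt() returns a plain dict keyed by option name, with value options as strings and bare flags as booleans, hence the int() wrapping. One caveat worth noting: docopt only honours defaults written as "[default: value]" with a space after the colon, so the "[default:128]" annotation on --validation-batch-size appears to have no effect and that option had to be passed explicitly. A minimal self-contained illustration follows; the demo.py usage string is hypothetical, not from the repository:

from docopt import docopt

usage = """
Usage:
  demo.py [--batch-size=<arg> --use-gpu]

Options:
  --batch-size=<arg>  [default: 1]
"""

# Value options come back as strings and flags as booleans.
args = docopt(usage, argv=['--batch-size=64', '--use-gpu'])
assert int(args['--batch-size']) == 64
assert args['--use-gpu'] is True

# When the option is omitted, the correctly spelled "[default: 1]" applies.
args = docopt(usage, argv=[])
assert int(args['--batch-size']) == 1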

bob/learn/tensorflow/script/train_mnist_siamese.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Simple script that trains MNIST with LENET using Tensor flow

Usage:
  train_mnist_siamese.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
  train_mnist_siamese.py -h | --help

Options:
  -h --help     Show this screen.
  --batch-size=<arg>  [default: 1]
  --validation-batch-size=<arg>  [default:128]
  --iterations=<arg>  [default: 30000]
  --validation-interval=<arg>  [default: 100]
"""

from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.datashuffler import SiameseMemory
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy
from bob.learn.tensorflow.trainers import SiameseTrainer
from bob.learn.tensorflow.loss import ContrastiveLoss
import numpy


def main():
    args = docopt(__doc__, version='Mnist training with TensorFlow')

    BATCH_SIZE = int(args['--batch-size'])
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
    ITERATIONS = int(args['--iterations'])
    VALIDATION_TEST = int(args['--validation-interval'])
    USE_GPU = args['--use-gpu']
    perc_train = 0.9

    # Loading data
    train_data, train_labels, validation_data, validation_labels = \
        util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    train_data_shuffler = SiameseMemory(train_data, train_labels,
                                        input_shape=[28, 28, 1],
                                        scale=True,
                                        batch_size=BATCH_SIZE)

    validation_data_shuffler = SiameseMemory(validation_data, validation_labels,
                                             input_shape=[28, 28, 1],
                                             scale=True,
                                             batch_size=VALIDATION_BATCH_SIZE)

    # Preparing the architecture
    n_classes = len(train_data_shuffler.possible_labels)
    cnn = True
    if cnn:
        # LENET PAPER CHOPRA
        architecture = Chopra(seed=SEED, fc1_output=n_classes)
        loss = ContrastiveLoss(contrastive_margin=4.)
        #optimizer = tf.train.GradientDescentOptimizer(0.000001)
        trainer = SiameseTrainer(architecture=architecture, loss=loss,
                                 iterations=ITERATIONS,
                                 snapshot=VALIDATION_TEST,
                                 prefetch=False,
                                 temp_dir="./LOGS/siamese-cnn-prefetch")
        trainer.train(train_data_shuffler, validation_data_shuffler)
    else:
        mlp = MLP(n_classes, hidden_layers=[15, 20])
        loss = ContrastiveLoss()
        trainer = SiameseTrainer(architecture=mlp, loss=loss,
                                 iterations=ITERATIONS,
                                 snapshot=VALIDATION_TEST,
                                 temp_dir="./LOGS/siamese-dnn")
        trainer.train(train_data_shuffler, validation_data_shuffler)
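
For readers new to the pair-based objective used above: ContrastiveLoss(contrastive_margin=4.) follows the Hadsell/Chopra/LeCun formulation, pulling genuine pairs together and pushing impostor pairs at least a margin apart. A minimal NumPy sketch of that standard formulation follows; the label convention (y = 0 for a genuine pair, y = 1 for an impostor pair) is an assumption for illustration, not necessarily this package's internal convention:

import numpy


def contrastive_loss(left, right, y, margin=4.0):
    """Hadsell/Chopra-style contrastive loss for a single pair of embeddings.

    y = 0: same identity (pull the embeddings together).
    y = 1: different identity (push them at least `margin` apart).
    """
    d = numpy.linalg.norm(left - right)                   # Euclidean distance
    genuine_term = (1.0 - y) * 0.5 * d ** 2               # active for matching pairs
    impostor_term = y * 0.5 * max(0.0, margin - d) ** 2   # active for non-matching pairs
    return genuine_term + impostor_term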

bob/learn/tensorflow/script/train_mnist_triplet.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Simple script that trains MNIST with LENET using Tensor flow

Usage:
  train_mnist_triplet.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
  train_mnist_triplet.py -h | --help

Options:
  -h --help     Show this screen.
  --batch-size=<arg>  [default: 1]
  --validation-batch-size=<arg>  [default:128]
  --iterations=<arg>  [default: 30000]
  --validation-interval=<arg>  [default: 100]
"""

from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.datashuffler import TripletMemory, TripletWithSelectionMemory
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy, FaceNet
from bob.learn.tensorflow.trainers import TripletTrainer
from bob.learn.tensorflow.loss import TripletLoss
import numpy


def main():
    args = docopt(__doc__, version='Mnist training with TensorFlow')

    BATCH_SIZE = int(args['--batch-size'])
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
    ITERATIONS = int(args['--iterations'])
    VALIDATION_TEST = int(args['--validation-interval'])
    USE_GPU = args['--use-gpu']
    perc_train = 0.9

    # Loading data
    train_data, train_labels, validation_data, validation_labels = \
        util.load_mnist(data_dir="./src/bob.db.mnist/bob/db/mnist/")
    train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

    #train_data_shuffler = MemoryDataShuffler(train_data, train_labels,
    #                                         input_shape=[28, 28, 1],
    #                                         scale=True,
    #                                         batch_size=BATCH_SIZE)
    #validation_data_shuffler = MemoryDataShuffler(validation_data, validation_labels,
    #                                              input_shape=[28, 28, 1],
    #                                              scale=True,
    #                                              batch_size=VALIDATION_BATCH_SIZE)

    validation_data_shuffler = TripletMemory(train_data, train_labels,
                                             input_shape=[28, 28, 1],
                                             scale=True,
                                             batch_size=VALIDATION_BATCH_SIZE)

    train_data_shuffler = TripletWithSelectionMemory(train_data, train_labels,
                                                     input_shape=[28, 28, 1],
                                                     scale=True,
                                                     batch_size=BATCH_SIZE)
    #train_data_shuffler = TripletMemory(train_data, train_labels,
    #                                    input_shape=[28, 28, 1],
    #                                    scale=True,
    #                                    batch_size=BATCH_SIZE)

    # Preparing the architecture
    n_classes = len(train_data_shuffler.possible_labels)
    #n_classes = 200
    cnn = True
    if cnn:
        architecture = FaceNet(seed=SEED, use_gpu=USE_GPU)
        #architecture = Chopra(seed=SEED, fc1_output=n_classes, use_gpu=USE_GPU)
        #architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16,use_gpu=USE_GPU)
        #architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
        #architecture = Dummy(seed=SEED)
        #architecture = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)

        loss = TripletLoss()
        optimizer = tf.train.GradientDescentOptimizer(0.000001)
        trainer = TripletTrainer(architecture=architecture, loss=loss,
                                 iterations=ITERATIONS,
                                 snapshot=VALIDATION_TEST,
                                 temp_dir="triplet/cnn-triplet-SELECTION",
                                 prefetch=False,
                                 optimizer=optimizer)
        trainer.train(train_data_shuffler, validation_data_shuffler)
        #trainer.train(train_data_shuffler)
    else:
        mlp = MLP(n_classes, hidden_layers=[15, 20])
        loss = TripletLoss()
        trainer = TripletTrainer(architecture=mlp, loss=loss,
                                 temp_dir="dnn-triplet",
                                 iterations=ITERATIONS,
                                 snapshot=VALIDATION_TEST)
        #trainer.train(train_data_shuffler, validation_data_shuffler)
        trainer.train(train_data_shuffler)
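
TripletLoss() is the FaceNet-style ranking objective: each anchor embedding should sit closer to a same-class (positive) sample than to a different-class (negative) sample, by at least a margin. A minimal NumPy sketch of that standard formulation, illustrative rather than this package's exact code (the 0.2 default below mirrors the TripletLoss(margin=0.2) used by the face scripts that follow):

import numpy


def triplet_loss(anchor, positive, negative, margin=0.2):
    """FaceNet-style triplet loss for one (anchor, positive, negative) triplet."""
    d_pos = numpy.sum((anchor - positive) ** 2)  # squared distance to same-class sample
    d_neg = numpy.sum((anchor - negative) ** 2)  # squared distance to other-class sample
    # Zero once the negative is at least `margin` farther away than the positive.
    return max(0.0, d_pos - d_neg + margin)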

bob/learn/tensorflow/script/train_mobio.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Simple script that trains CASIA WEBFACE

Usage:
  train_mobio.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
  train_mobio.py -h | --help

Options:
  -h --help     Show this screen.
  --batch-size=<arg>  [default: 1]
  --validation-batch-size=<arg>  [default:128]
  --iterations=<arg>  [default: 30000]
  --validation-interval=<arg>  [default: 100]
"""

from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.datashuffler import TripletWithSelectionDisk, TripletDisk, TripletWithFastSelectionDisk
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy, FaceNet, FaceNetSimple, VGG16
from bob.learn.tensorflow.trainers import SiameseTrainer, Trainer, TripletTrainer, constant
from bob.learn.tensorflow.loss import ContrastiveLoss, BaseLoss, TripletLoss
import numpy
import os

#os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
os.environ["CUDA_VISIBLE_DEVICES"] = ""


def main():
    args = docopt(__doc__, version='Mnist training with TensorFlow')

    BATCH_SIZE = int(args['--batch-size'])
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
    ITERATIONS = int(args['--iterations'])
    VALIDATION_TEST = int(args['--validation-interval'])
    USE_GPU = args['--use-gpu']
    perc_train = 0.9

    import bob.db.mobio
    db_mobio = bob.db.mobio.Database()
    directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA_WEBFACE/mobio/preprocessed/"
    #directory = "./preprocessed/"

    # Preparing train set
    #train_objects = db_mobio.objects(protocol="male", groups="world")
    train_objects = sorted(db_mobio.objects(protocol="male", groups="world"), key=lambda x: x.id)
    train_labels = [int(o.client_id) for o in train_objects]
    n_classes = len(set(train_labels))

    train_file_names = [o.make_path(directory=directory, extension=".hdf5")
                        for o in train_objects]

    #train_data_shuffler = TripletWithSelectionDisk(train_file_names, train_labels,
    #                                               input_shape=[56, 56, 3],
    #                                               total_identities=8,
    #                                               batch_size=BATCH_SIZE)
    train_data_shuffler = TripletWithFastSelectionDisk(train_file_names, train_labels,
                                                       input_shape=[224, 224, 3],
                                                       batch_size=BATCH_SIZE,
                                                       total_identities=8)

    # Preparing train set
    validation_objects = sorted(db_mobio.objects(protocol="male", groups="dev"), key=lambda x: x.id)
    #validation_objects = db_mobio.objects(protocol="male", groups="world")
    validation_labels = [o.client_id for o in validation_objects]

    validation_file_names = [o.make_path(directory=directory, extension=".hdf5")
                             for o in validation_objects]

    validation_data_shuffler = TripletDisk(validation_file_names, validation_labels,
                                           input_shape=[224, 224, 3],
                                           batch_size=VALIDATION_BATCH_SIZE)

    # Preparing the architecture
    #architecture = Chopra(seed=SEED, fc1_output=n_classes)
    #architecture = FaceNet(seed=SEED, use_gpu=USE_GPU)
    architecture = VGG16(seed=SEED, use_gpu=USE_GPU)

    #loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
    #trainer = Trainer(architecture=architecture, loss=loss,
    #                  iterations=ITERATIONS,
    #                  prefetch=False,
    #                  optimizer=optimizer,
    #                  temp_dir="./LOGS/cnn")

    #loss = ContrastiveLoss(contrastive_margin=4.)
    #trainer = SiameseTrainer(architecture=architecture, loss=loss,
    #                         iterations=ITERATIONS,
    #                         prefetch=False,
    #                         optimizer=optimizer,
    #                         temp_dir="./LOGS_MOBIO/siamese-cnn-prefetch")

    loss = TripletLoss(margin=0.2)
    optimizer = tf.train.GradientDescentOptimizer(0.05)
    trainer = TripletTrainer(architecture=architecture, loss=loss,
                             iterations=ITERATIONS,
                             learning_rate=constant(0.05),
                             prefetch=False,
                             optimizer=optimizer,
                             snapshot=200,
                             temp_dir="/idiap/temp/tpereira/VGG16/mobio_world")

    #trainer.train(train_data_shuffler, validation_data_shuffler)
    trainer.train(train_data_shuffler)
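
One detail worth flagging in this script: it blanks CUDA_VISIBLE_DEVICES at import time, which hides every GPU from TensorFlow regardless of the --use-gpu flag, so this run was effectively forced onto the CPU. For reference, the behaviour of that environment variable:

import os

# Must be set before TensorFlow initializes CUDA: an empty string hides all
# GPUs (CPU-only execution), while a comma-separated list such as "0,1,2,3"
# (the commented-out alternative in the script above) exposes those devices.
os.environ["CUDA_VISIBLE_DEVICES"] = ""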

bob/learn/tensorflow/script/train_siamese_casia_webface.py
deleted 100644 → 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Simple script that trains CASIA WEBFACE

Usage:
  train_siamese_casia_webface.py [--batch-size=<arg> --validation-batch-size=<arg> --iterations=<arg> --validation-interval=<arg> --use-gpu]
  train_siamese_casia_webface.py -h | --help

Options:
  -h --help     Show this screen.
  --batch-size=<arg>  [default: 1]
  --validation-batch-size=<arg>  [default:128]
  --iterations=<arg>  [default: 30000]
  --validation-interval=<arg>  [default: 100]
"""

from docopt import docopt
import tensorflow as tf
from .. import util
SEED = 10
from bob.learn.tensorflow.datashuffler import TripletDisk, TripletWithSelectionDisk, TripletWithFastSelectionDisk
from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout, VGG, Chopra, Dummy, FaceNet, FaceNetSimple, VGG16
from bob.learn.tensorflow.trainers import SiameseTrainer, TripletTrainer, constant
from bob.learn.tensorflow.loss import ContrastiveLoss, TripletLoss
import numpy


def main():
    args = docopt(__doc__, version='Mnist training with TensorFlow')

    BATCH_SIZE = int(args['--batch-size'])
    VALIDATION_BATCH_SIZE = int(args['--validation-batch-size'])
    ITERATIONS = int(args['--iterations'])
    VALIDATION_TEST = int(args['--validation-interval'])
    USE_GPU = args['--use-gpu']
    perc_train = 0.9

    import bob.db.mobio
    db_mobio = bob.db.mobio.Database()

    import bob.db.casia_webface
    db_casia = bob.db.casia_webface.Database()

    # Preparing train set
    train_objects = sorted(db_casia.objects(groups="world"), key=lambda x: x.id)
    #train_objects = db.objects(groups="world")
    train_labels = [int(o.client_id) for o in train_objects]
    directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA_WEBFACE/casia_webface/preprocessed"

    train_file_names = [o.make_path(directory=directory, extension=".hdf5")
                        for o in train_objects]

    #train_data_shuffler = TripletWithFastSelectionDisk(train_file_names, train_labels,
    #                                                   input_shape=[224, 224, 3],
    #                                                   batch_size=BATCH_SIZE,
    #                                                   total_identities=16)
    train_data_shuffler = TripletDisk(train_file_names, train_labels,
                                      input_shape=[224, 224, 3],
                                      batch_size=VALIDATION_BATCH_SIZE)

    # Preparing train set
    directory = "/idiap/temp/tpereira/DEEP_FACE/CASIA_WEBFACE/mobio/preprocessed"
    validation_objects = sorted(db_mobio.objects(protocol="male", groups="dev"), key=lambda x: x.id)
    validation_labels = [o.client_id for o in validation_objects]

    validation_file_names = [o.make_path(directory=directory, extension=".hdf5")
                             for o in validation_objects]

    validation_data_shuffler = TripletDisk(validation_file_names, validation_labels,
                                           input_shape=[224, 224, 3],
                                           batch_size=VALIDATION_BATCH_SIZE)

    # Preparing the architecture
    # LENET PAPER CHOPRA
    #architecture = FaceNetSimple(seed=SEED, use_gpu=USE_GPU)
    architecture = VGG16(seed=SEED, use_gpu=USE_GPU)

    optimizer = tf.train.GradientDescentOptimizer(0.05)
    loss = TripletLoss(margin=0.2)
    trainer = TripletTrainer(architecture=architecture, loss=loss,
                             iterations=ITERATIONS,
                             learning_rate=constant(),
                             optimizer=optimizer,
                             prefetch=False,
                             temp_dir="/idiap/temp/tpereira/CNN_MODELS_GRAD_DESC/triplet-cnn-selection-gpu")

    #import ipdb; ipdb.set_trace()
    #trainer.train(train_data_shuffler, validation_data_shuffler)
    trainer.train(train_data_shuffler)

bob/learn/tensorflow/trainers/SiameseTrainer.py

@@ -138,7 +138,7 @@ class SiameseTrainer(Trainer):
                                  tf.get_collection("validation_placeholder_data2")[0],
                                  tf.get_collection("validation_placeholder_label")[0])

-    def compute_graph(self, data_shuffler, prefetch=False, name="", train=True):
+    def compute_graph(self, data_shuffler, prefetch=False, name="", training=True):
         """
         Computes the graph for the trainer.

@@ -173,14 +173,14 @@ class SiameseTrainer(Trainer):
             [feature_left_batch, feature_right_batch, label_batch] = data_shuffler.get_placeholders(name=name)

         # Creating the siamese graph
-        train_left_graph = self.architecture.compute_graph(feature_left_batch)
-        train_right_graph = self.architecture.compute_graph(feature_right_batch)
+        train_left_graph = self.architecture.compute_graph(feature_left_batch, training=training)
+        train_right_graph = self.architecture.compute_graph(feature_right_batch, training=training)

         graph, between_class_graph, within_class_graph = self.loss(label_batch,
                                                                    train_left_graph,
                                                                    train_right_graph)

-        if train:
+        if training:
             self.between_class_graph_train = between_class_graph
             self.within_class_graph_train = within_class_graph
         else:

@@ -243,7 +243,7 @@ class SiameseTrainer(Trainer):
         if self.validation_summary_writter is None:
             self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

-        self.validation_graph = self.compute_graph(data_shuffler, name="validation", train=False)
+        self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)

         feed_dict = self.get_feed_dict(data_shuffler)
         l, bt_class, wt_class = session.run([self.validation_graph,
                                              self.between_class_graph_validation,
                                              self.within_class_graph_validation],
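
Both branches in the hunk above are built by calling compute_graph on the same self.architecture object; that shared object is what makes the network siamese, since the left and right towers evaluate identical weights and only the inputs differ. In the era-appropriate raw TensorFlow API, the same sharing is typically written with a reused variable scope. The sketch below is illustrative only, not this package's code, and assumes flattened 784-dimensional inputs:

import tensorflow as tf


def branch(x):
    # Both calls below resolve "siamese/w" to the same underlying variable.
    w = tf.get_variable("w", shape=[784, 10])
    return tf.matmul(x, w)

left = tf.placeholder(tf.float32, [None, 784])
right = tf.placeholder(tf.float32, [None, 784])

with tf.variable_scope("siamese") as scope:
    left_graph = branch(left)
    scope.reuse_variables()  # reuse, rather than recreate, the branch weights
    right_graph = branch(right)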

bob/learn/tensorflow/trainers/Trainer.py

@@ -266,10 +266,6 @@ class Trainer(object):
             self.validation_graph = self.compute_graph(validation_data_shuffler, name="validation", training=False)
             tf.add_to_collection("validation_graph", self.validation_graph)

-            batch, label = validation_data_shuffler.get_placeholders()
-            tf.add_to_collection("validation_placeholder_data", batch)
-            tf.add_to_collection("validation_placeholder_label", label)
-
             self.bootstrap_placeholders(train_data_shuffler, validation_data_shuffler)

     def bootstrap_placeholders(self, train_data_shuffler, validation_data_shuffler):
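
The four removed lines duplicated bookkeeping that bootstrap_placeholders, invoked immediately afterwards, already performs; consolidating it there appears to be the point of this hunk. Purely as a hypothetical reconstruction from the removed lines (the actual method body is not shown in this diff), the helper presumably does something like:

def bootstrap_placeholders(self, train_data_shuffler, validation_data_shuffler):
    # Hypothetical sketch: register the validation placeholders in graph
    # collections so they can later be fetched by name, e.g. via
    # tf.get_collection("validation_placeholder_data")[0] as in the
    # trainers above.
    batch, label = validation_data_shuffler.get_placeholders()
    tf.add_to_collection("validation_placeholder_data", batch)
    tf.add_to_collection("validation_placeholder_label", label)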

bob/learn/tensorflow/trainers/TripletTrainer.py

@@ -137,7 +137,7 @@ class TripletTrainer(Trainer):
                                  tf.get_collection("validation_placeholder_data2")[0],
                                  tf.get_collection("validation_placeholder_data3")[0])

-    def compute_graph(self, data_shuffler, prefetch=False, name="", train=True):
+    def compute_graph(self, data_shuffler, prefetch=False, name="", training=True):
         """
         Computes the graph for the trainer.

@@ -177,15 +177,15 @@ class TripletTrainer(Trainer):
                 data_shuffler.get_placeholders(name=name)

         # Creating the siamese graph
-        train_anchor_graph = self.architecture.compute_graph(feature_anchor_batch)
-        train_positive_graph = self.architecture.compute_graph(feature_positive_batch)
-        train_negative_graph = self.architecture.compute_graph(feature_negative_batch)
+        train_anchor_graph = self.architecture.compute_graph(feature_anchor_batch, training=training)
+        train_positive_graph = self.architecture.compute_graph(feature_positive_batch, training=training)
+        train_negative_graph = self.architecture.compute_graph(feature_negative_batch, training=training)

         graph, between_class_graph, within_class_graph = self.loss(train_anchor_graph,
                                                                    train_positive_graph,
                                                                    train_negative_graph)

-        if train:
+        if training:
             self.between_class_graph_train = between_class_graph
             self.within_class_graph_train = within_class_graph
         else:

@@ -252,7 +252,7 @@ class TripletTrainer(Trainer):
             self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

-        self.validation_graph = self.compute_graph(data_shuffler, name="validation", train=False)
+        self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)

         feed_dict = self.get_feed_dict(data_shuffler)
         l, bt_class, wt_class = session.run([self.validation_graph,
                                              self.between_class_graph_validation,
                                              self.within_class_graph_validation],

setup.py

@@ -74,16 +74,9 @@ setup(
         # scripts should be declared using this entry:
         'console_scripts': [
-            'train_mnist.py = bob.learn.tensorflow.script.train_mnist:main',
-            'train_mnist_siamese.py = bob.learn.tensorflow.script.train_mnist_siamese:main',
-            'train_mnist_triplet.py = bob.learn.tensorflow.script.train_mnist_triplet:main',
-            'train_siamese_casia_webface.py = bob.learn.tensorflow.script.train_siamese_casia_webface:main',
-            'train_mobio.py = bob.learn.tensorflow.script.train_mobio:main',
         ],
     },

     # Classifiers are important if you plan to distribute this package through
     # PyPI. You can find the complete list of classifiers that are valid and
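
Since this commit deletes the five script modules, leaving their console_scripts entries in place would have left installed commands pointing at modules that no longer exist. At install time, each entry expands into a setuptools-generated wrapper roughly like the following sketch (simplified; the real wrapper goes through pkg_resources):

#!/usr/bin/env python
# Roughly what setuptools generated for the removed 'train_mnist.py' entry:
# import the target module and call its main() function. With
# bob.learn.tensorflow.script.train_mnist deleted, this import would fail.
import sys
from bob.learn.tensorflow.script.train_mnist import main

if __name__ == '__main__':
    sys.exit(main())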