Commit ccc7f48a authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

[sphinx] Fixing Warnings

parent 16c9feff
......@@ -2,6 +2,20 @@
.. Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
.. Thu 30 Jan 08:46:53 2014 CET
.. image:: http://img.shields.io/badge/docs-stable-yellow.png
:target: http://pythonhosted.org/bob.learn.tensorflow/index.html
.. image:: http://img.shields.io/badge/docs-latest-orange.png
:target: https://www.idiap.ch/software/bob/docs/latest/bob/bob.learn.tensorflow/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.learn.tensorflow/badges/master/build.svg
:target: https://gitlab.idiap.ch/bob/bob.learn.tensorflow/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
:target: https://gitlab.idiap.ch/bob/bob.learn.tensorflow
.. image:: http://img.shields.io/pypi/v/bob.learn.tensorflow.png
:target: https://pypi.python.org/pypi/bob.learn.tensorflow
.. image:: http://img.shields.io/pypi/dm/bob.learn.tensorflow.png
:target: https://pypi.python.org/pypi/bob.learn.tensorflow
===========================
Bob support for tensorflow
===========================
......
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
from .ExperimentAnalizer import ExperimentAnalizer
from .SoftmaxAnalizer import SoftmaxAnalizer
......
......@@ -15,6 +15,7 @@ class Base(object):
The class provide base functionalities to shuffle the data to train a neural network
**Parameters**
data:
Input data to be trained
......
......@@ -21,6 +21,7 @@ class Disk(Base):
The data is loaded on the fly.
**Parameters**
data:
Input data to be trained
......
......@@ -15,6 +15,7 @@ class Memory(Base):
This datashuffler deals with memory databases that are stored in a :py:class:`numpy.array`
**Parameters**
data:
Input data to be trained
......
......@@ -3,12 +3,12 @@
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST
import numpy
import tensorflow as tf
from .Base import Base
from bob.learn.tensorflow.network import SequenceNetwork
class OnLineSampling(object):
class OnlineSampling(Base):
"""
This data shuffler uses the current state of the network to select the samples.
This class is not meant to be used, but extended.
......
......@@ -7,7 +7,7 @@ import numpy
import tensorflow as tf
from .Memory import Memory
from Triplet import Triplet
from .Triplet import Triplet
from bob.learn.tensorflow.datashuffler.Normalizer import Linear
......
......@@ -6,9 +6,9 @@
import numpy
import tensorflow as tf
from Disk import Disk
from Triplet import Triplet
from OnlineSampling import OnLineSampling
from .Disk import Disk
from .Triplet import Triplet
from .OnlineSampling import OnlineSampling
from scipy.spatial.distance import euclidean, cdist
import logging
......@@ -16,7 +16,7 @@ logger = logging.getLogger("bob.learn.tensorflow")
from bob.learn.tensorflow.datashuffler.Normalizer import Linear
class TripletWithFastSelectionDisk(Triplet, Disk, OnLineSampling):
class TripletWithFastSelectionDisk(Triplet, Disk, OnlineSampling):
"""
This data shuffler generates triplets from :py:class:`bob.learn.tensorflow.datashuffler.Triplet` and
:py:class:`bob.learn.tensorflow.datashuffler.Disk` shufflers.
......@@ -27,12 +27,11 @@ class TripletWithFastSelectionDisk(Triplet, Disk, OnLineSampling):
"Facenet: A unified embedding for face recognition and clustering." Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition. 2015.
In this shuffler, the triplets are selected as follows:
1. Select M identities
2. Get N pairs anchor-positive (for each M identities) such that the argmax(anchor, positive)
3. For each pair anchor-positive, find the "semi-hard" negative samples such that
argmin(||f(x_a) - f(x_p)||^2 < ||f(x_a) - f(x_n)||^2
1. Select M identities.
2. Get N pairs anchor-positive (for each M identities) such that the argmax(anchor, positive).
3. For each pair anchor-positive, find the "semi-hard" negative samples, i.e. those satisfying :math:`||f(x_a) - f(x_p)||^2 < ||f(x_a) - f(x_n)||^2` while minimizing :math:`||f(x_a) - f(x_n)||^2`.
**Parameters**
......@@ -142,8 +141,6 @@ class TripletWithFastSelectionDisk(Triplet, Disk, OnLineSampling):
samples_a[i, ...] = self.get_anchor(anchor_labels[i])
embedding_a = self.project(samples_a)
print "EMBEDDING {0} ".format(embedding_a[:, 0])
# Getting the positives
samples_p, embedding_p, d_anchor_positive = self.get_positives(anchor_labels, embedding_a)
samples_n = self.get_negative(anchor_labels, embedding_a, d_anchor_positive)
......
......@@ -8,7 +8,7 @@ import tensorflow as tf
from .Disk import Disk
from .Triplet import Triplet
from .OnlineSampling import OnLineSampling
from .OnlineSampling import OnlineSampling
from scipy.spatial.distance import euclidean
from bob.learn.tensorflow.datashuffler.Normalizer import Linear
......@@ -17,12 +17,13 @@ logger = logging.getLogger("bob.learn.tensorflow")
from bob.learn.tensorflow.datashuffler.Normalizer import Linear
class TripletWithSelectionDisk(Triplet, Disk, OnLineSampling):
class TripletWithSelectionDisk(Triplet, Disk, OnlineSampling):
"""
This data shuffler generates triplets from :py:class:`bob.learn.tensorflow.datashuffler.Triplet` shufflers.
The selection of the triplets is random.
**Parameters**
data:
Input data to be trained
......
......@@ -6,14 +6,14 @@
import numpy
import tensorflow as tf
from OnlineSampling import OnLineSampling
from Memory import Memory
from Triplet import Triplet
from .OnlineSampling import OnlineSampling
from .Memory import Memory
from .Triplet import Triplet
from scipy.spatial.distance import euclidean
from bob.learn.tensorflow.datashuffler.Normalizer import Linear
class TripletWithSelectionMemory(Triplet, Memory, OnLineSampling):
class TripletWithSelectionMemory(Triplet, Memory, OnlineSampling):
"""
This data shuffler generates triplets from :py:class:`bob.learn.tensorflow.datashuffler.Triplet` and
:py:class:`bob.learn.tensorflow.datashuffler.Memory` shufflers.
......@@ -24,12 +24,11 @@ class TripletWithSelectionMemory(Triplet, Memory, OnLineSampling):
"Facenet: A unified embedding for face recognition and clustering." Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition. 2015.
In this shuffler, the triplets are selected as follows:
1. Select M identities
2. Get N pairs anchor-positive (for each M identities) such that the argmax(anchor, positive)
3. For each pair anchor-positive, find the "semi-hard" negative samples such that
argmin(||f(x_a) - f(x_p)||^2 < ||f(x_a) - f(x_n)||^2
1. Select M identities.
2. Get N pairs anchor-positive (for each M identities) such that the argmax(anchor, positive).
3. For each pair anchor-positive, find the "semi-hard" negative samples, i.e. those satisfying :math:`||f(x_a) - f(x_p)||^2 < ||f(x_a) - f(x_n)||^2` while minimizing :math:`||f(x_a) - f(x_n)||^2`.
**Parameters**
......
# see https://docs.python.org/3/library/pkgutil.html
from .Base import Base
from .OnlineSampling import OnlineSampling
from .Siamese import Siamese
from .Triplet import Triplet
from .Memory import Memory
from .Disk import Disk
from .OnlineSampling import OnLineSampling
from .SiameseMemory import SiameseMemory
from .TripletMemory import TripletMemory
......
......@@ -14,6 +14,7 @@ class Layer(object):
Layer base class
**Parameters**
name: str
The name of the layer
......
......@@ -13,7 +13,7 @@ from .InputLayer import InputLayer
def __appropriate__(*args):
"""Says object was actually declared here, an not on the import module.
Parameters:
**Parameters**
*args: An iterable of objects to modify
......
......@@ -17,9 +17,10 @@ class ContrastiveLoss(BaseLoss):
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
L = 0.5 * (Y) * D^2 + 0.5 * (1-Y) * {max(0, margin - D)}^2
:math:`L = 0.5 * (Y) * D^2 + 0.5 * (1-Y) * {max(0, margin - D)}^2`
**Parameters**
left_feature:
First element of the pair
......
......@@ -19,9 +19,10 @@ class TripletLoss(BaseLoss):
"Facenet: A unified embedding for face recognition and clustering."
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 2015.
L = sum( |f_a - f_p|^2 - |f_a - f_n|^2 + \lambda)
:math:`L = sum( |f_a - f_p|^2 - |f_a - f_n|^2 + \lambda)`
**Parameters**
left_feature:
First element of the pair
......
......@@ -317,9 +317,9 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
open(path+"_sequence_net.pickle", 'w').write(self.pickle_architecture)
return saver.save(session, path)
def load(self, path, clear_devices=False):
def load(self, path, clear_devices=False, session_from_scratch=False):
session = Session.instance().session
session = Session.instance(new=session_from_scratch).session
self.sequence_net = pickle.loads(open(path+"_sequence_net.pickle").read())
if clear_devices:
......
......@@ -224,10 +224,10 @@ class VGG16_mod(SequenceNetwork):
weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
bias_initialization=Constant(use_gpu=self.use_gpu)
))
self.add(AveragePooling(name="pooling5", strides=[1, 2, 2, 1]))
self.add(AveragePooling(name="pooling5", shape=[1, 7, 7, 1], strides=[1, 7, 7, 1]))
if do_dropout:
self.add(Dropout(name="dropout", keep_prob=0.4))
self.add(Dropout(name="dropout", keep_prob=0.5))
self.add(FullyConnected(name="fc8", output_dim=n_classes,
activation=None,
......
......@@ -4,11 +4,12 @@
# @date: Thu 13 Oct 2016 13:35 CEST
import numpy
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, Disk, SiameseDisk, TripletDisk, ImageAugmentation
from bob.learn.tensorflow.network import Chopra
from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, ImageAugmentation
from bob.learn.tensorflow.network import Chopra, SequenceNetwork
from bob.learn.tensorflow.loss import BaseLoss, ContrastiveLoss, TripletLoss
from bob.learn.tensorflow.trainers import Trainer, SiameseTrainer, TripletTrainer, constant
from .test_cnn_scratch import validate_network
import pkg_resources
from bob.learn.tensorflow.utils import load_mnist
import tensorflow as tf
......@@ -99,7 +100,10 @@ def test_cnn_trainer():
iterations=iterations,
analizer=None,
prefetch=False,
learning_rate=constant(0.05, name="regular_lr"),
optimizer=tf.train.AdamOptimizer(name="adam_softmax"),
temp_dir=directory)
trainer.train(train_data_shuffler)
accuracy = validate_network(validation_data, validation_labels, architecture)
......@@ -139,6 +143,7 @@ def test_siamesecnn_trainer():
prefetch=False,
analizer=None,
learning_rate=constant(0.05, name="siamese_lr"),
optimizer=tf.train.AdamOptimizer(name="adam_siamese"),
temp_dir=directory)
trainer.train(train_data_shuffler)
......@@ -181,6 +186,7 @@ def test_tripletcnn_trainer():
prefetch=False,
analizer=None,
learning_rate=constant(0.05, name="triplet_lr"),
optimizer=tf.train.AdamOptimizer(name="adam_triplet"),
temp_dir=directory)
trainer.train(train_data_shuffler)
......
......@@ -8,7 +8,6 @@
Some unit tests that create networks on the fly
"""
import numpy
import pkg_resources
from bob.learn.tensorflow.utils import load_mnist
......@@ -38,9 +37,8 @@ def test_load_test_cnn():
# Creating datashufflers
validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
network = SequenceNetwork()
network.load(pkg_resources.resource_filename(__name__, 'data/cnn_mnist/model.ckp'))
network.load(pkg_resources.resource_filename(__name__, 'data/cnn_mnist/model.ckp'), session_from_scratch=True)
accuracy = validate_network(validation_data, validation_labels, network)
assert accuracy > 80
del network
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment