Commit 9a0fa0ca authored by Amir MOHAMMADI

Merge branch 'cleanup' into 'master'

Cleanup

See merge request !89
parents 7b676965 70d3fe40
Pipeline #45569 passed with stages in 5 minutes and 32 seconds
@@ -2,16 +2,16 @@
# See https://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/timothycrosley/isort
rev: 4.3.21-2
rev: 5.6.4
hooks:
- id: isort
args: [-sl]
args: [--sl, --line-length, "88"]
- repo: https://github.com/psf/black
rev: stable
rev: 20.8b1
hooks:
- id: black
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.0.0
rev: v3.3.0
hooks:
- id: check-ast
- id: check-case-conflict
@@ -19,22 +19,8 @@ repos:
- id: end-of-file-fixer
- id: debug-statements
- id: check-added-large-files
- id: flake8
- repo: local
- repo: https://gitlab.com/pycqa/flake8
rev: 3.8.4
hooks:
- id: sphinx-build
name: sphinx build
entry: python -m sphinx.cmd.build
args: [-a, -E, -W, doc, sphinx]
language: system
files: ^doc/
types: [file]
pass_filenames: false
- id: sphinx-doctest
name: sphinx doctest
entry: python -m sphinx.cmd.build
args: [-a, -E, -b, doctest, doc, sphinx]
language: system
files: ^doc/
types: [file]
pass_filenames: false
- id: flake8
args: [--ignore, "E203,W503,E501,E302,E111,E114,E121,E402"]
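
Note: as a quick local sanity check of the bumped hook versions (isort 5.6.4, black 20.8b1, flake8 3.8.4), the whole suite can be run from Python as well as from the shell; a minimal sketch, assuming the `pre-commit` package is installed:

    import subprocess

    # Run every configured hook against the whole tree; a non-zero exit
    # means some hook modified or rejected files.
    subprocess.run(["pre-commit", "run", "--all-files"], check=True)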
@@ -2,7 +2,6 @@ import json
import os
import tensorflow as tf
from tensorflow.keras import callbacks
class CustomBackupAndRestore(tf.keras.callbacks.experimental.BackupAndRestore):
......
from .generator import Generator, dataset_using_generator
from .tfrecords import dataset_to_tfrecord, dataset_from_tfrecord, TFRECORDS_EXT
from .generator import Generator
from .generator import dataset_using_generator # noqa: F401
from .tfrecords import TFRECORDS_EXT # noqa: F401
from .tfrecords import dataset_from_tfrecord # noqa: F401
from .tfrecords import dataset_to_tfrecord # noqa: F401
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
......
@@ -14,21 +14,21 @@ class Generator:
----------
epoch : int
The number of epochs that have been passed so far.
multiple_samples : :obj:`bool`, optional
If True, it assumes that each of the bio database's samples actually
contains multiple samples. This is useful when, for example, you want to
treat video databases as image databases.
reader : :obj:`object`, optional
A callable with the signature of ``data, label, key = reader(sample)``
which takes a sample and loads it.
samples : [:obj:`object`]
A list of samples to be given to ``reader`` to load the data.
shuffle_on_epoch_end : :obj:`bool`, optional
If True, the samples are shuffled at the end of each epoch.
output_types : (object, object, object)
The types of the returned samples.
output_shapes : ``(tf.TensorShape, tf.TensorShape, tf.TensorShape)``
The shapes of the returned samples.
"""
def __init__(
@@ -76,10 +76,12 @@ class Generator:
@property
def output_types(self):
"The types of the returned samples"
return self._output_types
@property
def output_shapes(self):
"The shapes of the returned samples"
return self._output_shapes
def __call__(self):
@@ -87,8 +89,8 @@ class Generator:
Yields
------
(data, label, key) : tuple
A tuple containing the data, label, and the key.
object
Samples one by one.
"""
for sample in self.samples:
dlk = self.reader(sample)
......
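
Note: a minimal sketch of how the `Generator` API above fits together. The constructor signature is assumed from the attributes documented in the diff, and `dataset_using_generator` is the convenience wrapper imported in the `__init__` above:

    import tensorflow as tf
    from bob.learn.tensorflow.data import Generator, dataset_using_generator

    samples = list(range(4))  # anything your reader understands

    def reader(sample):
        # must return (data, label, key), matching the documented signature
        data = tf.fill((2, 2), float(sample))
        label = sample % 2
        key = str(sample)
        return data, label, key

    # assumed signature: Generator(samples, reader, ...)
    generator = Generator(samples, reader, shuffle_on_epoch_end=False)
    dataset = tf.data.Dataset.from_generator(
        generator,  # a Generator is callable and yields the reader's outputs
        output_types=generator.output_types,
        output_shapes=generator.output_shapes,
    )
    # or, equivalently, the one-call wrapper used in the tests further down:
    dataset = dataset_using_generator(samples, reader)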
@@ -8,7 +8,6 @@ import json
import tensorflow as tf
TFRECORDS_EXT = ".tfrecords"
@@ -102,7 +101,7 @@ def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
A dataset that contains the data from the TFRecord file.
"""
# these imports are needed so that eval can work
from tensorflow import TensorShape
from tensorflow import TensorShape # noqa: F401
if isinstance(tfrecord, str):
tfrecord = [tfrecord]
......
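
Note: the `TensorShape` import above looks unused, hence the new `# noqa: F401`, but as the comment says it must stay in scope: the dataset's types and shapes are stored as strings (in a JSON side-file, cf. `tfrecord_name_and_json_name` further down) and rebuilt with `eval`. A round-trip sketch, with the exact argument names being assumptions:

    import tensorflow as tf
    from bob.learn.tensorflow.data import dataset_from_tfrecord, dataset_to_tfrecord

    dataset = tf.data.Dataset.from_tensor_slices(tf.range(10))
    dataset_to_tfrecord(dataset, "example.tfrecords")  # also writes the JSON side-file
    restored = dataset_from_tfrecord("example.tfrecords")
    print(list(restored.as_numpy_iterator()))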
from .center_loss import CenterLoss, CenterLossLayer
from .center_loss import CenterLoss
from .center_loss import CenterLossLayer
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
@@ -16,8 +18,5 @@ def __appropriate__(*args):
obj.__module__ = __name__
__appropriate__(
CenterLoss,
CenterLossLayer
)
__appropriate__(CenterLoss, CenterLossLayer)
__all__ = [_ for _ in dir() if not _.startswith("_")]
@@ -6,10 +6,12 @@ class CenterLossLayer(tf.keras.layers.Layer):
Attributes
----------
centers : tf.Variable
centers
The variable that keeps track of centers.
n_classes : int
Number of classes of the task.
n_features : int
The size of the prelogits.
"""
@@ -49,12 +51,17 @@ class CenterLoss(tf.keras.losses.Loss):
Attributes
----------
alpha : float
alpha: float
The moving average coefficient for updating centers in each batch.
centers : tf.Variable
centers
The variable that keeps track of centers.
centers_layer
The layer that keeps track of centers.
update_centers: bool
Whether the centers should be updated; used at training time.
"""
def __init__(
......
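
Note: for readers of the docstrings above, this is the usual center-loss bookkeeping that `alpha` controls, sketched below. The actual layer may differ in details, such as whether the moving-average factor is `alpha` or `1 - alpha`:

    import tensorflow as tf

    n_classes, n_features, alpha = 10, 128, 0.9
    centers = tf.Variable(tf.zeros((n_classes, n_features)), trainable=False)

    def center_loss(prelogits, labels):
        # pull each sample toward the running center of its class
        centers_batch = tf.gather(centers, labels)
        return tf.reduce_mean(tf.reduce_sum((prelogits - centers_batch) ** 2, axis=-1))

    def update_centers(prelogits, labels):
        # moving-average update of the centers, done once per batch at training time
        centers_batch = tf.gather(centers, labels)
        diff = (1 - alpha) * (centers_batch - prelogits)
        centers.scatter_sub(tf.IndexedSlices(diff, labels))

    labels = tf.constant([1, 3, 1])
    prelogits = tf.random.normal((3, n_features))
    loss = center_loss(prelogits, labels)
    update_centers(prelogits, labels)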
from .embedding_accuracy import EmbeddingAccuracy, predict_using_tensors
from .embedding_accuracy import EmbeddingAccuracy
from .embedding_accuracy import predict_using_tensors # noqa: F401
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
......
@@ -2,6 +2,7 @@ from .alexnet import AlexNet_simplified
from .densenet import DenseNet
from .mine import MineModel
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
"""Says object was actually declared here, an not on the import module.
@@ -18,9 +19,5 @@ def __appropriate__(*args):
obj.__module__ = __name__
__appropriate__(
AlexNet_simplified,
DenseNet,
MineModel
)
__appropriate__(AlexNet_simplified, DenseNet, MineModel)
__all__ = [_ for _ in dir() if not _.startswith("_")]
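
Note: the body of the `__appropriate__` helper is elided in several hunks above; for reference, the full pattern shared by these `__init__` files looks like this (reconstructed from the visible fragments, so the docstring wording is approximate):

    def __appropriate__(*args):
        """Says object was actually declared here, and not in the import module.

        Fixing sphinx warnings of not being able to find classes, when the
        import path is shortened.
        """
        for obj in args:
            obj.__module__ = __name__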
@@ -61,7 +61,7 @@ def AlexNet_simplified(name="AlexNet", **kwargs):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from bob.learn.tensorflow.utils import model_summary
......
@@ -113,7 +113,7 @@ def autoencoder_face(z_dim=256, weight_decay=1e-10, decoder_last_act="tanh"):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
......
@@ -446,7 +446,7 @@ class DeepPixBiS(tf.keras.Model):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
......
@@ -142,7 +142,7 @@ def GoogLeNet(*, num_classes=1000, name="GoogLeNet", **kwargs):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
......
@@ -6,7 +6,6 @@ import logging
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import AvgPool2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Conv2D
@@ -14,9 +13,7 @@ from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalAvgPool2D
from tensorflow.keras.layers import GlobalMaxPool2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from bob.learn.tensorflow.utils import SequentialLayer
@@ -240,7 +237,7 @@ class ReductionA(tf.keras.layers.Layer):
self,
padding,
k=256,
l=256,
l=256, # noqa: E741
m=384,
n=384,
use_atrous=False,
@@ -250,7 +247,7 @@ class ReductionA(tf.keras.layers.Layer):
super().__init__(name=name, **kwargs)
self.padding = padding
self.k = k
self.l = l
self.l = l # noqa: E741
self.m = m
self.n = n
self.use_atrous = use_atrous
@@ -448,7 +445,6 @@ def InceptionResNetV1(
Conv2D_BN(80, 1, padding="valid", name="Conv2d_3b_1x1"),
Conv2D_BN(192, 3, padding="valid", name="Conv2d_4a_3x3"),
Conv2D_BN(256, 3, strides=2, padding="valid", name="Conv2d_4b_3x3"),
]
# 5x block35 (Inception-ResNet-A block): 35 x 35 x 320
@@ -503,7 +499,6 @@ def InceptionResNetV1(
)
)
# 5x block8 (Inception-ResNet-C block): 8 x 8 x 2080
for block_idx in range(1, 5):
layers.append(
@@ -515,7 +510,7 @@ def InceptionResNetV1(
name=f"block8_{block_idx}",
)
)
layers.append(
InceptionResnetBlock(
n_channels=1792,
@@ -523,10 +518,10 @@
activation=None,
block_type="block8",
block_idx=5,
name=f"block8_5",
name="block8_5",
)
)
if (include_top and pooling is None) or (bottleneck):
pooling = "avg"
@@ -545,7 +540,7 @@
# Classification block
if include_top:
layers.append(Dense(classes, name="logits"))
# Create model and call it on input to create its variables.
model = Sequential(layers, name=name, **kwargs)
model(img_input)
@@ -554,10 +549,11 @@
if __name__ == "__main__":
import pkg_resources
from bob.learn.tensorflow.utils import model_summary
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
def print_model(inputs, outputs, name=None):
print("")
print("===============")
@@ -568,7 +564,9 @@ if __name__ == "__main__":
del rows[-2]
print(tabulate(rows, headers="firstrow", tablefmt="latex"))
model = InceptionResNetV1(input_shape=(160, 160, 3), bottleneck=True, include_top=False)
model = InceptionResNetV1(
input_shape=(160, 160, 3), bottleneck=True, include_top=False
)
inputs = tf.keras.Input((160, 160, 3))
outputs = model.call(inputs)
model.summary()
\ No newline at end of file
model.summary()
@@ -744,10 +744,11 @@ def MultiScaleInceptionResNetV2(
if __name__ == "__main__":
import pkg_resources
from bob.learn.tensorflow.utils import model_summary
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
def print_model(inputs, outputs, name=None):
print("")
print("===============")
......
@@ -31,7 +31,7 @@ def LeNet5_simplified(name="LeNet5", **kwargs):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from bob.learn.tensorflow.utils import model_summary
......
@@ -7,6 +7,7 @@ Mutual Information Neural Estimation (https://arxiv.org/pdf/1801.04062.pdf)
import tensorflow as tf
class MineModel(tf.keras.Model):
"""
@@ -21,7 +22,7 @@ class MineModel(tf.keras.Model):
super().__init__(name=name, **kwargs)
self.units = units
self.is_mine_f = is_mine_f
self.transformer_x = tf.keras.layers.Dense(self.units)
self.transformer_z = tf.keras.layers.Dense(self.units)
self.transformer_xz = tf.keras.layers.Dense(self.units)
@@ -32,19 +33,21 @@ class MineModel(tf.keras.Model):
h1_x = self.transformer_x(x)
h1_z = self.transformer_z(z)
h1 = tf.keras.layers.ReLU()(h1_x + h1_z)
h2 = self.transformer_output(tf.keras.layers.ReLU()(self.transformer_xz(h1)))
h2 = self.transformer_output(
tf.keras.layers.ReLU()(self.transformer_xz(h1))
)
return h2
def compute_lower_bound(x, z):
t_xz = compute(x,z)
t_xz = compute(x, z)
z_shuffle = tf.random.shuffle(z)
t_x_z = compute(x, z_shuffle)
if self.is_mine_f:
lb = -(
tf.reduce_mean(t_xz, axis=0)
- tf.reduce_mean(tf.math.exp(t_x_z-1))
- tf.reduce_mean(tf.math.exp(t_x_z - 1))
)
else:
lb = -(
@@ -60,9 +63,7 @@ class MineModel(tf.keras.Model):
return compute_lower_bound(x, z)
def get_config(self):
config = super().get_config()
config.update({"units": self.units})
return config
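
Note: the two branches of `compute_lower_bound` are the two estimators from the MINE paper linked in the module docstring. `tf.random.shuffle(z)` provides approximate samples from the product of marginals, `t_xz` and `t_x_z` are the statistics network T evaluated on joint and on shuffled pairs, and the model returns the negated bound so it can be minimized as a loss. In the paper's notation (the second line matches the `exp(t_x_z - 1)` term visible above; the first corresponds to the truncated `else` branch):

    % Donsker-Varadhan bound (is_mine_f=False):
    I(X;Z) \ge \mathbb{E}_{P_{XZ}}[T(x,z)] - \log \mathbb{E}_{P_X \otimes P_Z}\left[e^{T(x,z)}\right]
    % f-divergence variant (is_mine_f=True):
    I(X;Z) \ge \mathbb{E}_{P_{XZ}}[T(x,z)] - \mathbb{E}_{P_X \otimes P_Z}\left[e^{T(x,z)-1}\right]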
@@ -65,7 +65,7 @@ def MSUPatch(name="MSUPatch", **kwargs):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
......
@@ -41,7 +41,7 @@ def SimpleCNN(input_shape=(28, 28, 3), inputs=None, name="SimpleCNN", **kwargs):
if __name__ == "__main__":
import pkg_resources
import pkg_resources # noqa: F401
from tabulate import tabulate
from bob.learn.tensorflow.utils import model_summary
......
@@ -12,7 +12,6 @@ from bob.extension.scripts.click_helper import ConfigCommand
from bob.extension.scripts.click_helper import ResourceOption
from bob.extension.scripts.click_helper import verbosity_option
logger = logging.getLogger(__name__)
@@ -45,8 +44,9 @@ def datasets_to_tfrecords(dataset, output, force, **kwargs):
To use this script with SGE, modify your dataset in your config file (e.g., shard it)
so that each task outputs only the part selected by the SGE_TASK_ID environment variable.
"""
from bob.extension.scripts.click_helper import log_parameters
import os
from bob.extension.scripts.click_helper import log_parameters
from bob.learn.tensorflow.data.tfrecords import dataset_to_tfrecord
from bob.learn.tensorflow.data.tfrecords import tfrecord_name_and_json_name
......
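
Note: a sketch of what the docstring's SGE advice could look like in a config file, assuming the script picks up `dataset` and `output` from the config as bob's `ConfigCommand` usually does; `NUM_SHARDS` and the toy dataset are placeholders:

    import os
    import tensorflow as tf

    NUM_SHARDS = 10
    # SGE task ids are 1-based; tf.data shard indices are 0-based
    task_id = int(os.environ["SGE_TASK_ID"]) - 1

    dataset = tf.data.Dataset.range(100)  # your real dataset here
    dataset = dataset.shard(NUM_SHARDS, task_id)
    output = f"part_{task_id}.tfrecords"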
import tensorflow as tf
from bob.learn.tensorflow.data import dataset_using_generator
mnist = tf.keras.datasets.mnist
(x_train, y_train), (_, _) = mnist.load_data()
samples = (tf.keras.backend.arange(len(x_train)), x_train, y_train)
x_train, y_train = x_train[:10], y_train[:10]
samples = zip(tf.keras.backend.arange(len(x_train)), x_train, y_train)
def reader(sample):
......
import pkg_resources
import tensorflow as tf
from click.testing import CliRunner
from bob.extension.config import load
from bob.extension.scripts.click_helper import assert_click_runner_result
from bob.learn.tensorflow.data.tfrecords import dataset_from_tfrecord
from bob.learn.tensorflow.scripts.datasets_to_tfrecords import datasets_to_tfrecords
from click.testing import CliRunner
regenerate_reference = False
......
import numpy as np
import tensorflow as tf
from bob.learn.tensorflow.models import MineModel
def run_mine(is_mine_f):
np.random.seed(10)
N = 20000
d = 1
EPOCHS = 100
X = np.sign(np.random.normal(0.,1.,[N, d]))
Z = X + np.random.normal(0.,np.sqrt(0.2),[N, d])
EPOCHS = 10
X = np.sign(np.random.normal(0.0, 1.0, [N, d]))
Z = X + np.random.normal(0.0, np.sqrt(0.2), [N, d])
from sklearn.feature_selection import mutual_info_regression
mi_numerical = mutual_info_regression(X.reshape(-1, 1), Z.ravel())[0]
model = MineModel(is_mine_f=is_mine_f)
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))
callback = model.fit(
x=[X, Z], epochs=EPOCHS, verbose=1, batch_size=100
)
callback = model.fit(x=[X, Z], epochs=EPOCHS, verbose=1, batch_size=100)
mine = -np.array(callback.history["loss"])[-1]
np.allclose(mine,mi_numerical, atol=0.01)
np.allclose(mine, mi_numerical, atol=0.01)
def test_mine():
run_mine(False)
def test_mine_f():
run_mine(True)
\ No newline at end of file
run_mine(True)
from .keras import *
from .math import *
from .image import *
from .image import * # noqa: F401,F403
from .keras import * # noqa: F401,F403
from .math import * # noqa: F401,F403
@@ -6,6 +6,7 @@ package:
version: {{ environ.get('BOB_PACKAGE_VERSION', '0.0.1') }}
build:
skip: true # [not linux]
number: {{ environ.get('BOB_BUILD_NUMBER', 0) }}
run_exports:
- {{ pin_subpackage(name) }}
@@ -21,33 +22,29 @@ requirements:
- python {{ python }}
- setuptools {{ setuptools }}
- bob.extension
- bob.io.base
- bob.io.image
- bob.measure
- numpy {{ numpy }}
- scipy {{ scipy }}
- click {{ click }}
- click-plugins {{ click_plugins }}
- scikit-learn {{ scikit_learn }}
- tensorflow {{ tensorflow }} # [linux]
run:
- python
- setuptools
- numpy
- scipy
- {{ pin_compatible('numpy') }}
- {{ pin_compatible('scipy') }}
- {{ pin_compatible('click') }}
- {{ pin_compatible('click-plugins') }}
- tensorflow >=1.4
- {{ pin_compatible('tensorflow') }} # [linux]
run_constrained:
- {{ pin_compatible('scikit-learn') }}
test:
imports:
- {{ name }}
commands:
- bob tf --help
- bob tf compute-statistics --help
- bob tf db-to-tfrecords --help
- bob tf eval --help
- bob tf predict-bio --help
- bob tf style-transfer --help
- bob tf train --help
- bob tf train-and-evaluate --help
- bob tf trim --help
- bob tf datasets-to-tfrecords --help
- nosetests --with-coverage --cover-package={{ name }} -sv {{ name }}
- sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx
- sphinx-build -aEb doctest {{ project_dir }}/doc sphinx
@@ -59,10 +56,9 @@ test:
- coverage
- sphinx
- sphinx_rtd_theme
- bob.io.image
- bob.db.atnt
- matplotlib
- gridtk
- scikit-learn
about:
home: https://www.idiap.ch/software/bob/
......
@@ -4,19 +4,15 @@
[buildout]
parts = scripts
eggs = bob.learn.tensorflow
bob.db.casia_webface
bob.db.mobio
gridtk
eggs = bob.extension
bob.db.atnt
bob.learn.tensorflow
extensions = bob.buildout
mr.developer
auto-checkout = *
develop = src/bob.db.mnist
src/gridtk
src/bob.db.casia_webface
src/bob.db.mobio
src/bob.db.lfw
develop = src/bob.extension
src/bob.db.atnt
.
; options for bob.buildout
@@ -26,12 +22,8 @@ newest = false
[sources]
bob.db.mnist = git git@gitlab.idiap.ch:bob/bob.db.mnist.git
bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base.git
bob.db.mobio = git git@gitlab.idiap.ch:bob/bob.db.mobio.git
bob.db.lfw = git git@gitlab.idiap.ch:bob/bob.db.lfw.git
bob.db.casia_webface = git git@gitlab.idiap.ch:bob/bob.db.casia_webface.git
gridtk = git git@gitlab.idiap.ch:bob/gridtk
bob.extension = git git@gitlab.idiap.ch:bob/bob.extension.git
bob.db.atnt = git git@gitlab.idiap.ch:bob/bob.db.atnt.git
[scripts]
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import glob
import os
import sys
import time
import pkg_resources
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
# For inter-documentation mapping:
@@ -234,9 +229,9 @@ rst_epilog = """
autoclass_content = "class"
autodoc_member_order = "bysource"
autodoc_default_options = {
"members": True,
"undoc-members": True,
"show-inheritance": True,
"members": True,
"undoc-members": True,
"show-inheritance": True,
}
@@ -247,5 +242,3 @@ if os.path.exists(sphinx_requirements):
)
else:
intersphinx_mapping = link_documentation()
@@ -2,3 +2,5 @@ py:class list
py:exc ValueError
py:class tensorflow.python.estimator.estimator.Estimator
py:class tensorflow_estimator.python.estimator.estimator.Estimator
py:class tensorflow.python.keras.losses.Loss
py:class tensorflow.python.keras.engine.base_layer.Layer
@@ -7,63 +7,27 @@
============
Estimators
==========
.. autosummary::
bob.learn.tensorflow.estimators.Logits
bob.learn.tensorflow.estimators.LogitsCenterLoss
bob.learn.tensorflow.estimators.Triplet
bob.learn.tensorflow.estimators.Siamese
bob.learn.tensorflow.estimators.Regressor
bob.learn.tensorflow.estimators.MovingAverageOptimizer
bob.learn.tensorflow.estimators.learning_rate_decay_fn
Architectures
=============