Project: bob / bob.learn.tensorflow

Commit 70d3fe40, authored Nov 13, 2020 by Amir MOHAMMADI
Commit message: "Fix pre-commit complains"
Parent: c3836797
Pipeline #45550 passed in 17 minutes and 50 seconds
Changes: 25 files

File: .pre-commit-config.yaml
@@ -2,16 +2,16 @@
 # See https://pre-commit.com/hooks.html for more hooks
 repos:
 - repo: https://github.com/timothycrosley/isort
-  rev: 4.3.21-2
+  rev: 5.6.4
   hooks:
   - id: isort
-    args: [-sl]
+    args: [--sl, --line-length, "88"]
 - repo: https://github.com/psf/black
-  rev: stable
+  rev: 20.8b1
   hooks:
   - id: black
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v2.0.0
+  rev: v3.3.0
   hooks:
   - id: check-ast
   - id: check-case-conflict
@@ -19,22 +19,8 @@ repos:
   - id: end-of-file-fixer
   - id: debug-statements
   - id: check-added-large-files
-  - id: flake8
-- repo: local
+- repo: https://gitlab.com/pycqa/flake8
+  rev: 3.8.4
   hooks:
-  - id: sphinx-build
-    name: sphinx build
-    entry: python -m sphinx.cmd.build
-    args: [-a, -E, -W, doc, sphinx]
-    language: system
-    files: ^doc/
-    types: [file]
-    pass_filenames: false
-  - id: sphinx-doctest
-    name: sphinx doctest
-    entry: python -m sphinx.cmd.build
-    args: [-a, -E, -b, doctest, doc, sphinx]
-    language: system
-    files: ^doc/
-    types: [file]
-    pass_filenames: false
+  - id: flake8
+    args: [--ignore, "E203,W503,E501,E302,E111,E114,E121,E402"]
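Besides pinning newer hook versions, the isort entry switches from the short -sl flag to the long --sl (force single line) spelling and sets --line-length to 88 to match black. As an illustration only, using a standard-library module rather than anything from this commit, force-single-line rewrites a combined import into one alphabetized import per line, which is what produces the many one-import-per-line changes in the Python files further down:

# Before isort --sl: one combined import line.
# from os.path import join, split, basename

# After isort --sl --line-length 88: one import per line, alphabetized.
from os.path import basename
from os.path import join
from os.path import split

print(join("a", "b"), split("a/b"), basename("a/b"))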
File: bob/learn/tensorflow/callbacks.py

@@ -2,7 +2,6 @@ import json
 import os
 import tensorflow as tf
 from tensorflow.keras import callbacks
 class CustomBackupAndRestore(tf.keras.callbacks.experimental.BackupAndRestore):
File: bob/learn/tensorflow/data/__init__.py

-from .generator import Generator, dataset_using_generator
-from .tfrecords import dataset_to_tfrecord, dataset_from_tfrecord, TFRECORDS_EXT
+from .generator import Generator
+from .generator import dataset_using_generator  # noqa: F401
+from .tfrecords import TFRECORDS_EXT  # noqa: F401
+from .tfrecords import dataset_from_tfrecord  # noqa: F401
+from .tfrecords import dataset_to_tfrecord  # noqa: F401
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
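With flake8 now running through the new pycqa hook, names that are imported but never used in a module are reported as F401. In an __init__.py these imports are intentional re-exports, so the commit splits them one per line (the isort --sl style) and silences the warning on each line instead of deleting it. A minimal file sketch of the pattern, for a hypothetical package layout that is not part of this repository:

# mypkg/__init__.py -- hypothetical package illustrating the re-export pattern.
# The names are not referenced inside this file, so flake8 would report F401;
# the trailing comment silences the check for that single line only.
from .core import Engine  # noqa: F401
from .core import run  # noqa: F401

__all__ = [_ for _ in dir() if not _.startswith("_")]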
File: bob/learn/tensorflow/data/tfrecords.py

@@ -8,7 +8,6 @@ import json
 import tensorflow as tf
 TFRECORDS_EXT = ".tfrecords"
@@ -102,7 +101,7 @@ def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
         A dataset that contains the data from the TFRecord file.
     """
     # these imports are needed so that eval can work
-    from tensorflow import TensorShape
+    from tensorflow import TensorShape  # noqa: F401
     if isinstance(tfrecord, str):
         tfrecord = [tfrecord]
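The noqa on "from tensorflow import TensorShape" goes with the comment above it: the name is never referenced directly, but dataset_from_tfrecord apparently rebuilds shape objects from their string representation, and eval() can only resolve TensorShape if the name exists in the enclosing scope. A minimal sketch of that mechanism; the spec_text value is made up for illustration:

import tensorflow as tf
from tensorflow import TensorShape  # noqa: F401  (kept only so eval() can resolve the name)

# A TensorShape serialized as text, e.g. what repr() produces:
spec_text = repr(tf.TensorShape([None, 112, 112, 3]))  # "TensorShape([None, 112, 112, 3])"

# eval() looks the name up in the current scope; without the import above
# (and its noqa, so flake8 does not suggest removing it) this raises NameError.
shape = eval(spec_text)
print(shape.as_list())  # [None, 112, 112, 3]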
File: bob/learn/tensorflow/losses/__init__.py

-from .center_loss import CenterLoss, CenterLossLayer
+from .center_loss import CenterLoss
+from .center_loss import CenterLossLayer
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -16,8 +18,5 @@ def __appropriate__(*args):
         obj.__module__ = __name__
-__appropriate__(
-    CenterLoss,
-    CenterLossLayer,
-)
+__appropriate__(CenterLoss, CenterLossLayer)
 __all__ = [_ for _ in dir() if not _.startswith("_")]
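Splitting the CenterLoss import onto two lines is again the isort force-single-line style, and black collapses the __appropriate__ call back onto one line. The __appropriate__ helper is the repo's Sphinx convention; per the visible context it stamps each object's __module__ with the package name so autodoc attributes the class to this __init__. A rough, self-contained sketch of what the helper and the __all__ line do, under the assumption (from the shown context only) that __appropriate__ does nothing beyond setting __module__; the classes below are stand-ins, not the real implementations:

def __appropriate__(*args):
    """Marks objects as declared here so Sphinx autodoc documents them under this module."""
    for obj in args:
        obj.__module__ = __name__


class CenterLoss:  # stand-in for the real class, for illustration only
    pass


class CenterLossLayer:  # stand-in for the real class
    pass


__appropriate__(CenterLoss, CenterLossLayer)

# Export every public name defined above.
__all__ = [_ for _ in dir() if not _.startswith("_")]
print(CenterLoss.__module__, __all__)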
File: bob/learn/tensorflow/metrics/__init__.py

-from .embedding_accuracy import EmbeddingAccuracy, predict_using_tensors
+from .embedding_accuracy import EmbeddingAccuracy
+from .embedding_accuracy import predict_using_tensors  # noqa: F401
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
File: bob/learn/tensorflow/models/__init__.py

@@ -2,6 +2,7 @@ from .alexnet import AlexNet_simplified
 from .densenet import DenseNet
 from .mine import MineModel
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
     """Says object was actually declared here, an not on the import module.
@@ -18,9 +19,5 @@ def __appropriate__(*args):
         obj.__module__ = __name__
-__appropriate__(
-    AlexNet_simplified,
-    DenseNet,
-    MineModel,
-)
+__appropriate__(AlexNet_simplified, DenseNet, MineModel)
 __all__ = [_ for _ in dir() if not _.startswith("_")]
File: bob/learn/tensorflow/models/alexnet.py

@@ -61,7 +61,7 @@ def AlexNet_simplified(name="AlexNet", **kwargs):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from bob.learn.tensorflow.utils import model_summary
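Several of the model files keep "import pkg_resources" inside their if __name__ == "__main__": blocks even though the name is never used. flake8 flags that as F401, and the commit silences the warning rather than deleting the import, which suggests the import is kept for its side effects (pkg_resources processes namespace-package declarations when it is imported). A minimal sketch of the pattern, not taken from the repository:

if __name__ == "__main__":
    # Imported only for its side effects; the name itself is unused,
    # so flake8 would report F401 without the trailing comment.
    import pkg_resources  # noqa: F401

    print("pkg_resources" in globals())  # True: the import ran, even if unused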
File: bob/learn/tensorflow/models/autoencoder_face.py

@@ -113,7 +113,7 @@ def autoencoder_face(z_dim=256, weight_decay=1e-10, decoder_last_act="tanh"):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
     from bob.learn.tensorflow.utils import model_summary
File: bob/learn/tensorflow/models/densenet.py

@@ -446,7 +446,7 @@ class DeepPixBiS(tf.keras.Model):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
     from bob.learn.tensorflow.utils import model_summary
File: bob/learn/tensorflow/models/inception.py

@@ -142,7 +142,7 @@ def GoogLeNet(*, num_classes=1000, name="GoogLeNet", **kwargs):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
     from bob.learn.tensorflow.utils import model_summary
File: bob/learn/tensorflow/models/inception_resnet_v1.py

@@ -6,7 +6,6 @@ import logging
 import tensorflow as tf
 from tensorflow.keras import backend as K
 from tensorflow.keras.layers import Activation
 from tensorflow.keras.layers import AvgPool2D
 from tensorflow.keras.layers import BatchNormalization
 from tensorflow.keras.layers import Concatenate
 from tensorflow.keras.layers import Conv2D
@@ -14,9 +13,7 @@ from tensorflow.keras.layers import Dense
 from tensorflow.keras.layers import Dropout
 from tensorflow.keras.layers import GlobalAvgPool2D
 from tensorflow.keras.layers import GlobalMaxPool2D
 from tensorflow.keras.layers import Input
 from tensorflow.keras.layers import MaxPool2D
 from tensorflow.keras.models import Model
 from tensorflow.keras.models import Sequential
 from bob.learn.tensorflow.utils import SequentialLayer
@@ -240,7 +237,7 @@ class ReductionA(tf.keras.layers.Layer):
         self,
         padding,
         k=256,
-        l=256,
+        l=256,  # noqa: E741
         m=384,
         n=384,
         use_atrous=False,
@@ -250,7 +247,7 @@ class ReductionA(tf.keras.layers.Layer):
         super().__init__(name=name, **kwargs)
         self.padding = padding
         self.k = k
-        self.l = l
+        self.l = l  # noqa: E741
         self.m = m
         self.n = n
         self.use_atrous = use_atrous
@@ -448,7 +445,6 @@ def InceptionResNetV1(
         Conv2D_BN(80, 1, padding="valid", name="Conv2d_3b_1x1"),
         Conv2D_BN(192, 3, padding="valid", name="Conv2d_4a_3x3"),
         Conv2D_BN(256, 3, strides=2, padding="valid", name="Conv2d_4b_3x3"),
     ]
     # 5x block35 (Inception-ResNet-A block): 35 x 35 x 320
@@ -503,7 +499,6 @@ def InceptionResNetV1(
             )
         )
     # 5x block8 (Inception-ResNet-C block): 8 x 8 x 2080
     for block_idx in range(1, 5):
         layers.append(
@@ -515,7 +510,7 @@ def InceptionResNetV1(
                 name=f"block8_{block_idx}",
             )
         )
     layers.append(
         InceptionResnetBlock(
             n_channels=1792,
@@ -523,10 +518,10 @@ def InceptionResNetV1(
             activation=None,
             block_type="block8",
             block_idx=5,
-            name=f"block8_5",
+            name="block8_5",
         )
     )
     if (include_top and pooling is None) or (bottleneck):
         pooling = "avg"
@@ -545,7 +540,7 @@ def InceptionResNetV1(
     # Classification block
     if include_top:
         layers.append(Dense(classes, name="logits"))
     # Create model and call it on input to create its variables.
     model = Sequential(layers, name=name, **kwargs)
     model(img_input)
@@ -554,10 +549,11 @@ def InceptionResNetV1(
 if __name__ == "__main__":
-    import pkg_resources
-    from bob.learn.tensorflow.utils import model_summary
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
+    from bob.learn.tensorflow.utils import model_summary
     def print_model(inputs, outputs, name=None):
         print("")
         print("===============")
@@ -568,7 +564,9 @@ if __name__ == "__main__":
     del rows[-2]
     print(tabulate(rows, headers="firstrow", tablefmt="latex"))
-    model = InceptionResNetV1(input_shape=(160, 160, 3), bottleneck=True, include_top=False)
+    model = InceptionResNetV1(
+        input_shape=(160, 160, 3), bottleneck=True, include_top=False
+    )
     inputs = tf.keras.Input((160, 160, 3))
     outputs = model.call(inputs)
-    model.summary()
\ No newline at end of file
+    model.summary()
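Two other flake8 checks show up in this file: E741 ("ambiguous variable name") for the Inception parameter l, which is part of the existing layer API and is therefore kept but silenced, and F541 (f-string without any placeholders) for f"block8_5", where the needless f prefix is simply dropped. A small illustration of both, with made-up values:

# E741: a lone "l" is easily confused with the digit 1; the commit keeps the
# parameter name for compatibility and silences the check instead.
l = 256  # noqa: E741

# F541: this f-string has no {placeholders}, so the prefix does nothing...
redundant = f"block8_5"  # noqa: F541  (the flagged form, shown for comparison)
# ...and the commit rewrites it as a plain string:
name = "block8_5"

print(l, redundant == name)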
File: bob/learn/tensorflow/models/inception_resnet_v2.py

@@ -744,10 +744,11 @@ def MultiScaleInceptionResNetV2(
 if __name__ == "__main__":
-    import pkg_resources
-    from bob.learn.tensorflow.utils import model_summary
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
+    from bob.learn.tensorflow.utils import model_summary
     def print_model(inputs, outputs, name=None):
         print("")
         print("===============")
File: bob/learn/tensorflow/models/lenet5.py

@@ -31,7 +31,7 @@ def LeNet5_simplified(name="LeNet5", **kwargs):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from bob.learn.tensorflow.utils import model_summary
File: bob/learn/tensorflow/models/mine.py

@@ -7,6 +7,7 @@ Mutual Information Neural Estimation (https://arxiv.org/pdf/1801.04062.pdf)
 import tensorflow as tf
 class MineModel(tf.keras.Model):
     """
@@ -21,7 +22,7 @@ class MineModel(tf.keras.Model):
         super().__init__(name=name, **kwargs)
         self.units = units
         self.is_mine_f = is_mine_f
         self.transformer_x = tf.keras.layers.Dense(self.units)
         self.transformer_z = tf.keras.layers.Dense(self.units)
         self.transformer_xz = tf.keras.layers.Dense(self.units)
@@ -32,19 +33,21 @@ class MineModel(tf.keras.Model):
             h1_x = self.transformer_x(x)
             h1_z = self.transformer_z(z)
             h1 = tf.keras.layers.ReLU()(h1_x + h1_z)
-            h2 = self.transformer_output(tf.keras.layers.ReLU()(self.transformer_xz(h1)))
+            h2 = self.transformer_output(
+                tf.keras.layers.ReLU()(self.transformer_xz(h1))
+            )
             return h2
         def compute_lower_bound(x, z):
             t_xz = compute(x, z)
             z_shuffle = tf.random.shuffle(z)
             t_x_z = compute(x, z_shuffle)
             if self.is_mine_f:
                 lb = -(
                     tf.reduce_mean(t_xz, axis=0)
                     - tf.reduce_mean(tf.math.exp(t_x_z - 1))
                 )
             else:
                 lb = -(
@@ -60,9 +63,7 @@ class MineModel(tf.keras.Model):
         return compute_lower_bound(x, z)
     def get_config(self):
         config = super().get_config()
         config.update({"units": self.units})
         return config
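The changes in mine.py are mostly reformatting (black reflows the h2 line), but the surrounding context shows the heart of the model: the statistics network T is applied to joint samples (x, z) and to shuffled "marginal" pairs (x, z'), and for the MINE-f variant the loss is the negated lower bound E[T(x, z)] - E[exp(T(x, z') - 1)]. A standalone sketch of that bound, assuming T has already been evaluated on both kinds of pairs; the tensors below are random placeholders, not repository code:

import tensorflow as tf


def mine_f_lower_bound(t_joint, t_marginal):
    """f-divergence MINE bound as in the visible hunk: E[T] - E[exp(T' - 1)]."""
    return tf.reduce_mean(t_joint, axis=0) - tf.reduce_mean(tf.math.exp(t_marginal - 1))


# Placeholder network outputs for a batch of 8 joint and 8 shuffled pairs.
t_joint = tf.random.normal((8, 1))
t_marginal = tf.random.normal((8, 1))

loss = -mine_f_lower_bound(t_joint, t_marginal)  # negated, as in the diff's `lb = -(...)`
print(float(tf.reduce_mean(loss)))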
File: bob/learn/tensorflow/models/msu_patch.py

@@ -65,7 +65,7 @@ def MSUPatch(name="MSUPatch", **kwargs):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
     from bob.learn.tensorflow.utils import model_summary
File: bob/learn/tensorflow/models/simple_cnn.py

@@ -41,7 +41,7 @@ def SimpleCNN(input_shape=(28, 28, 3), inputs=None, name="SimpleCNN", **kwargs):
 if __name__ == "__main__":
-    import pkg_resources
+    import pkg_resources  # noqa: F401
     from tabulate import tabulate
     from bob.learn.tensorflow.utils import model_summary
File: bob/learn/tensorflow/scripts/datasets_to_tfrecords.py

@@ -12,7 +12,6 @@ from bob.extension.scripts.click_helper import ConfigCommand
 from bob.extension.scripts.click_helper import ResourceOption
 from bob.extension.scripts.click_helper import verbosity_option
 logger = logging.getLogger(__name__)
@@ -45,8 +44,9 @@ def datasets_to_tfrecords(dataset, output, force, **kwargs):
     To use this script with SGE, change your dataset (like shard it) and output a part
     of the dataset based on the SGE_TASK_ID environment variable in your config file.
     """
-    from bob.extension.scripts.click_helper import log_parameters
     import os
+    from bob.extension.scripts.click_helper import log_parameters
     from bob.learn.tensorflow.data.tfrecords import dataset_to_tfrecord
     from bob.learn.tensorflow.data.tfrecords import tfrecord_name_and_json_name
File: bob/learn/tensorflow/tests/data/db_to_tfrecords_config.py

 import tensorflow as tf
 from bob.learn.tensorflow.data import dataset_using_generator
 mnist = tf.keras.datasets.mnist
 (x_train, y_train), (_, _) = mnist.load_data()
 x_train, y_train = x_train[:10], y_train[:10]
 samples = zip(tf.keras.backend.arange(len(x_train)), x_train, y_train)
File: bob/learn/tensorflow/tests/test_datasets_to_tfrecords.py

 import pkg_resources
 import tensorflow as tf
+from click.testing import CliRunner
 from bob.extension.config import load
 from bob.extension.scripts.click_helper import assert_click_runner_result
 from bob.learn.tensorflow.data.tfrecords import dataset_from_tfrecord
 from bob.learn.tensorflow.scripts.datasets_to_tfrecords import datasets_to_tfrecords
-from click.testing import CliRunner
 regenerate_reference = False