bob / bob.learn.tensorflow (archived project; repository and other project resources are read-only)
Merge request !85: Porting to TF2
Merged
Tiago de Freitas Pereira requested to merge tf2 into master 4 years ago
Overview (8), Commits (24), Pipelines (5), Changes (8), 1 unresolved thread
Fixes #75 (closed)
Edited 4 years ago by Amir MOHAMMADI
Viewing commit a836dbe5 (8 files, +0 −1311)

Commit a836dbe5: Remove tests of removed components
Authored by Amir MOHAMMADI 4 years ago
bob/learn/tensorflow/test/test_architectures.py deleted (100644 → 0, +0 −152)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>

from nose.plugins.attrib import attr
import tensorflow as tf

from bob.learn.tensorflow.network import inception_resnet_v2, inception_resnet_v2_batch_norm, \
    inception_resnet_v1, inception_resnet_v1_batch_norm, \
    vgg_19, vgg_16, mlp_with_batchnorm_and_dropout


# @attr('slow')
# def test_inceptionv2():
#     tf.reset_default_graph()
#     # Testing WITHOUT batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v2(inputs)
#     assert len(tf.trainable_variables()) == 490
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0
#     # Testing WITH batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v2_batch_norm(inputs)
#     assert len(tf.trainable_variables()) == 490, len(tf.trainable_variables())
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0


# @attr('slow')
# def test_inceptionv2_adaptation():
#     tf.reset_default_graph()
#     for n, trainable_variables in [
#         (490, None),
#         (0, []),
#         (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
#         (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
#              'Conv2d_2a_3x3_BN']),
#         (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
#              'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#         (1, ['Conv2d_1a_3x3_BN']),
#         (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
#         (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#     ]:
#         input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#         net, end_points = inception_resnet_v2_batch_norm(
#             input, trainable_variables=trainable_variables)
#         l = len(tf.trainable_variables())
#         assert l == n, (l, n)
#         tf.reset_default_graph()
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0


# @attr('slow')
# def test_inceptionv1():
#     tf.reset_default_graph()
#     # Testing WITHOUT batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v1(inputs)
#     assert len(tf.trainable_variables()) == 266
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0
#     # Testing WITH batch norm
#     inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#     graph, _ = inception_resnet_v1_batch_norm(inputs)
#     assert len(tf.trainable_variables()) == 266
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0


# @attr('slow')
# def test_inceptionv1_adaptation():
#     tf.reset_default_graph()
#     for n, trainable_variables in [
#         (266, None),
#         (0, []),
#         (2, ['Conv2d_1a_3x3', 'Conv2d_1a_3x3_BN']),
#         (4, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_1a_3x3_BN',
#              'Conv2d_2a_3x3_BN']),
#         (6, ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
#              'Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#         (1, ['Conv2d_1a_3x3_BN']),
#         (2, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN']),
#         (3, ['Conv2d_1a_3x3_BN', 'Conv2d_2a_3x3_BN', 'Conv2d_2b_3x3_BN']),
#     ]:
#         input = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
#         net, end_points = inception_resnet_v1_batch_norm(
#             input, trainable_variables=trainable_variables)
#         l = len(tf.trainable_variables())
#         assert l == n, (l, n)
#         tf.reset_default_graph()
#     tf.reset_default_graph()
#     assert len(tf.global_variables()) == 0


def test_vgg():
    tf.reset_default_graph()

    # Testing VGG19 Training mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_19(inputs)
    assert len(tf.trainable_variables()) == 38

    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Testing VGG19 predicting mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_19(inputs, mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0

    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Testing VGG 16 training mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_16(inputs)
    assert len(tf.trainable_variables()) == 30

    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0

    # Testing VGG 16 predicting mode
    inputs = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
    graph, _ = vgg_16(inputs, mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0

    tf.reset_default_graph()
    assert len(tf.global_variables()) == 0


def test_mlp():
    tf.reset_default_graph()

    # Testing MLP Training mode
    inputs = tf.placeholder(tf.float32, shape=(1, 10, 10, 3))
    graph, _ = mlp_with_batchnorm_and_dropout(inputs, [6, 5])
    assert len(tf.trainable_variables()) == 4

    tf.reset_default_graph()

    # Testing MLP Predicting mode
    inputs = tf.placeholder(tf.float32, shape=(1, 10, 10, 3))
    graph, _ = mlp_with_batchnorm_and_dropout(
        inputs, [6, 5], mode=tf.estimator.ModeKeys.PREDICT)
    assert len(tf.trainable_variables()) == 0
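The deleted file relies entirely on TF1-only APIs (tf.placeholder, tf.reset_default_graph, the global variable collections, tf.estimator.ModeKeys), which is presumably why this commit drops it as part of the TF2 port. For orientation only, a minimal sketch of what an equivalent sanity check could look like in TF2 style, using tf.keras.applications.VGG19 purely as a stand-in for the removed vgg_19 helper; this is not code from the MR:

import tensorflow as tf


def test_vgg19_variable_count():
    # Build the architecture without pretrained weights; in TF2 the variables
    # live on the model object instead of a global graph collection.
    model = tf.keras.applications.VGG19(weights=None)

    # 16 conv layers + 3 dense layers, each with a kernel and a bias: 38 variables,
    # matching the count asserted for vgg_19 in the deleted TF1 test above.
    assert len(model.trainable_variables) == 38

    # The closest analogue of PREDICT mode here is freezing the model,
    # which empties the trainable set.
    model.trainable = False
    assert len(model.trainable_variables) == 0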