bob / bob.learn.tensorflow / Commits / 89e4b78b

Commit 89e4b78b authored Feb 10, 2020 by Amir MOHAMMADI
remove extra models and improve comments
parent 36c3f4b9
Changes 6
bob/learn/tensorflow/models/autoencoder_face.py  (view file @ 89e4b78b)
"""Face Autoencoder used in:
IMPROVING CROSS-DATASET PERFORMANCE OF FACE PRESENTATION ATTACK DETECTION SYSTEMS USING FACE RECOGNITION DATASETS,
Mohammadi, Amir and Bhattacharjee, Sushil and Marcel, Sebastien, ICASSP 2020
"""
import tensorflow as tf

from bob.learn.tensorflow.models.densenet import densenet161
...
...
@@ -9,63 +14,6 @@ def _get_l2_kw(weight_decay):
    return l2_kw
# class ConvDecoder(tf.keras.Sequential):
# """The decoder similar to the one in
# https://github.com/google/compare_gan/blob/master/compare_gan/architectures/sndcgan.py
# """
# def __init__(
# self,
# z_dim,
# decoder_layers=(
# (512, 7, 7, 0),
# (256, 4, 2, 1),
# (128, 4, 2, 1),
# (64, 4, 2, 1),
# (32, 4, 2, 1),
# (16, 4, 2, 1),
# (3, 1, 1, 0),
# ),
# weight_decay=1e-5,
# name="Decoder",
# **kwargs,
# ):
# self.z_dim = z_dim
# self.data_format = data_format = "channels_last"
# l2_kw = _get_l2_kw(weight_decay)
# layers = [
# tf.keras.layers.Reshape((1, 1, z_dim), input_shape=(z_dim,), name="reshape")
# ]
# for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
# dconv = tf.keras.layers.Conv2DTranspose(
# filters,
# kernel_size,
# strides=strides,
# use_bias=i == len(decoder_layers) - 1,
# data_format=data_format,
# name=f"dconv_{i}",
# **l2_kw,
# )
# crop = tf.keras.layers.Cropping2D(
# cropping=cropping, data_format=data_format, name=f"crop_{i}"
# )
# if i == len(decoder_layers) - 1:
# act = tf.keras.layers.Activation("tanh", name=f"tanh_{i}")
# bn = None
# else:
# act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
# bn = tf.keras.layers.BatchNormalization(
# scale=False, fused=False, name=f"bn_{i}"
# )
# if bn is not None:
# layers.extend([dconv, crop, bn, act])
# else:
# layers.extend([dconv, crop, act])
# with tf.name_scope(name):
# super().__init__(layers=layers, name=name, **kwargs)
def ConvDecoder(z_dim, decoder_layers=(
...
...
@@ -149,7 +97,7 @@ class Autoencoder(tf.keras.Model):
        return z, x_hat
def autoencoder_face(z_dim=256, weight_decay=1e-9, decoder_last_act="tanh"):
def autoencoder_face(z_dim=256, weight_decay=1e-10, decoder_last_act="tanh"):
    encoder = densenet161(
        output_classes=z_dim, weight_decay=weight_decay, weights=None, name="DenseNet"
    )
...
...
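A minimal usage sketch for autoencoder_face, assuming it returns the Autoencoder model whose call yields (z, x_hat) as in the hunk above, and assuming 224x224 channels_last inputs:

    import tensorflow as tf
    from bob.learn.tensorflow.models.autoencoder_face import autoencoder_face

    # defaults here mirror the new values introduced by this commit
    model = autoencoder_face(z_dim=256, weight_decay=1e-10, decoder_last_act="tanh")
    images = tf.zeros([2, 224, 224, 3])       # dummy batch; the input size is an assumption
    z, x_hat = model(images, training=False)  # latent code and reconstruction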
bob/learn/tensorflow/models/autoencoder_yz.py  (deleted, 100644 → 0; view file @ 36c3f4b9)
import tensorflow as tf

from .densenet import densenet161, ConvBlock


def _get_l2_kw(weight_decay):
    l2_kw = {}
    if weight_decay is not None:
        l2_kw = {"kernel_regularizer": tf.keras.regularizers.l2(weight_decay)}
    return l2_kw
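# Illustration (hypothetical, not from the original module): _get_l2_kw builds the
# keyword arguments that attach L2 weight decay to the layers below, e.g.
#   _get_l2_kw(1e-5)  -> {"kernel_regularizer": tf.keras.regularizers.l2(1e-5)}
#   _get_l2_kw(None)  -> {}   (no weight decay)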
class ConvEncoder(tf.keras.Model):
    """The encoder part"""

    def __init__(
        self,
        encoder_layers,
        data_format="channels_last",
        weight_decay=1e-5,
        name="Encoder",
        **kwargs,
    ):
        super().__init__(name=name, **kwargs)
        self.data_format = data_format
        l2_kw = _get_l2_kw(weight_decay)
        layers = []
        for i, (filters, kernel_size, strides, padding) in enumerate(encoder_layers):
            bn = tf.keras.layers.BatchNormalization(scale=False, fused=False, name=f"bn_{i}")
            if i == 0:
                act = tf.keras.layers.Activation("linear", name=f"linear_{i}")
            else:
                act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
            pad = tf.keras.layers.ZeroPadding2D(
                padding=padding, data_format=data_format, name=f"pad_{i}"
            )
            conv = tf.keras.layers.Conv2D(
                filters,
                kernel_size,
                strides=strides,
                use_bias=(i == len(encoder_layers) - 1),
                data_format=data_format,
                name=f"conv_{i}",
                **l2_kw,
            )
            if i == len(encoder_layers) - 1:
                pool = tf.keras.layers.AvgPool2D(data_format=data_format, name=f"pool_{i}")
            else:
                pool = tf.keras.layers.MaxPooling2D(data_format=data_format, name=f"pool_{i}")
            layers.extend([bn, act, pad, conv, pool])
        self.sequential_layers = layers

    def call(self, x, training=None):
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
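# Illustrative sketch (hypothetical values, not from the original module). Each entry
# of encoder_layers is a (filters, kernel_size, strides, padding) tuple:
#   encoder = ConvEncoder(encoder_layers=((32, 5, 1, 2), (64, 5, 1, 2), (128, 3, 1, 2)))
#   features = encoder(tf.zeros([1, 224, 224, 3]), training=False)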
class ConvDecoder(tf.keras.Model):
    """The decoder part"""

    def __init__(
        self, decoder_layers, y_dim, weight_decay=1e-5, name="Decoder", **kwargs
    ):
        super().__init__(name=name, **kwargs)
        self.data_format = data_format = "channels_last"
        self.y_dim = y_dim
        l2_kw = _get_l2_kw(weight_decay)
        layers = []
        for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
            dconv = tf.keras.layers.Conv2DTranspose(
                filters,
                kernel_size,
                strides=strides,
                use_bias=False,
                data_format=data_format,
                name=f"dconv_{i}",
                **l2_kw,
            )
            crop = tf.keras.layers.Cropping2D(
                cropping=cropping, data_format=data_format, name=f"crop_{i}"
            )
            bn = tf.keras.layers.BatchNormalization(
                scale=(i == len(decoder_layers) - 1), fused=False, name=f"bn_{i}"
            )
            if i == len(decoder_layers) - 1:
                act = tf.keras.layers.Activation("tanh", name=f"tanh_{i}")
            else:
                act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
            layers.extend([dconv, crop, bn, act])
        self.sequential_layers = layers

    def call(self, x, y, training=None):
        # the conditioning label y is broadcast and concatenated to the latent code
        y = tf.reshape(tf.cast(y, x.dtype), (-1, 1, 1, self.y_dim))
        x = tf.concat([x, y], axis=-1)
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
class Autoencoder(tf.keras.Model):
    """
    A class defining a simple convolutional autoencoder.

    Attributes
    ----------
    data_format : str
        only channels_last is supported
    decoder : object
        The decoder part
    encoder : object
        The encoder part
    """

    def __init__(
        self, encoder, decoder, z_dim, weight_decay=1e-5, name="Autoencoder", **kwargs
    ):
        super().__init__(name=name, **kwargs)
        data_format = "channels_last"
        self.data_format = data_format
        self.weight_decay = weight_decay
        self.encoder = encoder
        self.decoder = decoder
        self.z_dim = z_dim

    def call(self, x, y, training=None):
        self.encoder_output = tf.reshape(
            self.encoder(x, training=training), (-1, 1, 1, self.z_dim)
        )
        self.decoder_output = self.decoder(self.encoder_output, y, training=training)
        return self.decoder_output
def densenet161_autoencoder(z_dim=256, y_dim=3, weight_decay=1e-10):
    encoder = densenet161(output_classes=z_dim, weight_decay=weight_decay, weights=None)
    decoder_layers = (
        (128, 7, 7, 0),
        (64, 4, 2, 1),
        (32, 4, 2, 1),
        (16, 4, 2, 1),
        (8, 4, 2, 1),
        (4, 4, 2, 1),
        (3, 1, 1, 0),
    )
    decoder = ConvDecoder(
        decoder_layers, y_dim=y_dim, weight_decay=weight_decay, name="Decoder"
    )
    autoencoder = Autoencoder(encoder, decoder, z_dim=z_dim, weight_decay=weight_decay)
    return autoencoder
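# Illustrative sketch (hypothetical values, not from the original module): the returned
# Autoencoder is conditioned on a label y of size y_dim when called:
#   model = densenet161_autoencoder(z_dim=256, y_dim=3)
#   x = tf.zeros([2, 224, 224, 3])        # assumed input size
#   y = tf.one_hot([0, 1], depth=3)       # one label per sample
#   x_hat = model(x, y, training=False)   # reconstruction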
class ConvDecoderSupervised(tf.keras.Model):
    """The decoder part"""

    def __init__(
        self,
        decoder_layers,
        weight_decay=1e-5,
        data_format="channels_last",
        name="Decoder",
        y_dim=None,
        **kwargs,
    ):
        super().__init__(name=name, **kwargs)
        self.data_format = data_format
        self.y_dim = y_dim
        l2_kw = _get_l2_kw(weight_decay)
        layers = []
        for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
            dconv = tf.keras.layers.Conv2DTranspose(
                filters,
                kernel_size,
                strides=strides,
                use_bias=False,
                data_format=data_format,
                name=f"dconv_{i}",
                **l2_kw,
            )
            crop = tf.keras.layers.Cropping2D(
                cropping=cropping, data_format=data_format, name=f"crop_{i}"
            )
            bn = tf.keras.layers.BatchNormalization(
                scale=(i == len(decoder_layers) - 1), fused=False, name=f"bn_{i}"
            )
            if i == len(decoder_layers) - 1:
                act = tf.keras.layers.Activation("tanh", name=f"tanh_{i}")
            else:
                act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
            layers.extend([dconv, crop, bn, act])
        self.sequential_layers = layers

    def call(self, x, training=None):
        x = tf.reshape(x, (-1, 1, 1, x.get_shape().as_list()[-1]))
        if self.y_dim is not None:
            # when y_dim is set, a fixed one-hot label is tiled over the batch
            y_fixed = tf.one_hot([[[0]]], self.y_dim, dtype=x.dtype)
            y_fixed = tf.tile(y_fixed, multiples=[tf.shape(x)[0], 1, 1, 1])
            x = tf.concat([x, y_fixed], axis=-1)
        x = tf.keras.Input(tensor=x)
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
def densenet161_autoencoder_supervised(
    x,
    training,
    weight_decay=1e-10,
    z_dim=256,
    y_dim=1,
    deeppixbis_add_one_more_layer=False,
    start_from_face_autoencoder=False,
):
    data_format = "channels_last"
    with tf.name_scope("Autoencoder"):
        densenet = densenet161(
            output_classes=z_dim,
            weight_decay=weight_decay,
            weights=None,
            data_format=data_format,
        )
        z = densenet(x, training=training)
        transition = tf.keras.Input(tensor=densenet.transition_blocks[1].output)
        layers = [
            tf.keras.layers.Conv2D(
                filters=1,
                kernel_size=1,
                kernel_initializer="he_normal",
                kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
                data_format=data_format,
                name="dec",
            ),
            tf.keras.layers.Flatten(data_format=data_format, name="Pixel_Logits_Flatten"),
        ]
        if deeppixbis_add_one_more_layer:
            layers.insert(
                0,
                ConvBlock(
                    num_filters=32,
                    data_format=data_format,
                    bottleneck=True,
                    weight_decay=weight_decay,
                    name="prelogits",
                ),
            )
        y = transition
        with tf.name_scope("DeepPixBiS"):
            for l in layers:
                try:
                    y = l(y, training=training)
                except TypeError:
                    y = l(y)
        deep_pix_bis_final_layers = tf.keras.Model(
            inputs=transition, outputs=y, name="DeepPixBiS"
        )
        encoder = tf.keras.Model(inputs=[x, transition], outputs=[y, z], name="Encoder")
        encoder.densenet = densenet
        if deeppixbis_add_one_more_layer:
            encoder.prelogits = deep_pix_bis_final_layers.layers[-3].output
        else:
            encoder.prelogits = transition
        encoder.deep_pix_bis = deep_pix_bis_final_layers
        decoder_layers = (
            (128, 7, 7, 0),
            (64, 4, 2, 1),
            (32, 4, 2, 1),
            (16, 4, 2, 1),
            (8, 4, 2, 1),
            (4, 4, 2, 1),
            (3, 1, 1, 0),
        )
        decoder = ConvDecoderSupervised(
            decoder_layers,
            weight_decay=weight_decay,
            name="Decoder",
            data_format=data_format,
            y_dim=3 if start_from_face_autoencoder else None,
        )
        x_hat = decoder(z, training=training)
        autoencoder = tf.keras.Model(
            inputs=[x, transition], outputs=[y, z, x_hat], name="Autoencoder"
        )
        autoencoder.encoder = encoder
        autoencoder.decoder = decoder
        return autoencoder, y, z, x_hat
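This removed module targets graph-mode TensorFlow 1.x (note the tf.keras.Input(tensor=...) calls). A rough sketch of how densenet161_autoencoder_supervised was presumably wired up, with the input tensor and shapes as assumptions:

    images = tf.keras.Input(shape=(224, 224, 3), name="images")  # assumed 224x224 RGB input
    autoencoder, y, z, x_hat = densenet161_autoencoder_supervised(images, training=False)
    # y: flattened pixel-wise (DeepPixBiS) logits, z: latent code, x_hat: reconstruction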
bob/learn/tensorflow/models/densenet.py  (view file @ 89e4b78b)
"""Densely Connected Convolutional Networks.
Reference [
Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
Reference [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
"""
import tensorflow as tf
...
...
bob/learn/tensorflow/models/discriminator.py  (view file @ 89e4b78b)
"""Discriminator used in training of autoencoder in:
IMPROVING CROSS-DATASET PERFORMANCE OF FACE PRESENTATION ATTACK DETECTION SYSTEMS USING FACE RECOGNITION DATASETS,
Mohammadi, Amir and Bhattacharjee, Sushil and Marcel, Sebastien, ICASSP 2020
"""
import tensorflow as tf

from ..gan.spectral_normalization import spectral_norm_regularizer
from ..utils import gram_matrix
class ConvDiscriminator(tf.keras.Model):
    """A discriminator that can sit on top of DenseNet 161's transition 1 block.
    The output of that block given 224x224x3 inputs is 14x14x384."""

    def __init__(self, data_format="channels_last", n_classes=1, **kwargs):
        super().__init__(**kwargs)
        self.data_format = data_format
        self.n_classes = n_classes
        act = "sigmoid" if n_classes == 1 else "softmax"
        self.sequential_layers = [
            tf.keras.layers.Conv2D(200, 1, data_format=data_format),
            tf.keras.layers.Activation("relu"),
            tf.keras.layers.AveragePooling2D(3, 2, data_format=data_format),
            tf.keras.layers.Conv2D(100, 1, data_format=data_format),
            tf.keras.layers.Activation("relu"),
            tf.keras.layers.AveragePooling2D(3, 2, data_format=data_format),
            tf.keras.layers.Flatten(data_format=data_format),
            tf.keras.layers.Dense(n_classes),
            tf.keras.layers.Activation(act),
        ]

    def call(self, x, training=None):
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
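# Illustrative sketch (hypothetical, not from the original module), assuming the
# 14x14x384 transition-1 feature maps described in the docstring above:
#   disc = ConvDiscriminator(n_classes=1)
#   scores = disc(tf.zeros([8, 14, 14, 384]), training=False)  # shape (8, 1), sigmoid output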
class ConvDiscriminator2(tf.keras.Model):
    """A discriminator that can sit on top of DenseNet 161's transition 1 block.
    The output of that block given 224x224 inputs is 14x14x384. Here we want to output
    15x15x128 features which is going to match the output of encoder in mcae.py given
    these layers::

        ENCODER_LAYERS = (
            (32, 5, 1, 2),
            (64, 5, 1, 2),
            (128, 3, 1, 2),
            (128, 3, 1, 2),
        )
        DECODER_LAYERS = (
            (64, 3, 2, 1),
            (32, 3, 2, 1),
            (16, 5, 2, 2),
            (8, 5, 2, 2),
            (3, 2, 1, 1),
        )
    """

    def __init__(self, data_format="channels_last", **kwargs):
        super().__init__(**kwargs)
        self.data_format = data_format
        self.sequential_layers = [
            tf.keras.layers.ZeroPadding2D(
                padding=((1, 0), (1, 0)), data_format=data_format
            ),
            tf.keras.layers.Conv2D(256, 5, data_format=data_format, padding="same"),
            tf.keras.layers.Activation("relu"),
            tf.keras.layers.Conv2D(128, 5, data_format=data_format, padding="same"),
            tf.keras.layers.Activation("relu"),
            tf.keras.layers.Conv2D(128, 1, data_format=data_format, padding="same"),
            tf.keras.layers.Activation("relu"),
        ]

    def call(self, x, training=None):
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
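# Illustrative sketch (hypothetical, not from the original module): a 14x14x384 input
# is padded to 15x15 and mapped to 15x15x128 features:
#   disc2 = ConvDiscriminator2()
#   feats = disc2(tf.zeros([8, 14, 14, 384]), training=False)  # shape (8, 15, 15, 128)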
class ConvDiscriminator3(tf.keras.Model):
    """A discriminator that takes images and tries its best.
    Be careful, this one returns logits."""

    def __init__(self, data_format="channels_last", n_classes=1, **kwargs):
        super().__init__(**kwargs)
        self.data_format = data_format
        self.n_classes = n_classes
        spectral_norm = spectral_norm_regularizer(scale=1.0)
        conv2d_kw = {"kernel_regularizer": spectral_norm, "data_format": data_format}
        self.sequential_layers = [
            tf.keras.layers.Conv2D(64, 3, strides=1, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.Conv2D(64, 4, strides=2, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.Conv2D(128, 3, strides=1, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.Conv2D(128, 4, strides=2, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.Conv2D(256, 3, strides=1, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.Conv2D(256, 4, strides=2, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.Conv2D(512, 3, strides=1, **conv2d_kw),
            tf.keras.layers.LeakyReLU(0.1),
            tf.keras.layers.GlobalAveragePooling2D(data_format=data_format),
            tf.keras.layers.Dense(n_classes),
        ]

    def call(self, x, training=None):
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
class DenseDiscriminator(tf.keras.Model):
def DenseDiscriminator(n_classes=1, name="DenseDiscriminator", **kwargs):
    """A discriminator that takes vectors as input and tries its best.
    Be careful, this one returns logits."""
    def __init__(self, n_classes=1, **kwargs):
        super().__init__(**kwargs)
        self.n_classes = n_classes
        self.sequential_layers = [
    return tf.keras.Sequential(
        [
            tf.keras.layers.Dense(1000),
            tf.keras.layers.Activation("relu"),
            tf.keras.layers.Dense(1000),
            tf.keras.layers.Activation("relu"),
            tf.keras.layers.Dense(n_classes),
        ]
    def call(self, x, training=None):
        for l in self.sequential_layers:
            try:
                x = l(x, training=training)
            except TypeError:
                x = l(x)
        return x
class GramComparer1(tf.keras.Model):
    """A model to compare images based on their gram matrices."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.batchnorm = tf.keras.layers.BatchNormalization()
        self.conv2d = tf.keras.layers.Conv2D(128, 7)

    def call(self, x_1_2, training=None):
        def _call(x):
            x = self.batchnorm(x, training=training)
            x = self.conv2d(x)
            return gram_matrix(x)

        gram1 = _call(x_1_2[..., :3])
        gram2 = _call(x_1_2[..., 3:])
        return -tf.reduce_mean((gram1 - gram2) ** 2, axis=[1, 2])[:, None]
        ],
        name=name,
        **kwargs,
    )
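A small usage sketch for GramComparer1 above (hypothetical; the packing of two RGB images along the channel axis is inferred from the slicing in its call method):

    comparer = GramComparer1()
    img_a = tf.zeros([4, 112, 112, 3])
    img_b = tf.zeros([4, 112, 112, 3])
    pair = tf.concat([img_a, img_b], axis=-1)  # shape (4, 112, 112, 6)
    score = comparer(pair, training=False)     # (4, 1); values closer to 0 mean more similar Gram matrices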
bob/learn/tensorflow/models/msu_patch.py  (view file @ 89e4b78b)
"""Patch-based CNN used for face PAD in:
Y. Atoum, Y. Liu, A. Jourabloo, and X. Liu, “Face anti-spoofing using patch and
depth-based CNNs,” in 2017 IEEE International Joint Conference on Biometrics (IJCB),
Denver, CO, 2017, pp. 319–328.
"""
import tensorflow as tf
...
...
bob/learn/tensorflow/models/simple_cnn.py  (view file @ 89e4b78b)
"""
The network using keras (same as new_architecture function below)::
from tensorflow.python.keras import *
from tensorflow.python.keras.layers import *
simplecnn = Sequential([
Conv2D(32,(3,3),padding='same',use_bias=False, input_shape=(28,28,3)),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),