Commit 89e4b78b authored by Amir MOHAMMADI

remove extra models and improve comments

parent 36c3f4b9
"""Face Autoencoder used in:
IMPROVING CROSS-DATASET PERFORMANCE OF FACE PRESENTATION ATTACK DETECTION SYSTEMS USING FACE RECOGNITION DATASETS,
Mohammadi, Amir and Bhattacharjee, Sushil and Marcel, Sebastien, ICASSP 2020
"""
import tensorflow as tf
from bob.learn.tensorflow.models.densenet import densenet161
@@ -9,63 +14,6 @@ def _get_l2_kw(weight_decay):
return l2_kw
# class ConvDecoder(tf.keras.Sequential):
# """The decoder similar to the one in
# https://github.com/google/compare_gan/blob/master/compare_gan/architectures/sndcgan.py
# """
# def __init__(
# self,
# z_dim,
# decoder_layers=(
# (512, 7, 7, 0),
# (256, 4, 2, 1),
# (128, 4, 2, 1),
# (64, 4, 2, 1),
# (32, 4, 2, 1),
# (16, 4, 2, 1),
# (3, 1, 1, 0),
# ),
# weight_decay=1e-5,
# name="Decoder",
# **kwargs,
# ):
# self.z_dim = z_dim
# self.data_format = data_format = "channels_last"
# l2_kw = _get_l2_kw(weight_decay)
# layers = [
# tf.keras.layers.Reshape((1, 1, z_dim), input_shape=(z_dim,), name="reshape")
# ]
# for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
# dconv = tf.keras.layers.Conv2DTranspose(
# filters,
# kernel_size,
# strides=strides,
# use_bias=i == len(decoder_layers) - 1,
# data_format=data_format,
# name=f"dconv_{i}",
# **l2_kw,
# )
# crop = tf.keras.layers.Cropping2D(
# cropping=cropping, data_format=data_format, name=f"crop_{i}"
# )
# if i == len(decoder_layers) - 1:
# act = tf.keras.layers.Activation("tanh", name=f"tanh_{i}")
# bn = None
# else:
# act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
# bn = tf.keras.layers.BatchNormalization(
# scale=False, fused=False, name=f"bn_{i}"
# )
# if bn is not None:
# layers.extend([dconv, crop, bn, act])
# else:
# layers.extend([dconv, crop, act])
# with tf.name_scope(name):
# super().__init__(layers=layers, name=name, **kwargs)
def ConvDecoder(
z_dim,
decoder_layers=(
@@ -149,7 +97,7 @@ class Autoencoder(tf.keras.Model):
return z, x_hat
def autoencoder_face(z_dim=256, weight_decay=1e-9, decoder_last_act="tanh"):
def autoencoder_face(z_dim=256, weight_decay=1e-10, decoder_last_act="tanh"):
encoder = densenet161(
output_classes=z_dim, weight_decay=weight_decay, weights=None, name="DenseNet"
)
......
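# A minimal usage sketch (illustration only): assuming `autoencoder_face` wraps the
# DenseNet-161 encoder above in the Autoencoder model whose call returns `z, x_hat`
# (see the hunk above), it could be exercised on a small batch of face crops like
# this. The 224x224x3 input size and the exact reconstruction shape are assumptions.
model = autoencoder_face(z_dim=256)
images = tf.random.uniform((2, 224, 224, 3))
z, x_hat = model(images, training=False)
# z should be a (2, 256) embedding; x_hat an image-shaped reconstruction.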
"""Densely Connected Convolutional Networks.
Reference [
Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
Reference [Densely Connected Convolutional Networks](https://arxiv.org/abs/1608.06993)
"""
import tensorflow as tf
......
"""Discriminator used in training of autoencoder in:
IMPROVING CROSS-DATASET PERFORMANCE OF FACE PRESENTATION ATTACK DETECTION SYSTEMS USING FACE RECOGNITION DATASETS,
Mohammadi, Amir and Bhattacharjee, Sushil and Marcel, Sebastien, ICASSP 2020
"""
import tensorflow as tf
from ..gan.spectral_normalization import spectral_norm_regularizer
from ..utils import gram_matrix
class ConvDiscriminator(tf.keras.Model):
"""A discriminator that can sit on top of DenseNet 161's transition 1 block.
The output of that block given 224x224x3 inputs is 14x14x384."""
def __init__(self, data_format="channels_last", n_classes=1, **kwargs):
super().__init__(**kwargs)
self.data_format = data_format
self.n_classes = n_classes
act = "sigmoid" if n_classes == 1 else "softmax"
self.sequential_layers = [
tf.keras.layers.Conv2D(200, 1, data_format=data_format),
tf.keras.layers.Activation("relu"),
tf.keras.layers.AveragePooling2D(3, 2, data_format=data_format),
tf.keras.layers.Conv2D(100, 1, data_format=data_format),
tf.keras.layers.Activation("relu"),
tf.keras.layers.AveragePooling2D(3, 2, data_format=data_format),
tf.keras.layers.Flatten(data_format=data_format),
tf.keras.layers.Dense(n_classes),
tf.keras.layers.Activation(act),
]
def call(self, x, training=None):
for l in self.sequential_layers:
try:
x = l(x, training=training)
except TypeError:
x = l(x)
return x
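# Illustrative shape check (a sketch, with the input size taken from the docstring
# above): with the default channels_last format and n_classes=1, a batch of
# 14x14x384 feature maps should come out as one sigmoid score per sample.
disc = ConvDiscriminator()
scores = disc(tf.random.uniform((2, 14, 14, 384)), training=False)
# scores.shape == (2, 1)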
class ConvDiscriminator2(tf.keras.Model):
"""A discriminator that can sit on top of DenseNet 161's transition 1 block.
The output of that block given 224x224 inputs is 14x14x384. Here we want to output
15x15x128 features, which will match the output of the encoder in mcae.py given
these layers::
ENCODER_LAYERS = (
(32, 5, 1, 2),
(64, 5, 1, 2),
(128, 3, 1, 2),
(128, 3, 1, 2)
)
DECODER_LAYERS = (
(64, 3, 2, 1),
(32, 3, 2, 1),
(16, 5, 2, 2),
(8, 5, 2, 2),
(3, 2, 1, 1),
)
"""
def __init__(self, data_format="channels_last", **kwargs):
super().__init__(**kwargs)
self.data_format = data_format
self.sequential_layers = [
tf.keras.layers.ZeroPadding2D(
padding=((1, 0), (1, 0)), data_format=data_format
),
tf.keras.layers.Conv2D(256, 5, data_format=data_format, padding="same"),
tf.keras.layers.Activation("relu"),
tf.keras.layers.Conv2D(128, 5, data_format=data_format, padding="same"),
tf.keras.layers.Activation("relu"),
tf.keras.layers.Conv2D(128, 1, data_format=data_format, padding="same"),
tf.keras.layers.Activation("relu"),
]
def call(self, x, training=None):
for l in self.sequential_layers:
try:
x = l(x, training=training)
except TypeError:
x = l(x)
return x
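# Illustrative shape check (a sketch): the asymmetric zero padding turns a 14x14x384
# transition-1 output into 15x15, and the "same" convolutions keep that size, so the
# result should be the 15x15x128 map described in the docstring above.
disc2 = ConvDiscriminator2()
features = disc2(tf.random.uniform((2, 14, 14, 384)), training=False)
# features.shape == (2, 15, 15, 128)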
class ConvDiscriminator3(tf.keras.Model):
"""A discriminator that takes images and tries its best.
Be careful, this one returns logits."""
def __init__(self, data_format="channels_last", n_classes=1, **kwargs):
super().__init__(**kwargs)
self.data_format = data_format
self.n_classes = n_classes
spectral_norm = spectral_norm_regularizer(scale=1.0)
conv2d_kw = {"kernel_regularizer": spectral_norm, "data_format": data_format}
self.sequential_layers = [
tf.keras.layers.Conv2D(64, 3, strides=1, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.Conv2D(64, 4, strides=2, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.Conv2D(128, 3, strides=1, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.Conv2D(128, 4, strides=2, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.Conv2D(256, 3, strides=1, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.Conv2D(256, 4, strides=2, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.Conv2D(512, 3, strides=1, **conv2d_kw),
tf.keras.layers.LeakyReLU(0.1),
tf.keras.layers.GlobalAveragePooling2D(data_format=data_format),
tf.keras.layers.Dense(n_classes),
]
def call(self, x, training=None):
for l in self.sequential_layers:
try:
x = l(x, training=training)
except TypeError:
x = l(x)
return x
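# Illustrative sketch: this discriminator takes images directly and, as the docstring
# warns, returns logits, so a cross-entropy loss on its output should be computed
# with from_logits=True. The 224x224x3 input size is an assumption here; the strided
# convolutions and global pooling accept other sizes as well.
disc3 = ConvDiscriminator3()
logits = disc3(tf.random.uniform((2, 224, 224, 3)), training=False)
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)(tf.ones((2, 1)), logits)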
class DenseDiscriminator(tf.keras.Model):
def DenseDiscriminator(n_classes=1, name="DenseDiscriminator", **kwargs):
"""A discriminator that takes vectors as input and tries its best.
Be careful, this one returns logits."""
def __init__(self, n_classes=1, **kwargs):
super().__init__(**kwargs)
self.n_classes = n_classes
self.sequential_layers = [
return tf.keras.Sequential(
[
tf.keras.layers.Dense(1000),
tf.keras.layers.Activation("relu"),
tf.keras.layers.Dense(1000),
tf.keras.layers.Activation("relu"),
tf.keras.layers.Dense(n_classes),
]
def call(self, x, training=None):
for l in self.sequential_layers:
try:
x = l(x, training=training)
except TypeError:
x = l(x)
return x
class GramComparer1(tf.keras.Model):
"""A model to compare images based on their gram matrices."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.batchnorm = tf.keras.layers.BatchNormalization()
self.conv2d = tf.keras.layers.Conv2D(128, 7)
def call(self, x_1_2, training=None):
def _call(x):
x = self.batchnorm(x, training=training)
x = self.conv2d(x)
return gram_matrix(x)
gram1 = _call(x_1_2[..., :3])
gram2 = _call(x_1_2[..., 3:])
return -tf.reduce_mean((gram1 - gram2) ** 2, axis=[1, 2])[:, None]
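# Illustrative sketch: GramComparer1 expects the two images stacked along the channel
# axis (3 + 3 channels) and returns the negated mean squared difference between their
# Gram matrices, so outputs closer to zero indicate more similar texture statistics.
# The 56x56 spatial size is an arbitrary choice for the example (anything at least
# 7x7 works with the 7x7 convolution above).
comparer = GramComparer1()
pair = tf.concat(
    [tf.random.uniform((2, 56, 56, 3)), tf.random.uniform((2, 56, 56, 3))], axis=-1
)
similarity = comparer(pair, training=False)
# expected: similarity.shape == (2, 1), with 0 meaning identical Gram matrices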
],
name=name,
**kwargs
)
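# Illustrative sketch: the function-based DenseDiscriminator above builds a plain
# tf.keras.Sequential over flat feature vectors and, like ConvDiscriminator3, returns
# logits. The 256-dimensional input is an arbitrary choice for the example.
dense_disc = DenseDiscriminator(n_classes=1)
vector_logits = dense_disc(tf.random.uniform((2, 256)), training=False)
# vector_logits.shape == (2, 1)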
"""Patch-based CNN used for face PAD in:
Y. Atoum, Y. Liu, A. Jourabloo, and X. Liu, “Face anti-spoofing using patch and
depth-based CNNs,” in 2017 IEEE International Joint Conference on Biometrics (IJCB),
Denver, CO, 2017, pp. 319–328.
"""
import tensorflow as tf
......
"""
The network using keras (same as new_architecture function below)::
from tensorflow.python.keras import *
from tensorflow.python.keras.layers import *
simplecnn = Sequential([
Conv2D(32,(3,3),padding='same',use_bias=False, input_shape=(28,28,3)),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),
Conv2D(64,(3,3),padding='same',use_bias=False),
BatchNormalization(scale=False),
Activation('relu'),
MaxPool2D(padding='same'),
Flatten(),
Dense(1024, use_bias=False),
BatchNormalization(scale=False),
Activation('relu'),
Dropout(rate=0.4),
Dense(2, activation="softmax"),
])
simplecnn.summary()
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_1 (Conv2D) (None, 28, 28, 32) 864
_________________________________________________________________
batch_normalization_1 (Batch (None, 28, 28, 32) 96
_________________________________________________________________
activation_1 (Activation) (None, 28, 28, 32) 0
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 14, 14, 32) 0
_________________________________________________________________
conv2d_2 (Conv2D) (None, 14, 14, 64) 18432
_________________________________________________________________
batch_normalization_2 (Batch (None, 14, 14, 64) 192
_________________________________________________________________
activation_2 (Activation) (None, 14, 14, 64) 0
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 7, 7, 64) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 3136) 0
_________________________________________________________________
dense_1 (Dense) (None, 1024) 3211264
_________________________________________________________________
batch_normalization_3 (Batch (None, 1024) 3072
_________________________________________________________________
activation_3 (Activation) (None, 1024) 0
_________________________________________________________________
dropout_1 (Dropout) (None, 1024) 0
_________________________________________________________________
dense_2 (Dense) (None, 2) 2050
=================================================================
Total params: 3,235,970
Trainable params: 3,233,730
Non-trainable params: 2,240
_________________________________________________________________
"""
"""A small CNN used for patch-based Face PAD"""
from tensorflow.python.keras import Sequential, Input
from tensorflow.python.keras.layers import (
......