import tensorflow as tf
from bob.learn.tensorflow.models.densenet import densenet161


def _get_l2_kw(weight_decay):
    """Return a kernel_regularizer keyword dict with an L2 penalty, or an
    empty dict when weight_decay is None."""
    l2_kw = {}
    if weight_decay is not None:
        l2_kw = {"kernel_regularizer": tf.keras.regularizers.l2(weight_decay)}
    return l2_kw
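

# Illustrative sketch, not part of the original module: the dict returned by
# `_get_l2_kw` is meant to be unpacked into a Keras layer constructor, so the
# layer either receives an L2 kernel_regularizer or no regularizer at all.
def _example_l2_kw_usage(weight_decay=1e-5):
    # With weight_decay=None the kwargs are empty and the layer is unregularized.
    return tf.keras.layers.Conv2D(32, 3, padding="same", **_get_l2_kw(weight_decay))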


# class ConvDecoder(tf.keras.Sequential):
#     """The decoder similar to the one in
#     https://github.com/google/compare_gan/blob/master/compare_gan/architectures/sndcgan.py
#     """

#     def __init__(
#         self,
#         z_dim,
#         decoder_layers=(
#             (512, 7, 7, 0),
#             (256, 4, 2, 1),
#             (128, 4, 2, 1),
#             (64, 4, 2, 1),
#             (32, 4, 2, 1),
#             (16, 4, 2, 1),
#             (3, 1, 1, 0),
#         ),
#         weight_decay=1e-5,
#         name="Decoder",
#         **kwargs,
#     ):
#         self.z_dim = z_dim
#         self.data_format = data_format = "channels_last"
#         l2_kw = _get_l2_kw(weight_decay)
#         layers = [
#             tf.keras.layers.Reshape((1, 1, z_dim), input_shape=(z_dim,), name="reshape")
#         ]
#         for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
#             dconv = tf.keras.layers.Conv2DTranspose(
#                 filters,
#                 kernel_size,
#                 strides=strides,
#                 use_bias=i == len(decoder_layers) - 1,
#                 data_format=data_format,
#                 name=f"dconv_{i}",
#                 **l2_kw,
#             )
#             crop = tf.keras.layers.Cropping2D(
#                 cropping=cropping, data_format=data_format, name=f"crop_{i}"
#             )

#             if i == len(decoder_layers) - 1:
#                 act = tf.keras.layers.Activation("tanh", name=f"tanh_{i}")
#                 bn = None
#             else:
#                 act = tf.keras.layers.Activation("relu", name=f"relu_{i}")
#                 bn = tf.keras.layers.BatchNormalization(
#                     scale=False, fused=False, name=f"bn_{i}"
#                 )
#             if bn is not None:
#                 layers.extend([dconv, crop, bn, act])
#             else:
#                 layers.extend([dconv, crop, act])
#         with tf.name_scope(name):
#             super().__init__(layers=layers, name=name, **kwargs)


def ConvDecoder(
    z_dim,
    decoder_layers=(
        (512, 7, 7, 0),
        (256, 4, 2, 1),
        (128, 4, 2, 1),
        (64, 4, 2, 1),
        (32, 4, 2, 1),
        (16, 4, 2, 1),
        (3, 1, 1, 0),
    ),
    weight_decay=1e-5,
    last_act="tanh",
    name="Decoder",
    **kwargs,
):
    """The decoder similar to the one in
    https://github.com/google/compare_gan/blob/master/compare_gan/architectures/sndcgan.py
    """
    data_format = "channels_last"
    l2_kw = _get_l2_kw(weight_decay)
    layers = [
        tf.keras.layers.Reshape(
            (1, 1, z_dim), input_shape=(z_dim,), name=f"{name}/reshape"
        )
    ]
    for i, (filters, kernel_size, strides, cropping) in enumerate(decoder_layers):
        dconv = tf.keras.layers.Conv2DTranspose(
            filters,
            kernel_size,
            strides=strides,
            use_bias=i == len(decoder_layers) - 1,
            data_format=data_format,
            name=f"{name}/dconv_{i}",
            **l2_kw,
        )
        crop = tf.keras.layers.Cropping2D(
            cropping=cropping, data_format=data_format, name=f"{name}/crop_{i}"
        )

        if i == len(decoder_layers) - 1:
            act = tf.keras.layers.Activation(
                f"{last_act}", name=f"{name}/{last_act}_{i}"
            )
            bn = None
        else:
            act = tf.keras.layers.Activation("relu", name=f"{name}/relu_{i}")
            bn = tf.keras.layers.BatchNormalization(
                scale=False, fused=False, name=f"{name}/bn_{i}"
            )
        if bn is not None:
            layers.extend([dconv, crop, bn, act])
        else:
            layers.extend([dconv, crop, act])
    return tf.keras.Sequential(layers, name=name, **kwargs)
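

# Illustrative sketch, not part of the original module: with the default
# decoder_layers the spatial size grows 1 -> 7 -> 14 -> 28 -> 56 -> 112 -> 224,
# so a latent vector is decoded into a 224x224x3 image in the tanh range [-1, 1].
def _example_decoder_shapes(z_dim=256):
    decoder = ConvDecoder(z_dim=z_dim, name="ExampleDecoder")
    z = tf.random.normal((2, z_dim))
    x_hat = decoder(z, training=False)
    return x_hat.shape  # expected (2, 224, 224, 3)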


class Autoencoder(tf.keras.Model):
    """
    A class defining a simple convolutional autoencoder.

    Attributes
    ----------
    data_format : str
        Only channels_last is supported.
    decoder : object
        The decoder part.
    encoder : object
        The encoder part.
    """

    def __init__(self, encoder, decoder, name="Autoencoder", **kwargs):
        super().__init__(name=name, **kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def call(self, x, training=None):
        z = self.encoder(x, training=training)
        x_hat = self.decoder(z, training=training)
        return z, x_hat
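

# Illustrative training-step sketch, not part of the original module, assuming a
# plain mean-squared-error reconstruction loss on images already scaled to the
# decoder's tanh range [-1, 1]; Autoencoder.call returns both the latent code
# and the reconstruction.
def _example_train_step(model, optimizer, images):
    with tf.GradientTape() as tape:
        _, x_hat = model(images, training=True)
        loss = tf.reduce_mean(tf.square(images - x_hat))
        # add the L2 penalties registered through weight_decay, if any
        if model.losses:
            loss = loss + tf.add_n(model.losses)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss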


def autoencoder_face(z_dim=256, weight_decay=1e-9, decoder_last_act="tanh"):
    encoder = densenet161(
        output_classes=z_dim, weight_decay=weight_decay, weights=None, name="DenseNet"
    )
    decoder = ConvDecoder(
        z_dim=z_dim,
        weight_decay=weight_decay,
        last_act=decoder_last_act,
        name="Decoder",
    )
    autoencoder = Autoencoder(encoder, decoder, name="Autoencoder")
    return autoencoder
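

# Illustrative usage sketch, not part of the original module: build the full face
# autoencoder and run a forward pass on a dummy batch. This assumes the
# DenseNet-161 encoder accepts 224x224 RGB inputs, matching the 224x224x3
# reconstruction produced by the default decoder.
def _example_autoencoder_forward():
    model = autoencoder_face(z_dim=256, weight_decay=1e-9)
    images = tf.random.normal((2, 224, 224, 3))
    z, x_hat = model(images, training=False)
    return z.shape, x_hat.shape  # expected (2, 256) and (2, 224, 224, 3)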


if __name__ == "__main__":
    import pkg_resources
    from tabulate import tabulate
    from bob.learn.tensorflow.utils import model_summary

    model = ConvDecoder(z_dim=256, weight_decay=1e-9, last_act="tanh", name="Decoder")
    model.summary()
    rows = model_summary(model, do_print=True)
    del rows[-2]  # drop the second-to-last summary row before rendering the table
    print(tabulate(rows, headers="firstrow", tablefmt="latex"))