diff --git a/bob/__init__.py b/bob/__init__.py index 2ab1e28b150f0549def9963e9e87de3fdd6b2579..edbb4090fca046b19d22d3982711084621bff3be 100644 --- a/bob/__init__.py +++ b/bob/__init__.py @@ -1,3 +1,4 @@ # see https://docs.python.org/3/library/pkgutil.html from pkgutil import extend_path + __path__ = extend_path(__path__, __name__) diff --git a/bob/ip/__init__.py b/bob/ip/__init__.py index 2ab1e28b150f0549def9963e9e87de3fdd6b2579..edbb4090fca046b19d22d3982711084621bff3be 100644 --- a/bob/ip/__init__.py +++ b/bob/ip/__init__.py @@ -1,3 +1,4 @@ # see https://docs.python.org/3/library/pkgutil.html from pkgutil import extend_path + __path__ = extend_path(__path__, __name__) diff --git a/bob/ip/tensorflow_extractor/DrGanMSU.py b/bob/ip/tensorflow_extractor/DrGanMSU.py deleted file mode 100644 index 65caaa1f422f538995484aa307b1f95773e9721b..0000000000000000000000000000000000000000 --- a/bob/ip/tensorflow_extractor/DrGanMSU.py +++ /dev/null @@ -1,453 +0,0 @@ -#!/usr/bin/env python -# encoding: utf-8 - - -import numpy -import tensorflow as tf -import os -from bob.extension import rc -import logging -import bob.extension.download -import bob.io.base -logger = logging.getLogger(__name__) - - -class batch_norm(object): - """Code modification of http://stackoverflow.com/a/33950177""" - - def __init__(self, epsilon=1e-5, momentum=0.9, name="batch_norm"): - with tf.variable_scope(name): - self.epsilon = epsilon - self.momentum = momentum - self.name = name - - def __call__(self, x, train=True, reuse=False): - return tf.contrib.layers.batch_norm(x, - decay=self.momentum, - updates_collections=None, - epsilon=self.epsilon, - scale=True, - reuse=reuse, - is_training=train, - scope=self.name) - - -def conv2d(input_, output_dim, - k_h=3, k_w=3, d_h=2, d_w=2, stddev=0.02, - name="conv2d", reuse=False): - with tf.variable_scope(name, reuse=reuse): - w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim], - initializer=tf.truncated_normal_initializer(stddev=stddev)) - conv = tf.nn.conv2d( - input_, w, strides=[ - 1, d_h, d_w, 1], padding='SAME') - - biases = tf.get_variable( - 'biases', - [output_dim], - initializer=tf.constant_initializer(0.0)) - conv = tf.reshape(tf.nn.bias_add(conv, biases), conv.get_shape()) - - return conv - - -def elu(x, name='elu'): - return tf.nn.elu(x, name) - - -class DRGAN(object): - """ - class implementing the DR GAN, as described in - - - Note: this code has orginally been done by L.Tran @ MSU, and is heavily influenced - by the DCGAN code here: - - I simplified it to the maximum to increase readability, and to limit its usage - to face image encoding. - - **Parameters** - - image_size: int - The size of the (squared) image. - - gf_dim: int - The number of feature maps in the first convolutional layer (encoder and discriminator). - - gfc_dim: int - The dimension of the encoded id (output of the encoder). 
- """ - - def __init__(self, image_size=96, gf_dim=32, gfc_dim=320): - - self.image_size = image_size - self.gf_dim = gf_dim - self.gfc_dim = gfc_dim - - # batch normalization - self.g_bn0_0 = batch_norm(name='g_k_bn0_0') - self.g_bn0_1 = batch_norm(name='g_k_bn0_1') - self.g_bn0_2 = batch_norm(name='g_k_bn0_2') - self.g_bn0_3 = batch_norm(name='g_k_bn0_3') - self.g_bn1_0 = batch_norm(name='g_k_bn1_0') - self.g_bn1_1 = batch_norm(name='g_k_bn1_1') - self.g_bn1_2 = batch_norm(name='g_k_bn1_2') - self.g_bn1_3 = batch_norm(name='g_k_bn1_3') - self.g_bn2_0 = batch_norm(name='g_k_bn2_0') - self.g_bn2_1 = batch_norm(name='g_k_bn2_1') - self.g_bn2_2 = batch_norm(name='g_k_bn2_2') - self.g_bn2_3 = batch_norm(name='g_k_bn2_3') - self.g_bn3_0 = batch_norm(name='g_k_bn3_0') - self.g_bn3_1 = batch_norm(name='g_k_bn3_1') - self.g_bn3_2 = batch_norm(name='g_k_bn3_2') - self.g_bn3_3 = batch_norm(name='g_k_bn3_3') - self.g_bn4_0 = batch_norm(name='g_k_bn4_0') - self.g_bn4_1 = batch_norm(name='g_k_bn4_1') - self.g_bn4_2 = batch_norm(name='g_k_bn4_2') - self.g_bn4_c = batch_norm(name='g_h_bn4_c') - self.g_bn5 = batch_norm(name='g_k_bn5') - - def generator_encoder(self, image, is_reuse=False, is_training=True): - """ Function that define the graph doing the encoding of a face image. - - **Parameters** - - image: numpy array - The input image - - is_reuse: bool - Reuse variables. - - is_training: bool - Flag for training mode - - **Returns** - The encoded id - """ - s16 = int(self.image_size / 16) - k0_0 = image - - k0_1 = elu( - self.g_bn0_1( - conv2d( - k0_0, - self.gf_dim * 1, - d_h=1, - d_w=1, - name='g_k01_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k01_prelu') - k0_2 = elu( - self.g_bn0_2( - conv2d( - k0_1, - self.gf_dim * 2, - d_h=1, - d_w=1, - name='g_k02_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k02_prelu') - - k1_0 = elu( - self.g_bn1_0( - conv2d( - k0_2, - self.gf_dim * 2, - d_h=2, - d_w=2, - name='g_k10_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k10_prelu') - k1_1 = elu( - self.g_bn1_1( - conv2d( - k1_0, - self.gf_dim * 2, - d_h=1, - d_w=1, - name='g_k11_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k11_prelu') - k1_2 = elu( - self.g_bn1_2( - conv2d( - k1_1, - self.gf_dim * 4, - d_h=1, - d_w=1, - name='g_k12_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k12_prelu') - - k2_0 = elu( - self.g_bn2_0( - conv2d( - k1_2, - self.gf_dim * 4, - d_h=2, - d_w=2, - name='g_k20_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k20_prelu') - k2_1 = elu( - self.g_bn2_1( - conv2d( - k2_0, - self.gf_dim * 3, - d_h=1, - d_w=1, - name='g_k21_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k21_prelu') - k2_2 = elu( - self.g_bn2_2( - conv2d( - k2_1, - self.gf_dim * 6, - d_h=1, - d_w=1, - name='g_k22_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k22_prelu') - - k3_0 = elu( - self.g_bn3_0( - conv2d( - k2_2, - self.gf_dim * 6, - d_h=2, - d_w=2, - name='g_k30_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k30_prelu') - k3_1 = elu( - self.g_bn3_1( - conv2d( - k3_0, - self.gf_dim * 4, - d_h=1, - d_w=1, - name='g_k31_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k31_prelu') - k3_2 = elu( - self.g_bn3_2( - conv2d( - k3_1, - self.gf_dim * 8, - d_h=1, - d_w=1, - name='g_k32_conv', - reuse=is_reuse), - train=is_training, - 
reuse=is_reuse), - name='g_k32_prelu') - - k4_0 = elu( - self.g_bn4_0( - conv2d( - k3_2, - self.gf_dim * 8, - d_h=2, - d_w=2, - name='g_k40_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k40_prelu') - k4_1 = elu( - self.g_bn4_1( - conv2d( - k4_0, - self.gf_dim * 5, - d_h=1, - d_w=1, - name='g_k41_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse), - name='g_k41_prelu') - k4_2 = self.g_bn4_2( - conv2d( - k4_1, - self.gfc_dim, - d_h=1, - d_w=1, - name='g_k42_conv', - reuse=is_reuse), - train=is_training, - reuse=is_reuse) - - k5 = tf.nn.avg_pool( - k4_2, ksize=[ - 1, s16, s16, 1], strides=[ - 1, 1, 1, 1], padding='VALID') - k5 = tf.reshape(k5, [-1, self.gfc_dim]) - - # dropout if training - if (is_training): - k5 = tf.nn.dropout(k5, keep_prob=0.6) - - return k5 - - -class DrGanMSUExtractor(object): - """Wrapper for the free DRGan by L.Tran @ MSU: - - To use this class as a bob.bio.base extractor:: - - from bob.bio.base.extractor import Extractor - class DrGanMSUExtractorBioBase(DrGanMSUExtractor, Extractor): - pass - extractor = DrGanMSUExtractorBioBase() - - - **Parameters:** - - model_file: - Path to the model - - image_size: list - The input image size (WxHxC) - - """ - - def __init__(self, model_path=rc["bob.ip.tensorflow_extractor.drgan_modelpath"], image_size=[96, 96, 3]): - - self.image_size = image_size - self.session = tf.Session() - - # placeholder for the input image - data_shape = [1] + self.image_size - self.X = tf.placeholder(tf.float32, shape=data_shape) - - # the encoder - self.drgan = DRGAN() - self.encode = self.drgan.generator_encoder( - self.X, is_reuse=False, is_training=False) - - # If the path is not, set the default path - if model_path is None: - model_path = self.get_modelpath() - - # If does not exist, download - if not os.path.exists(model_path): - bob.io.base.create_directories_safe(DrGanMSUExtractor.get_modelpath()) - zip_file = os.path.join(DrGanMSUExtractor.get_modelpath(), - "DR_GAN_model.zip") - urls = [ - # This is a private link at Idiap to save bandwidth. - "http://beatubulatest.lab.idiap.ch/private/wheels/gitlab/" - "DR_GAN_model.zip", - ] - - bob.extension.download.download_and_unzip(urls, zip_file) - - self.saver = tf.train.Saver() - # Reestore either from the last checkpoint or from a particular checkpoint - if os.path.isdir(model_path): - self.saver.restore(self.session, - tf.train.latest_checkpoint(model_path)) - else: - self.saver.restore(self.session, model_path) - - @staticmethod - def get_modelpath(): - - # Priority to the RC path - model_path = rc[DrGanMSUExtractor.get_rcvariable()] - - if model_path is None: - import pkg_resources - model_path = pkg_resources.resource_filename(__name__, - 'data/DR_GAN_model') - - return model_path - - - @staticmethod - def get_rcvariable(): - return "bob.ip.tensorflow_extractor.drgan_modelpath" - - - def __call__(self, image): - """__call__(image) -> feature - - Extract features - - **Parameters:** - - image : 3D :py:class:`numpy.ndarray` (floats) - The image to extract the features from. 
- - **Returns:** - - feature : 2D :py:class:`numpy.ndarray` (floats) - The extracted features - """ - def bob2skimage(bob_image): - """ - Convert bob color image to the skcit image - """ - - if len(bob_image.shape) == 3: - skimage = numpy.zeros( - shape=( - bob_image.shape[1], - bob_image.shape[2], - 3), - dtype=numpy.uint8) - skimage[:, :, 0] = bob_image[0, :, :] - skimage[:, :, 1] = bob_image[1, :, :] - skimage[:, :, 2] = bob_image[2, :, :] - else: - skimage = numpy.zeros( - shape=( - bob_image.shape[0], - bob_image.shape[1], - 1)) - skimage[:, :, 0] = bob_image[:, :] - return skimage - - def rescaleToUint8(image): - result = numpy.zeros_like(image) - for channel in range(image.shape[2]): - min_image = numpy.min(image[:, :, channel]) - max_image = numpy.max(image[:, :, channel]) - if (max_image - min_image) != 0: - result[:, :, channel] = 255.0 * \ - ((image[:, :, channel] - min_image) / (max_image - min_image)) - else: - result[:, :, channel] = 0 - result = result.astype('uint8') - return result - - # encode the provided image - image = rescaleToUint8(image) - image = bob2skimage(image) - image = numpy.array(image / 127.5 - 1).astype(numpy.float32) - shape = [1] + list(image.shape) - img = numpy.reshape(image, tuple(shape)) - encoded_id = self.session.run(self.encode, feed_dict={self.X: img}) - return encoded_id - diff --git a/bob/ip/tensorflow_extractor/Extractor.py b/bob/ip/tensorflow_extractor/Extractor.py old mode 100755 new mode 100644 index a3c3f6d120f4f5baa1218fcc83faee7cd12d6f94..96e02d82acb69442698a27a65e7d44a9d53d38cc --- a/bob/ip/tensorflow_extractor/Extractor.py +++ b/bob/ip/tensorflow_extractor/Extractor.py @@ -35,14 +35,17 @@ class Extractor(object): self.graph = graph # Initializing the variables of the current graph - self.session = tf.Session() - self.session.run(tf.global_variables_initializer()) + self.session = tf.compat.v1.Session() + self.session.run(tf.compat.v1.global_variables_initializer()) # Loading the last checkpoint and overwriting the current variables - saver = tf.train.Saver() + saver = tf.compat.v1.train.Saver() if os.path.splitext(checkpoint_filename)[1] == ".meta": - saver.restore(self.session, tf.train.latest_checkpoint(os.path.dirname(checkpoint_filename))) + saver.restore( + self.session, + tf.train.latest_checkpoint(os.path.dirname(checkpoint_filename)), + ) elif os.path.isdir(checkpoint_filename): saver.restore(self.session, tf.train.latest_checkpoint(checkpoint_filename)) else: @@ -52,10 +55,8 @@ class Extractor(object): if debug: self.session = tf_debug.LocalCLIDebugWrapperSession(self.session) - def __del__(self): - tf.reset_default_graph() - + tf.compat.v1.reset_default_graph() def __call__(self, data): """ @@ -73,4 +74,3 @@ class Extractor(object): """ return self.session.run(self.graph, feed_dict={self.input_tensor: data}) - diff --git a/bob/ip/tensorflow_extractor/FaceNet.py b/bob/ip/tensorflow_extractor/FaceNet.py index 2c2838f460697a7aa28760e97b4bd40ae5bf726c..d7a58e591fbe788e41df3ca9d0cdee1a90ad8ec6 100644 --- a/bob/ip/tensorflow_extractor/FaceNet.py +++ b/bob/ip/tensorflow_extractor/FaceNet.py @@ -12,6 +12,8 @@ import bob.io.base logger = logging.getLogger(__name__) +FACENET_MODELPATH_KEY = "bob.ip.tensorflow_extractor.facenet_modelpath" + def prewhiten(img): mean = numpy.mean(img) @@ -24,18 +26,18 @@ def prewhiten(img): def get_model_filenames(model_dir): # code from https://github.com/davidsandberg/facenet files = os.listdir(model_dir) - meta_files = [s for s in files if s.endswith('.meta')] + meta_files = [s for s in files if 
s.endswith(".meta")] if len(meta_files) == 0: - raise ValueError( - 'No meta file found in the model directory (%s)' % model_dir) + raise ValueError("No meta file found in the model directory (%s)" % model_dir) elif len(meta_files) > 1: raise ValueError( - 'There should not be more than one meta file in the model ' - 'directory (%s)' % model_dir) + "There should not be more than one meta file in the model " + "directory (%s)" % model_dir + ) meta_file = meta_files[0] max_step = -1 for f in files: - step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f) + step_str = re.match(r"(^model-[\w\- ]+.ckpt-(\d+))", f) if step_str is not None and len(step_str.groups()) >= 2: step = int(step_str.groups()[1]) if step > max_step: @@ -74,11 +76,12 @@ class FaceNet(object): """ def __init__( - self, - model_path=rc["bob.ip.tensorflow_extractor.facenet_modelpath"], - image_size=160, - layer_name='embeddings:0', - **kwargs): + self, + model_path=rc[FACENET_MODELPATH_KEY], + image_size=160, + layer_name="embeddings:0", + **kwargs + ): super(FaceNet, self).__init__() self.model_path = model_path self.image_size = image_size @@ -101,14 +104,12 @@ class FaceNet(object): self.model_path = self.get_modelpath() if not os.path.exists(self.model_path): bob.io.base.create_directories_safe(FaceNet.get_modelpath()) - zip_file = os.path.join(FaceNet.get_modelpath(), - "20170512-110547.zip") + zip_file = os.path.join(FaceNet.get_modelpath(), "20170512-110547.zip") urls = [ - # This is a private link at Idiap to save bandwidth. - "http://beatubulatest.lab.idiap.ch/private/wheels/gitlab/" + # This link only works in Idiap CI to save bandwidth. + "http://www.idiap.ch/private/wheels/gitlab/" "facenet_model2_20170512-110547.zip", # this link to dropbox would work for everybody - # previous link to gogle drive would require cookies "https://www.dropbox.com/s/" "k7bhxe58q7d48g7/facenet_model2_20170512-110547.zip?dl=1", ] @@ -117,51 +118,43 @@ class FaceNet(object): # code from https://github.com/davidsandberg/facenet model_exp = os.path.expanduser(self.model_path) with self.graph.as_default(): - if (os.path.isfile(model_exp)): - logger.info('Model filename: %s' % model_exp) - with tf.gfile.FastGFile(model_exp, 'rb') as f: - graph_def = tf.GraphDef() + if os.path.isfile(model_exp): + logger.info("Model filename: %s" % model_exp) + with tf.compat.v1.gfile.FastGFile(model_exp, "rb") as f: + graph_def = tf.compat.v1.GraphDef() graph_def.ParseFromString(f.read()) - tf.import_graph_def(graph_def, name='') + tf.import_graph_def(graph_def, name="") else: - logger.info('Model directory: %s' % model_exp) + logger.info("Model directory: %s" % model_exp) meta_file, ckpt_file = get_model_filenames(model_exp) - logger.info('Metagraph file: %s' % meta_file) - logger.info('Checkpoint file: %s' % ckpt_file) + logger.info("Metagraph file: %s" % meta_file) + logger.info("Checkpoint file: %s" % ckpt_file) - saver = tf.train.import_meta_graph( - os.path.join(model_exp, meta_file)) - saver.restore(self.session, - os.path.join(model_exp, ckpt_file)) + saver = tf.compat.v1.train.import_meta_graph( + os.path.join(model_exp, meta_file) + ) + saver.restore(self.session, os.path.join(model_exp, ckpt_file)) # Get input and output tensors self.images_placeholder = self.graph.get_tensor_by_name("input:0") self.embeddings = self.graph.get_tensor_by_name(self.layer_name) - self.phase_train_placeholder = self.graph.get_tensor_by_name( - "phase_train:0") + self.phase_train_placeholder = self.graph.get_tensor_by_name("phase_train:0") logger.info("Successfully 
loaded the model.") def __call__(self, img): images = self._check_feature(img) if self.session is None: self.graph = tf.Graph() - self.session = tf.Session(graph=self.graph) + self.session = tf.compat.v1.Session(graph=self.graph) if self.embeddings is None: self.load_model() - feed_dict = {self.images_placeholder: images, - self.phase_train_placeholder: False} - features = self.session.run( - self.embeddings, feed_dict=feed_dict) + feed_dict = { + self.images_placeholder: images, + self.phase_train_placeholder: False, + } + features = self.session.run(self.embeddings, feed_dict=feed_dict) return features.flatten() - @staticmethod - def get_rcvariable(): - """ - Variable name used in the Bob Global Configuration System - https://www.idiap.ch/software/bob/docs/bob/bob.extension/stable/rc.html - """ - return "bob.ip.tensorflow_extractor.facenet_modelpath" - @staticmethod def get_modelpath(): """ @@ -173,11 +166,13 @@ class FaceNet(object): """ # Priority to the RC path - model_path = rc[FaceNet.get_rcvariable()] + model_path = rc["bob.ip.tensorflow_extractor.facenet_modelpath"] if model_path is None: import pkg_resources + model_path = pkg_resources.resource_filename( - __name__, 'data/FaceNet/20170512-110547') + __name__, "data/FaceNet/20170512-110547" + ) return model_path diff --git a/bob/ip/tensorflow_extractor/MTCNN.py b/bob/ip/tensorflow_extractor/MTCNN.py index 1e21778714e17bf259c8b9180b3d5f24eec339fe..03e9830640e31faa035a2c15edde9a1e36b19684 100644 --- a/bob/ip/tensorflow_extractor/MTCNN.py +++ b/bob/ip/tensorflow_extractor/MTCNN.py @@ -39,14 +39,14 @@ class MTCNN: graph = tf.Graph() with graph.as_default(): with open(model_path, "rb") as f: - graph_def = tf.GraphDef.FromString(f.read()) + graph_def = tf.compat.v1.GraphDef.FromString(f.read()) tf.import_graph_def(graph_def, name="") self.graph = graph - config = tf.ConfigProto( + config = tf.compat.v1.ConfigProto( intra_op_parallelism_threads=multiprocessing.cpu_count(), inter_op_parallelism_threads=multiprocessing.cpu_count(), ) - self.sess = tf.Session(graph=graph, config=config) + self.sess = tf.compat.v1.Session(graph=graph, config=config) def detect(self, img): """Detects all faces in the image. diff --git a/bob/ip/tensorflow_extractor/Vgg16.py b/bob/ip/tensorflow_extractor/Vgg16.py deleted file mode 100644 index bc0dd8bad105edad8362028a23cd4584d26a6fa3..0000000000000000000000000000000000000000 --- a/bob/ip/tensorflow_extractor/Vgg16.py +++ /dev/null @@ -1,150 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch> - -import tensorflow as tf -from tensorflow.contrib.slim.python.slim.nets import vgg -import tensorflow.contrib.slim as slim -from tensorflow.contrib.layers.python.layers import layers as layers_lib -from tensorflow.contrib import layers -from tensorflow.contrib.framework.python.ops import arg_scope -from .Extractor import Extractor -import numpy -import bob.extension.download -import os - -def vgg_16(inputs, - reuse=None, - dropout_keep_prob=0.5, - weight_decay=0.0005, - mode=tf.estimator.ModeKeys.TRAIN, **kwargs): - """ - Oxford Net VGG 16-Layers version E Example from tf-slim - - Adapted from here. - https://raw.githubusercontent.com/tensorflow/models/master/research/slim/nets/vgg.py - - **Parameters**: - - inputs: a 4-D tensor of size [batch_size, height, width, 3]. - - reuse: whether or not the network and its variables should be reused. To be - able to reuse 'scope' must be given. 
- - mode: - Estimator mode keys - """ - end_points = dict() - with tf.variable_scope('vgg_16', reuse=reuse): - - with arg_scope([layers.conv2d, layers_lib.fully_connected], - activation_fn=tf.nn.relu, - weights_regularizer=None, - biases_initializer=tf.zeros_initializer(), - trainable=mode==tf.estimator.ModeKeys.PREDICT): - - # Collect outputs for conv2d, fully_connected and max_pool2d. - - net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1') - net = layers_lib.max_pool2d(net, [2, 2], scope='pool1') - end_points['conv1'] = net - - net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2') - net = layers_lib.max_pool2d(net, [2, 2], scope='pool2') - end_points['conv2'] = net - - net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3') - net = layers_lib.max_pool2d(net, [2, 2], scope='pool3') - end_points['conv3'] = net - - net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4') - net = layers_lib.max_pool2d(net, [2, 2], scope='pool4') - end_points['conv4'] = net - - net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5') - net = layers_lib.max_pool2d(net, [2, 2], scope='pool5') - end_points['conv5'] = net - - net = slim.flatten(net) - - net = layers.fully_connected(net, 4096, scope='fc6') - end_points['fc6'] = net - - net = layers.fully_connected(net, 4096, scope='fc7', activation_fn=None) - - end_points['fc7'] = net - - return net, end_points - - -class VGGFace(Extractor): - """ - Extract features using the VGG model - http://www.robots.ox.ac.uk/~vgg/software/vgg_face/ - - This was converted with the script https://github.com/tiagofrepereira2012 - - """ - - def __init__(self, checkpoint_filename=None, debug=False): - # Average image provided in - # http://www.robots.ox.ac.uk/~vgg/software/vgg_face/ - self.average_img = [129.1863, 104.7624, 93.5940] - - if checkpoint_filename is None: - checkpoint_filename = os.path.join(VGGFace.get_vggpath(),"vgg_face_tf") - - # Downloading the model if necessary - if not os.path.exists(checkpoint_filename): - zip_file = os.path.join(VGGFace.get_vggpath(), "vgg_face_tf.tar.gz") - - urls = [ - # This is a private link at Idiap to save bandwidth. 
- "https://www.idiap.ch/software/bob/data/bob/bob.ip.tensorflow_extractor/master/" - "vgg_face_tf.tar.gz", - "http://www.idiap.ch/software/bob/data/bob/bob.ip.tensorflow_extractor/master/" - "vgg_face_tf.tar.gz", - ] - - bob.extension.download.download_and_unzip(urls, zip_file) - - input_tensor = tf.placeholder(tf.float32, shape=(1, 224, 224, 3)) - graph = vgg_16(input_tensor)[0] - - super(VGGFace, self).__init__(checkpoint_filename=os.path.join(checkpoint_filename), - input_tensor=input_tensor, - graph=graph, - debug=debug) - - - def __call__(self, image): - - if len(image.shape) == 3: - - # Converting from RGB to BGR - R = image[0, :, :] - self.average_img[0] - G = image[1, :, :] - self.average_img[1] - B = image[2, :, :] - self.average_img[2] - - # Converting to - bgr_image = numpy.zeros(shape=image.shape) - bgr_image[0, :, :] = B - bgr_image[1, :, :] = G - bgr_image[2, :, :] = R - - # SWAPING TO CxHxW to HxWxC - bgr_image = numpy.moveaxis(bgr_image,0,-1) - bgr_image = numpy.expand_dims(bgr_image,0) - - if self.session is None: - self.session = tf.InteractiveSession() - return self.session.run(self.graph, feed_dict={self.input_tensor: bgr_image})[0] - else: - raise ValueError("Image should have 3 channels") - - - @staticmethod - def get_vggpath(): - import pkg_resources - return pkg_resources.resource_filename(__name__, 'data') - diff --git a/bob/ip/tensorflow_extractor/__init__.py b/bob/ip/tensorflow_extractor/__init__.py old mode 100755 new mode 100644 index 3899dd2eb915bedb2ac434089f95d739ea96f276..43d1428f7440c81e1c03bf8b4de1825717220067 --- a/bob/ip/tensorflow_extractor/__init__.py +++ b/bob/ip/tensorflow_extractor/__init__.py @@ -1,43 +1,17 @@ #!/usr/bin/env python -def scratch_network(inputs, end_point="fc1", reuse=False): - - import tensorflow as tf - slim = tf.contrib.slim - - # Creating a random network - initializer = tf.contrib.layers.xavier_initializer(seed=10) - end_points = dict() - - graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1, - scope='conv1', weights_initializer=initializer, - reuse=reuse) - end_points["conv1"] = graph - - graph = slim.max_pool2d(graph, [4, 4], scope='pool1') - end_points["pool1"] = graph - - graph = slim.flatten(graph, scope='flatten1') - end_points["flatten1"] = graph - - graph = slim.fully_connected(graph, 10, activation_fn=None, scope='fc1', - weights_initializer=initializer, reuse=reuse) - end_points["fc1"] = graph - - return end_points[end_point] def get_config(): """Returns a string containing the configuration information. 
""" import bob.extension + return bob.extension.get_config(__name__) -from .Extractor import Extractor from .FaceNet import FaceNet -from .DrGanMSU import DrGanMSUExtractor -from .Vgg16 import VGGFace, vgg_16 from .MTCNN import MTCNN +from .Extractor import Extractor # gets sphinx autodoc done right - don't remove it @@ -56,13 +30,7 @@ def __appropriate__(*args): obj.__module__ = __name__ -__appropriate__( - Extractor, - FaceNet, - DrGanMSUExtractor, - VGGFace, - MTCNN, -) +__appropriate__(FaceNet, MTCNN, Extractor) # gets sphinx autodoc done right - don't remove it -__all__ = [_ for _ in dir() if not _.startswith('_')] +__all__ = [_ for _ in dir() if not _.startswith("_")] diff --git a/bob/ip/tensorflow_extractor/data/checkpoint b/bob/ip/tensorflow_extractor/data/checkpoint deleted file mode 100644 index b12413de4ce3b1d66ef87edc2c45f1025ade150a..0000000000000000000000000000000000000000 --- a/bob/ip/tensorflow_extractor/data/checkpoint +++ /dev/null @@ -1,2 +0,0 @@ -model_checkpoint_path: "model.ckp" -all_model_checkpoint_paths: "model.ckp" diff --git a/bob/ip/tensorflow_extractor/data/model.ckp.data-00000-of-00001 b/bob/ip/tensorflow_extractor/data/model.ckp.data-00000-of-00001 deleted file mode 100644 index 2a854a2e6819b8fa65b82031237bef1d2e891e02..0000000000000000000000000000000000000000 Binary files a/bob/ip/tensorflow_extractor/data/model.ckp.data-00000-of-00001 and /dev/null differ diff --git a/bob/ip/tensorflow_extractor/data/model.ckp.index b/bob/ip/tensorflow_extractor/data/model.ckp.index deleted file mode 100644 index ad2bc305a685a829d683d664e9ab0622bcd7c1e0..0000000000000000000000000000000000000000 Binary files a/bob/ip/tensorflow_extractor/data/model.ckp.index and /dev/null differ diff --git a/bob/ip/tensorflow_extractor/data/model.ckp.meta b/bob/ip/tensorflow_extractor/data/model.ckp.meta deleted file mode 100644 index da24801f6e8a6ed46e0c433684867d712e215d68..0000000000000000000000000000000000000000 Binary files a/bob/ip/tensorflow_extractor/data/model.ckp.meta and /dev/null differ diff --git a/bob/ip/tensorflow_extractor/test.py b/bob/ip/tensorflow_extractor/test.py index dff72a5ba1faa6372228f12bdcc6454a5a395aa0..7ef9d2becca0ccc3f906e6be020008829319aab8 100644 --- a/bob/ip/tensorflow_extractor/test.py +++ b/bob/ip/tensorflow_extractor/test.py @@ -1,47 +1,11 @@ import bob.io.base import bob.io.image from bob.io.base.test_utils import datafile -import bob.ip.tensorflow_extractor -import tensorflow as tf - -import pkg_resources import numpy import json -import os numpy.random.seed(10) -slim = tf.contrib.slim -from . 
import scratch_network - - -def test_output(): - - # Loading MNIST model - filename = os.path.join( - pkg_resources.resource_filename(__name__, "data"), "model.ckp" - ) - inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1)) - - # Testing the last output - graph = scratch_network(inputs) - extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph) - - data = numpy.random.rand(2, 28, 28, 1).astype("float32") - output = extractor(data) - assert extractor(data).shape == (2, 10) - del extractor - - # Testing flatten - inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1)) - graph = scratch_network(inputs, end_point="flatten1") - extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph) - - data = numpy.random.rand(2, 28, 28, 1).astype("float32") - output = extractor(data) - assert output.shape == (2, 1690) - del extractor - def test_facenet(): from bob.ip.tensorflow_extractor import FaceNet @@ -52,24 +16,6 @@ def test_facenet(): assert output.size == 128, output.shape -def test_drgan(): - from bob.ip.tensorflow_extractor import DrGanMSUExtractor - - extractor = DrGanMSUExtractor() - data = numpy.random.rand(3, 96, 96).astype("uint8") - output = extractor(data) - assert output.size == 320, output.shape - - -def test_vgg16(): - pass - # from bob.ip.tensorflow_extractor import VGGFace - # extractor = VGGFace() - # data = numpy.random.rand(3, 224, 224).astype("uint8") - # output = extractor(data) - # assert output.size == 4096, output.shape - - def test_mtcnn(): test_image = datafile("mtcnn/test_image.png", __name__) ref_numbers = datafile("mtcnn/mtcnn.hdf5", __name__) diff --git a/doc/conf.py b/doc/conf.py index bb602aa67dd0932d115cd02503ac188f78ff4b2d..298524a74f42007cf18f38cd39bf240fe91b8cd4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -10,30 +10,31 @@ import pkg_resources # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '1.3' +needs_sphinx = "1.3" # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = [ - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.ifconfig', - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest', - 'sphinx.ext.graphviz', - 'sphinx.ext.intersphinx', - 'sphinx.ext.napoleon', - 'sphinx.ext.viewcode', - 'matplotlib.sphinxext.plot_directive' - ] + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.ifconfig", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.doctest", + "sphinx.ext.graphviz", + "sphinx.ext.intersphinx", + "sphinx.ext.napoleon", + "sphinx.ext.viewcode", + "matplotlib.sphinxext.plot_directive", +] import sphinx + if sphinx.__version__ >= "1.4.1": - extensions.append('sphinx.ext.imgmath') - imgmath_image_format = 'svg' + extensions.append("sphinx.ext.imgmath") + imgmath_image_format = "svg" else: - extensions.append('sphinx.ext.pngmath') + extensions.append("sphinx.ext.pngmath") # Be picky about warnings nitpicky = True @@ -42,13 +43,13 @@ nitpicky = True nitpick_ignore = [] # Allows the user to override warnings from a separate file -if os.path.exists('nitpick-exceptions.txt'): - for line in open('nitpick-exceptions.txt'): +if os.path.exists("nitpick-exceptions.txt"): + for line in open("nitpick-exceptions.txt"): if line.strip() == "" or line.startswith("#"): continue dtype, target = line.split(None, 1) target = target.strip() - try: # python 2.x + try: # python 2.x target = unicode(target) except NameError: pass @@ -64,25 +65,27 @@ autosummary_generate = True numfig = True # If we are on OSX, the 'dvipng' path maybe different -dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng' -if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx +dvipng_osx = "/opt/local/libexec/texlive/binaries/dvipng" +if os.path.exists(dvipng_osx): + pngmath_dvipng = dvipng_osx # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. -#source_encoding = 'utf-8-sig' +# source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'bob.ip.tensorflow_extractor' +project = u"bob.ip.tensorflow_extractor" import time -copyright = u'%s, Idiap Research Institute' % time.strftime('%Y') + +copyright = u"%s, Idiap Research Institute" % time.strftime("%Y") # Grab the setup entry distribution = pkg_resources.require(project)[0] @@ -98,42 +101,42 @@ release = distribution.version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. -#language = None +# language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: -#today = '' +# today = '' # Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' +# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['links.rst'] +exclude_patterns = ["links.rst"] # The reST default role (used for this markup: `text`) to use for all documents. -#default_role = None +# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True +# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
-#add_module_names = True +# add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. -#show_authors = False +# show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] +# modindex_common_prefix = [] # Some variables which are useful for generated material -project_variable = project.replace('.', '_') -short_description = u'Tensorflow bindings' -owner = [u'Idiap Research Institute'] +project_variable = project.replace(".", "_") +short_description = u"Tensorflow bindings" +owner = [u"Idiap Research Institute"] # -- Options for HTML output --------------------------------------------------- @@ -141,80 +144,81 @@ owner = [u'Idiap Research Institute'] # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. import sphinx_rtd_theme -html_theme = 'sphinx_rtd_theme' + +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -#html_theme_options = {} +# html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". -#html_title = None +# html_title = None # A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = project_variable +# html_short_title = project_variable # The name of an image file (relative to this directory) to place at the top # of the sidebar. -html_logo = 'img/logo.png' +html_logo = "img/logo.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. -html_favicon = 'img/favicon.ico' +html_favicon = "img/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' +# html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. -#html_use_smartypants = True +# html_use_smartypants = True # Custom sidebar templates, maps document names to template names. -#html_sidebars = {} +# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. -#html_additional_pages = {} +# html_additional_pages = {} # If false, no module index is generated. -#html_domain_indices = True +# html_domain_indices = True # If false, no index is generated. -#html_use_index = True +# html_use_index = True # If true, the index is split into individual pages for each letter. -#html_split_index = False +# html_split_index = False # If true, links to the reST sources are added to the pages. -#html_show_sourcelink = True +# html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. 
Default is True. -#html_show_sphinx = True +# html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True +# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. -#html_use_opensearch = '' +# html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None +# html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = project_variable + u'_doc' +htmlhelp_basename = project_variable + u"_doc" # -- Post configuration -------------------------------------------------------- @@ -224,44 +228,48 @@ rst_epilog = """ .. |project| replace:: Bob .. |version| replace:: %s .. |current-year| date:: %%Y -""" % (version,) +""" % ( + version, +) # Default processing flags for sphinx -autoclass_content = 'class' -autodoc_member_order = 'bysource' +autoclass_content = "class" +autodoc_member_order = "bysource" autodoc_default_flags = [ - 'members', - 'show-inheritance', - ] + "members", + "show-inheritance", +] # For inter-documentation mapping: from bob.extension.utils import link_documentation, load_requirements + sphinx_requirements = "extra-intersphinx.txt" if os.path.exists(sphinx_requirements): - intersphinx_mapping = link_documentation( - additional_packages=['python','numpy'] + \ - load_requirements(sphinx_requirements) - ) + intersphinx_mapping = link_documentation( + additional_packages=["python", "numpy"] + load_requirements(sphinx_requirements) + ) else: - intersphinx_mapping = link_documentation() + intersphinx_mapping = link_documentation() # We want to remove all private (i.e. _. or __.__) members # that are not in the list of accepted functions -accepted_private_functions = ['__array__'] +accepted_private_functions = ["__array__"] + def member_function_test(app, what, name, obj, skip, options): - # test if we have a private function - if len(name) > 1 and name[0] == '_': - # test if this private function should be allowed - if name not in accepted_private_functions: - # omit privat functions that are not in the list of accepted private functions - return skip - else: - # test if the method is documented - if not hasattr(obj, '__doc__') or not obj.__doc__: - return skip - return False + # test if we have a private function + if len(name) > 1 and name[0] == "_": + # test if this private function should be allowed + if name not in accepted_private_functions: + # omit privat functions that are not in the list of accepted private functions + return skip + else: + # test if the method is documented + if not hasattr(obj, "__doc__") or not obj.__doc__: + return skip + return False + def setup(app): - app.connect('autodoc-skip-member', member_function_test) + app.connect("autodoc-skip-member", member_function_test) diff --git a/doc/guide.rst b/doc/guide.rst index 529a2d8dc4e9059c2fc2e2927dfd7402dbb19192..874d5741f3b4f127d8b918ea8791dc2c7b06c8de 100644 --- a/doc/guide.rst +++ b/doc/guide.rst @@ -2,52 +2,8 @@ User guide =========== -Using as a feature extractor ----------------------------- -In this example we'll take pretrained network using MNIST and -In this example we take the output of the layer `fc7` of the VGG face model as -features. - -.. 
doctest:: tensorflowtest - - >>> import numpy - >>> import bob.ip.tensorflow_extractor - >>> import bob.db.mnist - >>> from bob.ip.tensorflow_extractor import scratch_network - >>> import os - >>> import pkg_resources - >>> import tensorflow as tf - - >>> # Loading some samples from mnist - >>> db = bob.db.mnist.Database() - >>> images = db.data(groups='train', labels=[0,1,2,3,4,5,6,7,8,9])[0][0:3] - >>> images = numpy.reshape(images, (3, 28, 28, 1)) * 0.00390625 # Normalizing the data - - >>> # preparing my inputs - >>> inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1)) - >>> graph = scratch_network(inputs) - - >>> # loading my model and projecting - >>> filename = os.path.join(pkg_resources.resource_filename("bob.ip.tensorflow_extractor", 'data'), 'model.ckp') - >>> extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph) - >>> extractor(images).shape - (3, 10) - - -.. note:: - - The models will automatically download to the data folder of this package as - soon as you start using them. - -Using as a convolutional filter -------------------------------- - -In this example we plot some outputs of the convolutional layer `conv1`. - - - Facenet Model ------------- @@ -57,27 +13,11 @@ Check `here for more info <py_api.html#bob.ip.tensorflow_extractor.FaceNet>`_ .. note:: - The models will automatically download to the data folder of this package and save it in + The models will automatically download to the data folder of this package and save it in ``[env-path]./bob/ip/tensorflow_extractor/data/FaceNet``. If you want want set another path for this model do:: - - $ bob config set bob.ip.tensorflow_extractor.facenet_modelpath /path/to/mydatabase - - - -DRGan from L.Tran @ MSU: ------------------------- -:ref:`bob.bio.base <bob.bio.base>` wrapper to the DRGan model trained by L.Tran @ MSU. -Check `here <py_api.html#bob.ip.tensorflow_extractor.DrGanMSUExtractor>`_ for more info - -.. note:: - - The models will automatically download to the data folder of this package and save it in - ``[env-path]./bob/ip/tensorflow_extractor/data/DR_GAN_model``. - If you want want set another path for this model do:: - - $ bob config set bob.ip.tensorflow_extractor.drgan_modelpath /path/to/mydatabase + $ bob config set bob.ip.tensorflow_extractor.facenet_modelpath /path/to/mydatabase diff --git a/doc/index.rst b/doc/index.rst index f1a28b13ad355f0cf063064e89aa25e4176db25f..c32a01ceb55e85957c718e0efb7a428e9b3eb978 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -5,7 +5,7 @@ Bob interface for feature extraction using Tensorflow ======================================================= -This package contains functionality to extract features from CNNs trained with +This package contains functionality to extract features from Neural Networks trained with Tensorflow http://tensorflow.org/ Index diff --git a/setup.py b/setup.py index 44b73fce9d43db1bbe5460d3491e3e59929b7d29..8f682f4cd25f99b2e3bb853f6414662cfd85eb74 100644 --- a/setup.py +++ b/setup.py @@ -34,42 +34,38 @@ # administrative interventions. from setuptools import setup, dist -dist.Distribution(dict(setup_requires=['bob.extension'])) + +dist.Distribution(dict(setup_requires=["bob.extension"])) from bob.extension.utils import load_requirements, find_packages + install_requires = load_requirements() # The only thing we do in this file is to call the setup() function with all # parameters that define our package. setup( - # This is the basic information about your project. 
Modify all this # information before releasing code publicly. - name = 'bob.ip.tensorflow_extractor', - version = open("version.txt").read().rstrip(), - description = 'Feature extractor using tensorflow CNNs', - - url = 'https://gitlab.idiap.ch/tiago.pereira/bob.ip.caffe_extractor', - license = 'BSD', - author = 'Tiago de Freitas Pereira', - author_email = 'tiago.pereira@idiap.ch', - keywords = 'bob, biometric recognition, evaluation', - + name="bob.ip.tensorflow_extractor", + version=open("version.txt").read().rstrip(), + description="Feature extractor using tensorflow CNNs", + url="https://gitlab.idiap.ch/tiago.pereira/bob.ip.caffe_extractor", + license="BSD", + author="Tiago de Freitas Pereira", + author_email="tiago.pereira@idiap.ch", + keywords="bob, biometric recognition, evaluation", # If you have a better, long description of your package, place it on the # 'doc' directory and then hook it here - long_description = open('README.rst').read(), - + long_description=open("README.rst").read(), # This line is required for any distutils based packaging. - packages = find_packages(), - include_package_data = True, - + packages=find_packages(), + include_package_data=True, # This line defines which packages should be installed when you "install" # this package. All packages that are mentioned here, but are not installed # on the current system will be installed locally and only visible to the # scripts of this package. Don't worry - You won't need administrative # privileges when using buildout. - install_requires = install_requires, - + install_requires=install_requires, # Your project should be called something like 'bob.<foo>' or # 'bob.<foo>.<bar>'. To implement this correctly and still get all your # packages to be imported w/o problems, you need to implement namespaces @@ -80,8 +76,6 @@ setup( # Our database packages are good examples of namespace implementations # using several layers. You can check them out here: # https://github.com/idiap/bob/wiki/Satellite-Packages - - # This entry defines which scripts you will have inside the 'bin' directory # once you install the package (or run 'bin/buildout'). The order of each # entry under 'console_scripts' is like this: @@ -93,17 +87,16 @@ setup( # installed under 'example/foo.py' that contains a function which # implements the 'main()' function of particular script you want to have # should be referred as 'example.foo:main'. - # Classifiers are important if you plan to distribute this package through # PyPI. You can find the complete list of classifiers that are valid and # useful here (http://pypi.python.org/pypi?%3Aaction=list_classifiers). - classifiers = [ - 'Framework :: Bob', - 'Development Status :: 4 - Beta', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: BSD License', - 'Natural Language :: English', - 'Programming Language :: Python', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', + classifiers=[ + "Framework :: Bob", + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: BSD License", + "Natural Language :: English", + "Programming Language :: Python", + "Topic :: Scientific/Engineering :: Artificial Intelligence", ], )
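
Note (not part of the patch): a minimal usage sketch of the ``FaceNet`` extractor that this change keeps, written against the updated ``tf.compat.v1`` code paths. It assumes the package is installed and that the model is either resolved from the ``bob.ip.tensorflow_extractor.facenet_modelpath`` configuration key or auto-downloaded on first use; the input shape and the expected embedding size follow ``test_facenet`` in ``bob/ip/tensorflow_extractor/test.py``::

    import numpy
    from bob.ip.tensorflow_extractor import FaceNet

    # A Bob-style color image (channels first, 160x160), as used in test_facenet().
    # Random data stands in for a real aligned face crop here.
    data = numpy.random.rand(3, 160, 160).astype("uint8")

    # Downloads the 20170512-110547 model on first call if no path is configured,
    # then runs the frozen graph through a tf.compat.v1.Session.
    extractor = FaceNet()
    embedding = extractor(data)

    # The model produces a flat 128-dimensional embedding.
    assert embedding.size == 128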