Commit 00042f9a authored by Tiago de Freitas Pereira

Merge branch 'tf2' into 'master'

Tensorflow 2 compatibility

Closes #9

See merge request !15
parents 22df3b3d b83dc957
Pipeline #38691 passed with stages in 14 minutes and 59 seconds
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)

# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
@@ -35,14 +35,17 @@ class Extractor(object):
         self.graph = graph

         # Initializing the variables of the current graph
-        self.session = tf.Session()
-        self.session.run(tf.global_variables_initializer())
+        self.session = tf.compat.v1.Session()
+        self.session.run(tf.compat.v1.global_variables_initializer())

         # Loading the last checkpoint and overwriting the current variables
-        saver = tf.train.Saver()
+        saver = tf.compat.v1.train.Saver()

         if os.path.splitext(checkpoint_filename)[1] == ".meta":
-            saver.restore(self.session, tf.train.latest_checkpoint(os.path.dirname(checkpoint_filename)))
+            saver.restore(
+                self.session,
+                tf.train.latest_checkpoint(os.path.dirname(checkpoint_filename)),
+            )
         elif os.path.isdir(checkpoint_filename):
             saver.restore(self.session, tf.train.latest_checkpoint(checkpoint_filename))
         else:
@@ -52,10 +55,8 @@ class Extractor(object):
         if debug:
             self.session = tf_debug.LocalCLIDebugWrapperSession(self.session)

     def __del__(self):
-        tf.reset_default_graph()
+        tf.compat.v1.reset_default_graph()

     def __call__(self, data):
         """
@@ -73,4 +74,3 @@ class Extractor(object):
         """
         return self.session.run(self.graph, feed_dict={self.input_tensor: data})
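For reference, this is the pattern the port relies on: TensorFlow 2 executes eagerly by default, but graph-mode sessions keep working through ``tf.compat.v1`` once v2 behavior is disabled. A minimal sketch of driving such a session (the placeholder-plus-dense graph below is an illustrative assumption, not code from this merge request):

    import numpy as np
    import tensorflow as tf

    # The session/placeholder pattern above requires graph mode,
    # so v2 behavior must be switched off before building the graph.
    tf.compat.v1.disable_v2_behavior()

    # Hypothetical graph: a placeholder fed through a single dense layer.
    inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, 28, 28, 1))
    graph = tf.compat.v1.layers.dense(tf.compat.v1.layers.flatten(inputs), 10)

    session = tf.compat.v1.Session()
    session.run(tf.compat.v1.global_variables_initializer())

    # Same feed_dict pattern as Extractor.__call__ above.
    data = np.random.rand(2, 28, 28, 1).astype("float32")
    features = session.run(graph, feed_dict={inputs: data})
    print(features.shape)  # (2, 10)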
@@ -12,6 +12,8 @@ import bob.io.base

 logger = logging.getLogger(__name__)

+FACENET_MODELPATH_KEY = "bob.ip.tensorflow_extractor.facenet_modelpath"
+

 def prewhiten(img):
     mean = numpy.mean(img)
@@ -24,18 +26,18 @@ def prewhiten(img):
 def get_model_filenames(model_dir):
     # code from https://github.com/davidsandberg/facenet
     files = os.listdir(model_dir)
-    meta_files = [s for s in files if s.endswith('.meta')]
+    meta_files = [s for s in files if s.endswith(".meta")]
     if len(meta_files) == 0:
-        raise ValueError(
-            'No meta file found in the model directory (%s)' % model_dir)
+        raise ValueError("No meta file found in the model directory (%s)" % model_dir)
     elif len(meta_files) > 1:
         raise ValueError(
-            'There should not be more than one meta file in the model '
-            'directory (%s)' % model_dir)
+            "There should not be more than one meta file in the model "
+            "directory (%s)" % model_dir
+        )
     meta_file = meta_files[0]
     max_step = -1
     for f in files:
-        step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
+        step_str = re.match(r"(^model-[\w\- ]+.ckpt-(\d+))", f)
         if step_str is not None and len(step_str.groups()) >= 2:
             step = int(step_str.groups()[1])
             if step > max_step:
@@ -74,11 +76,12 @@ class FaceNet(object):
     """

     def __init__(
-            self,
-            model_path=rc["bob.ip.tensorflow_extractor.facenet_modelpath"],
-            image_size=160,
-            layer_name='embeddings:0',
-            **kwargs):
+        self,
+        model_path=rc[FACENET_MODELPATH_KEY],
+        image_size=160,
+        layer_name="embeddings:0",
+        **kwargs
+    ):
         super(FaceNet, self).__init__()
         self.model_path = model_path
         self.image_size = image_size
@@ -101,14 +104,12 @@ class FaceNet(object):
         self.model_path = self.get_modelpath()
         if not os.path.exists(self.model_path):
             bob.io.base.create_directories_safe(FaceNet.get_modelpath())
-            zip_file = os.path.join(FaceNet.get_modelpath(),
-                                    "20170512-110547.zip")
+            zip_file = os.path.join(FaceNet.get_modelpath(), "20170512-110547.zip")
             urls = [
-                # This is a private link at Idiap to save bandwidth.
-                "http://beatubulatest.lab.idiap.ch/private/wheels/gitlab/"
+                # This link only works in Idiap CI to save bandwidth.
+                "http://www.idiap.ch/private/wheels/gitlab/"
                 "facenet_model2_20170512-110547.zip",
                 # this link to dropbox would work for everybody
-                # previous link to gogle drive would require cookies
                 "https://www.dropbox.com/s/"
                 "k7bhxe58q7d48g7/facenet_model2_20170512-110547.zip?dl=1",
             ]
@@ -117,51 +118,43 @@ class FaceNet(object):
         # code from https://github.com/davidsandberg/facenet
         model_exp = os.path.expanduser(self.model_path)
         with self.graph.as_default():
-            if (os.path.isfile(model_exp)):
-                logger.info('Model filename: %s' % model_exp)
-                with tf.gfile.FastGFile(model_exp, 'rb') as f:
-                    graph_def = tf.GraphDef()
+            if os.path.isfile(model_exp):
+                logger.info("Model filename: %s" % model_exp)
+                with tf.compat.v1.gfile.FastGFile(model_exp, "rb") as f:
+                    graph_def = tf.compat.v1.GraphDef()
                     graph_def.ParseFromString(f.read())
-                    tf.import_graph_def(graph_def, name='')
+                    tf.import_graph_def(graph_def, name="")
             else:
-                logger.info('Model directory: %s' % model_exp)
+                logger.info("Model directory: %s" % model_exp)
                 meta_file, ckpt_file = get_model_filenames(model_exp)

-                logger.info('Metagraph file: %s' % meta_file)
-                logger.info('Checkpoint file: %s' % ckpt_file)
+                logger.info("Metagraph file: %s" % meta_file)
+                logger.info("Checkpoint file: %s" % ckpt_file)

-                saver = tf.train.import_meta_graph(
-                    os.path.join(model_exp, meta_file))
-                saver.restore(self.session,
-                              os.path.join(model_exp, ckpt_file))
+                saver = tf.compat.v1.train.import_meta_graph(
+                    os.path.join(model_exp, meta_file)
+                )
+                saver.restore(self.session, os.path.join(model_exp, ckpt_file))

         # Get input and output tensors
         self.images_placeholder = self.graph.get_tensor_by_name("input:0")
         self.embeddings = self.graph.get_tensor_by_name(self.layer_name)
-        self.phase_train_placeholder = self.graph.get_tensor_by_name(
-            "phase_train:0")
+        self.phase_train_placeholder = self.graph.get_tensor_by_name("phase_train:0")
         logger.info("Successfully loaded the model.")

     def __call__(self, img):
         images = self._check_feature(img)

         if self.session is None:
             self.graph = tf.Graph()
-            self.session = tf.Session(graph=self.graph)
+            self.session = tf.compat.v1.Session(graph=self.graph)
         if self.embeddings is None:
             self.load_model()
-        feed_dict = {self.images_placeholder: images,
-                     self.phase_train_placeholder: False}
-        features = self.session.run(
-            self.embeddings, feed_dict=feed_dict)
+        feed_dict = {
+            self.images_placeholder: images,
+            self.phase_train_placeholder: False,
+        }
+        features = self.session.run(self.embeddings, feed_dict=feed_dict)
         return features.flatten()

-    @staticmethod
-    def get_rcvariable():
-        """
-        Variable name used in the Bob Global Configuration System
-        https://www.idiap.ch/software/bob/docs/bob/bob.extension/stable/rc.html
-        """
-        return "bob.ip.tensorflow_extractor.facenet_modelpath"
-
     @staticmethod
     def get_modelpath():
         """
@@ -173,11 +166,13 @@ class FaceNet(object):
         """
         # Priority to the RC path
-        model_path = rc[FaceNet.get_rcvariable()]
+        model_path = rc["bob.ip.tensorflow_extractor.facenet_modelpath"]

         if model_path is None:
             import pkg_resources

             model_path = pkg_resources.resource_filename(
-                __name__, 'data/FaceNet/20170512-110547')
+                __name__, "data/FaceNet/20170512-110547"
+            )

         return model_path
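A minimal usage sketch of the ported extractor (assuming the default 160x160 input size and Bob's channels-first ``(C, H, W)`` uint8 layout; the random image is only illustrative):

    import numpy
    from bob.ip.tensorflow_extractor import FaceNet

    # The model is downloaded automatically on first use (see the URLs above).
    extractor = FaceNet()

    image = numpy.random.randint(0, 256, size=(3, 160, 160), dtype=numpy.uint8)
    embedding = extractor(image)  # flattened 128-dimensional FaceNet embedding
    assert embedding.size == 128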
@@ -39,14 +39,14 @@ class MTCNN:
         graph = tf.Graph()
         with graph.as_default():
             with open(model_path, "rb") as f:
-                graph_def = tf.GraphDef.FromString(f.read())
+                graph_def = tf.compat.v1.GraphDef.FromString(f.read())
                 tf.import_graph_def(graph_def, name="")
         self.graph = graph
-        config = tf.ConfigProto(
+        config = tf.compat.v1.ConfigProto(
             intra_op_parallelism_threads=multiprocessing.cpu_count(),
             inter_op_parallelism_threads=multiprocessing.cpu_count(),
         )
-        self.sess = tf.Session(graph=graph, config=config)
+        self.sess = tf.compat.v1.Session(graph=graph, config=config)

     def detect(self, img):
         """Detects all faces in the image.
...
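A minimal usage sketch for the ported detector (the file name ``face.png`` is an illustrative assumption; Bob loads images in channels-first ``(C, H, W)`` uint8 layout):

    import bob.io.base
    import bob.io.image  # registers image codecs for bob.io.base.load
    from bob.ip.tensorflow_extractor import MTCNN

    # Hypothetical input file.
    image = bob.io.base.load("face.png")

    detector = MTCNN()
    # Detection results for every face in the image; see the
    # detect() docstring above for the exact return structure.
    detections = detector.detect(image)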
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import vgg
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from .Extractor import Extractor
import numpy
import bob.extension.download
import os


def vgg_16(inputs,
           reuse=None,
           dropout_keep_prob=0.5,
           weight_decay=0.0005,
           mode=tf.estimator.ModeKeys.TRAIN, **kwargs):
    """
    Oxford Net VGG 16-Layers version E example from tf-slim.

    Adapted from
    https://raw.githubusercontent.com/tensorflow/models/master/research/slim/nets/vgg.py

    **Parameters**:

        inputs: a 4-D tensor of size [batch_size, height, width, 3].

        reuse: whether or not the network and its variables should be reused.
            To be able to reuse, 'scope' must be given.

        mode: Estimator mode keys
    """
    end_points = dict()
    with tf.variable_scope('vgg_16', reuse=reuse):
        with arg_scope([layers.conv2d, layers_lib.fully_connected],
                       activation_fn=tf.nn.relu,
                       weights_regularizer=None,
                       biases_initializer=tf.zeros_initializer(),
                       trainable=mode == tf.estimator.ModeKeys.PREDICT):

            # Collect outputs for conv2d, fully_connected and max_pool2d.
            net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
            end_points['conv1'] = net

            net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
            end_points['conv2'] = net

            net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
            end_points['conv3'] = net

            net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
            end_points['conv4'] = net

            net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
            net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
            end_points['conv5'] = net

            net = slim.flatten(net)

            net = layers.fully_connected(net, 4096, scope='fc6')
            end_points['fc6'] = net

            net = layers.fully_connected(net, 4096, scope='fc7', activation_fn=None)
            end_points['fc7'] = net

    return net, end_points


class VGGFace(Extractor):
    """
    Extract features using the VGG model
    http://www.robots.ox.ac.uk/~vgg/software/vgg_face/

    This was converted with the script https://github.com/tiagofrepereira2012
    """

    def __init__(self, checkpoint_filename=None, debug=False):
        # Average image provided in
        # http://www.robots.ox.ac.uk/~vgg/software/vgg_face/
        self.average_img = [129.1863, 104.7624, 93.5940]

        if checkpoint_filename is None:
            checkpoint_filename = os.path.join(VGGFace.get_vggpath(), "vgg_face_tf")

        # Downloading the model if necessary
        if not os.path.exists(checkpoint_filename):
            zip_file = os.path.join(VGGFace.get_vggpath(), "vgg_face_tf.tar.gz")
            urls = [
                # This is a private link at Idiap to save bandwidth.
                "https://www.idiap.ch/software/bob/data/bob/bob.ip.tensorflow_extractor/master/"
                "vgg_face_tf.tar.gz",
                "http://www.idiap.ch/software/bob/data/bob/bob.ip.tensorflow_extractor/master/"
                "vgg_face_tf.tar.gz",
            ]
            bob.extension.download.download_and_unzip(urls, zip_file)

        input_tensor = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
        graph = vgg_16(input_tensor)[0]

        super(VGGFace, self).__init__(checkpoint_filename=os.path.join(checkpoint_filename),
                                      input_tensor=input_tensor,
                                      graph=graph,
                                      debug=debug)

    def __call__(self, image):
        if len(image.shape) == 3:
            # Subtracting the average image and converting from RGB to BGR
            R = image[0, :, :] - self.average_img[0]
            G = image[1, :, :] - self.average_img[1]
            B = image[2, :, :] - self.average_img[2]

            # Assembling the mean-subtracted BGR image
            bgr_image = numpy.zeros(shape=image.shape)
            bgr_image[0, :, :] = B
            bgr_image[1, :, :] = G
            bgr_image[2, :, :] = R

            # Swapping from CxHxW to HxWxC and adding the batch axis
            bgr_image = numpy.moveaxis(bgr_image, 0, -1)
            bgr_image = numpy.expand_dims(bgr_image, 0)

            if self.session is None:
                self.session = tf.InteractiveSession()

            return self.session.run(self.graph, feed_dict={self.input_tensor: bgr_image})[0]
        else:
            raise ValueError("Image should have 3 channels")

    @staticmethod
    def get_vggpath():
        import pkg_resources
        return pkg_resources.resource_filename(__name__, 'data')
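This slim-based module is removed by the merge request: ``tf.contrib`` does not exist in TensorFlow 2, so it cannot be ported with ``tf.compat.v1`` aliases alone. For orientation only, a hedged sketch of what an equivalent Keras definition of the same conv/fc stack could look like (layer names and sizes mirror the slim version above; this is not code from the merge request and does not load the converted ``vgg_face_tf`` checkpoint):

    import tensorflow as tf

    def vgg_16_tf2(num_units=4096):
        """Illustrative Keras re-creation of the vgg_16 stack above."""
        layers = tf.keras.layers
        model = tf.keras.Sequential(name="vgg_16")
        model.add(layers.InputLayer(input_shape=(224, 224, 3)))
        # Five conv blocks mirroring conv1..conv5 (slim repeat + max_pool2d).
        for block, (reps, filters) in enumerate(
            [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)], start=1
        ):
            for i in range(reps):
                model.add(layers.Conv2D(filters, 3, padding="same",
                                        activation="relu",
                                        name="conv%d_%d" % (block, i + 1)))
            model.add(layers.MaxPooling2D(2, name="pool%d" % block))
        model.add(layers.Flatten(name="flatten"))
        model.add(layers.Dense(num_units, activation="relu", name="fc6"))
        model.add(layers.Dense(num_units, activation=None, name="fc7"))
        return model

    # Inspect the architecture:
    vgg_16_tf2().summary()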
 #!/usr/bin/env python

-def scratch_network(inputs, end_point="fc1", reuse=False):
-    import tensorflow as tf
-    slim = tf.contrib.slim
-
-    # Creating a random network
-    initializer = tf.contrib.layers.xavier_initializer(seed=10)
-    end_points = dict()
-
-    graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1,
-                        scope='conv1', weights_initializer=initializer,
-                        reuse=reuse)
-    end_points["conv1"] = graph
-
-    graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
-    end_points["pool1"] = graph
-
-    graph = slim.flatten(graph, scope='flatten1')
-    end_points["flatten1"] = graph
-
-    graph = slim.fully_connected(graph, 10, activation_fn=None, scope='fc1',
-                                 weights_initializer=initializer, reuse=reuse)
-    end_points["fc1"] = graph
-
-    return end_points[end_point]
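Like the VGG module above, this slim helper cannot survive the TF2 port. A hypothetical Keras re-creation of the same tiny network (illustrative only, not part of the merge request; ``pool1`` keeps slim's default stride of 2, so ``flatten1`` still yields the 1690 features the removed test asserted):

    import tensorflow as tf

    def scratch_network_tf2(end_point="fc1"):
        init = tf.keras.initializers.GlorotUniform(seed=10)  # xavier_initializer
        inputs = tf.keras.Input(shape=(28, 28, 1))
        net = tf.keras.layers.Conv2D(10, 3, strides=1, padding="same",
                                     activation="relu",
                                     kernel_initializer=init, name="conv1")(inputs)
        end_points = {"conv1": net}
        # slim.max_pool2d defaults to stride 2: 13x13x10 = 1690 after flatten.
        net = tf.keras.layers.MaxPooling2D(pool_size=4, strides=2, name="pool1")(net)
        end_points["pool1"] = net
        net = tf.keras.layers.Flatten(name="flatten1")(net)
        end_points["flatten1"] = net
        net = tf.keras.layers.Dense(10, activation=None,
                                    kernel_initializer=init, name="fc1")(net)
        end_points["fc1"] = net
        return tf.keras.Model(inputs, end_points[end_point])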
 def get_config():
     """Returns a string containing the configuration information.
     """
     import bob.extension
     return bob.extension.get_config(__name__)

-from .Extractor import Extractor
 from .FaceNet import FaceNet
-from .DrGanMSU import DrGanMSUExtractor
-from .Vgg16 import VGGFace, vgg_16
 from .MTCNN import MTCNN
+from .Extractor import Extractor

 # gets sphinx autodoc done right - don't remove it
@@ -56,13 +30,7 @@ def __appropriate__(*args):
     obj.__module__ = __name__

-__appropriate__(
-    Extractor,
-    FaceNet,
-    DrGanMSUExtractor,
-    VGGFace,
-    MTCNN,
-)
+__appropriate__(FaceNet, MTCNN, Extractor)

 # gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
model_checkpoint_path: "model.ckp"
all_model_checkpoint_paths: "model.ckp"
 import bob.io.base
 import bob.io.image
 from bob.io.base.test_utils import datafile
-import bob.ip.tensorflow_extractor
-import tensorflow as tf
-import pkg_resources
 import numpy
 import json
-import os

 numpy.random.seed(10)
-slim = tf.contrib.slim
-from . import scratch_network

-def test_output():
-    # Loading MNIST model
-    filename = os.path.join(
-        pkg_resources.resource_filename(__name__, "data"), "model.ckp"
-    )
-    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
-
-    # Testing the last output
-    graph = scratch_network(inputs)
-    extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
-    data = numpy.random.rand(2, 28, 28, 1).astype("float32")
-    output = extractor(data)
-    assert extractor(data).shape == (2, 10)
-    del extractor
-
-    # Testing flatten
-    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
-    graph = scratch_network(inputs, end_point="flatten1")
-    extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
-    data = numpy.random.rand(2, 28, 28, 1).astype("float32")
-    output = extractor(data)
-    assert output.shape == (2, 1690)
-    del extractor

 def test_facenet():
     from bob.ip.tensorflow_extractor import FaceNet
@@ -52,24 +16,6 @@ def test_facenet():
     assert output.size == 128, output.shape

-def test_drgan():
-    from bob.ip.tensorflow_extractor import DrGanMSUExtractor
-    extractor = DrGanMSUExtractor()
-    data = numpy.random.rand(3, 96, 96).astype("uint8")
-    output = extractor(data)
-    assert output.size == 320, output.shape

-def test_vgg16():
-    pass
-    # from bob.ip.tensorflow_extractor import VGGFace
-    # extractor = VGGFace()
-    # data = numpy.random.rand(3, 224, 224).astype("uint8")
-    # output = extractor(data)
-    # assert output.size == 4096, output.shape

 def test_mtcnn():
     test_image = datafile("mtcnn/test_image.png", __name__)
     ref_numbers = datafile("mtcnn/mtcnn.hdf5", __name__)
...
@@ -2,52 +2,8 @@
 User guide
 ===========

-Using as a feature extractor
-----------------------------
-
-In this example we'll take pretrained network using MNIST and
-In this example we take the output of the layer `fc7` of the VGG face model as
-features.
-
-.. doctest:: tensorflowtest
-
-   >>> import numpy
-   >>> import bob.ip.tensorflow_extractor
-   >>> import bob.db.mnist
-   >>> from bob.ip.tensorflow_extractor import scratch_network
-   >>> import os
-   >>> import pkg_resources
-   >>> import tensorflow as tf
-
-   >>> # Loading some samples from mnist
-   >>> db = bob.db.mnist.Database()
-   >>> images = db.data(groups='train', labels=[0,1,2,3,4,5,6,7,8,9])[0][0:3]
-   >>> images = numpy.reshape(images, (3, 28, 28, 1)) * 0.00390625 # Normalizing the data
-
-   >>> # preparing my inputs
-   >>> inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
-   >>> graph = scratch_network(inputs)
-
-   >>> # loading my model and projecting
-   >>> filename = os.path.join(pkg_resources.resource_filename("bob.ip.tensorflow_extractor", 'data'), 'model.ckp')
-   >>> extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
-   >>> extractor(images).shape
-   (3, 10)
-
-.. note::
-
-   The models will automatically download to the data folder of this package as
-   soon as you start using them.
-
-Using as a convolutional filter
--------------------------------
-
-In this example we plot some outputs of the convolutional layer `conv1`.
-
 Facenet Model
 -------------
@@ -57,27 +13,11 @@ Check `here for more info <py_api.html#bob.ip.tensorflow_extractor.FaceNet>`_
 .. note::

    The models will automatically download to the data folder of this package
    and will be saved in ``[env-path]./bob/ip/tensorflow_extractor/data/FaceNet``.
    If you want to set another path for this model, do::

       $ bob config set bob.ip.tensorflow_extractor.facenet_modelpath /path/to/mydatabase

-DRGan from L.Tran @ MSU:
-------------------------
-
-:ref:`bob.bio.base <bob.bio.base>` wrapper to the DRGan model trained by L.Tran @ MSU.
-Check `here <py_api.html#bob.ip.tensorflow_extractor.DrGanMSUExtractor>`_ for more info
-
-.. note::
-
-   The models will automatically download to the data folder of this package and save it in
-   ``[env-path]./bob/ip/tensorflow_extractor/data/DR_GAN_model``.
-   If you want want set another path for this model do::
-
-      $ bob config set bob.ip.tensorflow_extractor.drgan_m