Commit 00042f9a authored by Tiago de Freitas Pereira

Merge branch 'tf2' into 'master'

Tensorflow 2 compatibility

Closes #9

See merge request !15
parents 22df3b3d b83dc957
Pipeline #38691 passed with stages in 14 minutes and 59 seconds
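In essence, the change set keeps the package's graph-and-session API working on TensorFlow 2 by moving TF1-only entry points under the tf.compat.v1 namespace. A minimal, self-contained sketch of the pattern (illustrative, not code from this repository):

# TF1 -> TF2 migration pattern applied throughout this merge request:
# graph-mode entry points move under tf.compat.v1; the rest stays put.
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # keep TF1 graph/session semantics on TF2

x = tf.compat.v1.placeholder(tf.float32, shape=(None, 2))  # was: tf.placeholder
y = tf.reduce_sum(x, axis=1)

with tf.compat.v1.Session() as sess:  # was: tf.Session()
    sess.run(tf.compat.v1.global_variables_initializer())
    print(sess.run(y, feed_dict={x: [[1.0, 2.0]]}))  # -> [3.]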
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
@@ -35,14 +35,17 @@ class Extractor(object):
         self.graph = graph

         # Initializing the variables of the current graph
-        self.session = tf.Session()
-        self.session.run(tf.global_variables_initializer())
+        self.session = tf.compat.v1.Session()
+        self.session.run(tf.compat.v1.global_variables_initializer())

         # Loading the last checkpoint and overwriting the current variables
-        saver = tf.train.Saver()
+        saver = tf.compat.v1.train.Saver()

         if os.path.splitext(checkpoint_filename)[1] == ".meta":
-            saver.restore(self.session, tf.train.latest_checkpoint(os.path.dirname(checkpoint_filename)))
+            saver.restore(
+                self.session,
+                tf.train.latest_checkpoint(os.path.dirname(checkpoint_filename)),
+            )
         elif os.path.isdir(checkpoint_filename):
             saver.restore(self.session, tf.train.latest_checkpoint(checkpoint_filename))
         else:
@@ -52,10 +55,8 @@ class Extractor(object):
         if debug:
             self.session = tf_debug.LocalCLIDebugWrapperSession(self.session)

     def __del__(self):
-        tf.reset_default_graph()
+        tf.compat.v1.reset_default_graph()

     def __call__(self, data):
         """
@@ -73,4 +74,3 @@ class Extractor(object):
         """
         return self.session.run(self.graph, feed_dict={self.input_tensor: data})
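For context, end-to-end use of this class mirrors test_output() in the test suite further down the page; a sketch under those assumptions (the checkpoint path is illustrative):

# Sketch of Extractor usage, mirroring test_output() below.
import numpy
import tensorflow as tf
import bob.ip.tensorflow_extractor
from bob.ip.tensorflow_extractor import scratch_network

inputs = tf.compat.v1.placeholder(tf.float32, shape=(None, 28, 28, 1))
graph = scratch_network(inputs)

# "model.ckp" ships in the package's test data folder; the path here is illustrative.
extractor = bob.ip.tensorflow_extractor.Extractor("data/model.ckp", inputs, graph)
features = extractor(numpy.random.rand(2, 28, 28, 1).astype("float32"))
assert features.shape == (2, 10)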
@@ -12,6 +12,8 @@ import bob.io.base
 logger = logging.getLogger(__name__)

+FACENET_MODELPATH_KEY = "bob.ip.tensorflow_extractor.facenet_modelpath"
+
 def prewhiten(img):
     mean = numpy.mean(img)
@@ -24,18 +26,18 @@ def prewhiten(img):
 def get_model_filenames(model_dir):
     # code from https://github.com/davidsandberg/facenet
     files = os.listdir(model_dir)
-    meta_files = [s for s in files if s.endswith('.meta')]
+    meta_files = [s for s in files if s.endswith(".meta")]
     if len(meta_files) == 0:
-        raise ValueError(
-            'No meta file found in the model directory (%s)' % model_dir)
+        raise ValueError("No meta file found in the model directory (%s)" % model_dir)
     elif len(meta_files) > 1:
         raise ValueError(
-            'There should not be more than one meta file in the model '
-            'directory (%s)' % model_dir)
+            "There should not be more than one meta file in the model "
+            "directory (%s)" % model_dir
+        )
     meta_file = meta_files[0]
     max_step = -1
     for f in files:
-        step_str = re.match(r'(^model-[\w\- ]+.ckpt-(\d+))', f)
+        step_str = re.match(r"(^model-[\w\- ]+.ckpt-(\d+))", f)
         if step_str is not None and len(step_str.groups()) >= 2:
             step = int(step_str.groups()[1])
             if step > max_step:
@@ -75,10 +77,11 @@ class FaceNet(object):
     def __init__(
         self,
-        model_path=rc["bob.ip.tensorflow_extractor.facenet_modelpath"],
+        model_path=rc[FACENET_MODELPATH_KEY],
         image_size=160,
-        layer_name='embeddings:0',
-        **kwargs):
+        layer_name="embeddings:0",
+        **kwargs
+    ):
         super(FaceNet, self).__init__()
         self.model_path = model_path
         self.image_size = image_size
@@ -101,14 +104,12 @@ class FaceNet(object):
         self.model_path = self.get_modelpath()
         if not os.path.exists(self.model_path):
             bob.io.base.create_directories_safe(FaceNet.get_modelpath())
-            zip_file = os.path.join(FaceNet.get_modelpath(),
-                                    "20170512-110547.zip")
+            zip_file = os.path.join(FaceNet.get_modelpath(), "20170512-110547.zip")
             urls = [
-                # This is a private link at Idiap to save bandwidth.
-                "http://beatubulatest.lab.idiap.ch/private/wheels/gitlab/"
+                # This link only works in the Idiap CI, to save bandwidth.
+                "http://www.idiap.ch/private/wheels/gitlab/"
                 "facenet_model2_20170512-110547.zip",
-                # this link to dropbox would work for everybody
+                # the previous link to google drive would require cookies
                 "https://www.dropbox.com/s/"
                 "k7bhxe58q7d48g7/facenet_model2_20170512-110547.zip?dl=1",
             ]
@@ -117,51 +118,43 @@ class FaceNet(object):
         # code from https://github.com/davidsandberg/facenet
         model_exp = os.path.expanduser(self.model_path)
         with self.graph.as_default():
-            if (os.path.isfile(model_exp)):
-                logger.info('Model filename: %s' % model_exp)
-                with tf.gfile.FastGFile(model_exp, 'rb') as f:
-                    graph_def = tf.GraphDef()
+            if os.path.isfile(model_exp):
+                logger.info("Model filename: %s" % model_exp)
+                with tf.compat.v1.gfile.FastGFile(model_exp, "rb") as f:
+                    graph_def = tf.compat.v1.GraphDef()
                     graph_def.ParseFromString(f.read())
-                    tf.import_graph_def(graph_def, name='')
+                    tf.import_graph_def(graph_def, name="")
             else:
-                logger.info('Model directory: %s' % model_exp)
+                logger.info("Model directory: %s" % model_exp)
                 meta_file, ckpt_file = get_model_filenames(model_exp)
-                logger.info('Metagraph file: %s' % meta_file)
-                logger.info('Checkpoint file: %s' % ckpt_file)
-                saver = tf.train.import_meta_graph(
-                    os.path.join(model_exp, meta_file))
-                saver.restore(self.session,
-                              os.path.join(model_exp, ckpt_file))
+                logger.info("Metagraph file: %s" % meta_file)
+                logger.info("Checkpoint file: %s" % ckpt_file)
+                saver = tf.compat.v1.train.import_meta_graph(
+                    os.path.join(model_exp, meta_file)
+                )
+                saver.restore(self.session, os.path.join(model_exp, ckpt_file))

         # Get input and output tensors
         self.images_placeholder = self.graph.get_tensor_by_name("input:0")
         self.embeddings = self.graph.get_tensor_by_name(self.layer_name)
-        self.phase_train_placeholder = self.graph.get_tensor_by_name(
-            "phase_train:0")
+        self.phase_train_placeholder = self.graph.get_tensor_by_name("phase_train:0")
         logger.info("Successfully loaded the model.")

     def __call__(self, img):
         images = self._check_feature(img)
         if self.session is None:
             self.graph = tf.Graph()
-            self.session = tf.Session(graph=self.graph)
+            self.session = tf.compat.v1.Session(graph=self.graph)
         if self.embeddings is None:
             self.load_model()
-        feed_dict = {self.images_placeholder: images,
-                     self.phase_train_placeholder: False}
-        features = self.session.run(
-            self.embeddings, feed_dict=feed_dict)
+        feed_dict = {
+            self.images_placeholder: images,
+            self.phase_train_placeholder: False,
+        }
+        features = self.session.run(self.embeddings, feed_dict=feed_dict)
         return features.flatten()

-    @staticmethod
-    def get_rcvariable():
-        """
-        Variable name used in the Bob Global Configuration System
-        https://www.idiap.ch/software/bob/docs/bob/bob.extension/stable/rc.html
-        """
-        return "bob.ip.tensorflow_extractor.facenet_modelpath"

     @staticmethod
     def get_modelpath():
         """
@@ -173,11 +166,13 @@ class FaceNet(object):
         """
         # Priority to the RC path
-        model_path = rc[FaceNet.get_rcvariable()]
+        model_path = rc["bob.ip.tensorflow_extractor.facenet_modelpath"]

         if model_path is None:
             import pkg_resources

             model_path = pkg_resources.resource_filename(
-                __name__, 'data/FaceNet/20170512-110547')
+                __name__, "data/FaceNet/20170512-110547"
+            )

         return model_path
@@ -39,14 +39,14 @@ class MTCNN:
         graph = tf.Graph()
         with graph.as_default():
             with open(model_path, "rb") as f:
-                graph_def = tf.GraphDef.FromString(f.read())
+                graph_def = tf.compat.v1.GraphDef.FromString(f.read())
                 tf.import_graph_def(graph_def, name="")
         self.graph = graph
-        config = tf.ConfigProto(
+        config = tf.compat.v1.ConfigProto(
             intra_op_parallelism_threads=multiprocessing.cpu_count(),
             inter_op_parallelism_threads=multiprocessing.cpu_count(),
         )
-        self.sess = tf.Session(graph=graph, config=config)
+        self.sess = tf.compat.v1.Session(graph=graph, config=config)

     def detect(self, img):
         """Detects all faces in the image.
......
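For orientation between these diffs: driving the detector only involves the constructor and detect() shown above. The sketch below is hedged; the image-loading helper and the iterable return value are assumptions, not confirmed by this diff.

# Hedged sketch of MTCNN usage; only MTCNN() and detect(img) are confirmed
# by the diff above. The loader and the shape of the result are assumptions.
import bob.io.base
import bob.io.image  # noqa: F401  # registers image codecs for bob.io.base.load
from bob.io.base.test_utils import datafile
from bob.ip.tensorflow_extractor import MTCNN

detector = MTCNN()
image = bob.io.base.load(datafile("mtcnn/test_image.png", "bob.ip.tensorflow_extractor"))
detections = detector.detect(image)  # assumed: one entry per detected face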
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
import tensorflow as tf
from tensorflow.contrib.slim.python.slim.nets import vgg
import tensorflow.contrib.slim as slim
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from .Extractor import Extractor
import numpy
import bob.extension.download
import os
def vgg_16(inputs,
reuse=None,
dropout_keep_prob=0.5,
weight_decay=0.0005,
mode=tf.estimator.ModeKeys.TRAIN, **kwargs):
"""
Oxford Net VGG 16-layer (configuration E) network, adapted from the tf-slim
example at
https://raw.githubusercontent.com/tensorflow/models/master/research/slim/nets/vgg.py
**Parameters**:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
reuse: whether or not the network and its variables should be reused. To be
able to reuse them, 'scope' must be given.
mode:
Estimator mode keys
"""
end_points = dict()
with tf.variable_scope('vgg_16', reuse=reuse):
with arg_scope([layers.conv2d, layers_lib.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=None,
biases_initializer=tf.zeros_initializer(),
trainable=mode == tf.estimator.ModeKeys.TRAIN):  # variables are trainable only when training
# Collect outputs for conv2d, fully_connected and max_pool2d.
net = layers_lib.repeat(inputs, 2, layers.conv2d, 64, [3, 3], scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
end_points['conv1'] = net
net = layers_lib.repeat(net, 2, layers.conv2d, 128, [3, 3], scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
end_points['conv2'] = net
net = layers_lib.repeat(net, 3, layers.conv2d, 256, [3, 3], scope='conv3')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool3')
end_points['conv3'] = net
net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv4')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool4')
end_points['conv4'] = net
net = layers_lib.repeat(net, 3, layers.conv2d, 512, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
end_points['conv5'] = net
net = slim.flatten(net)
net = layers.fully_connected(net, 4096, scope='fc6')
end_points['fc6'] = net
net = layers.fully_connected(net, 4096, scope='fc7', activation_fn=None)
end_points['fc7'] = net
return net, end_points
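For context, VGGFace below builds this graph from a fixed-size placeholder; the 'fc7' end point is the 4096-dimensional layer the user guide treats as the face descriptor. A minimal wiring sketch (TF1-era API, like the surrounding code):

# Sketch: wiring vgg_16 the way VGGFace does below (TF1-era API).
input_tensor = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
logits, end_points = vgg_16(input_tensor)
fc7 = end_points["fc7"]  # 4096-d descriptor layer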
class VGGFace(Extractor):
"""
Extract features using the VGG model
http://www.robots.ox.ac.uk/~vgg/software/vgg_face/
This was converted with the script https://github.com/tiagofrepereira2012
"""
def __init__(self, checkpoint_filename=None, debug=False):
# Average image provided in
# http://www.robots.ox.ac.uk/~vgg/software/vgg_face/
self.average_img = [129.1863, 104.7624, 93.5940]
if checkpoint_filename is None:
checkpoint_filename = os.path.join(VGGFace.get_vggpath(), "vgg_face_tf")
# Downloading the model if necessary
if not os.path.exists(checkpoint_filename):
zip_file = os.path.join(VGGFace.get_vggpath(), "vgg_face_tf.tar.gz")
urls = [
# This is a private link at Idiap to save bandwidth.
"https://www.idiap.ch/software/bob/data/bob/bob.ip.tensorflow_extractor/master/"
"vgg_face_tf.tar.gz",
"http://www.idiap.ch/software/bob/data/bob/bob.ip.tensorflow_extractor/master/"
"vgg_face_tf.tar.gz",
]
bob.extension.download.download_and_unzip(urls, zip_file)
input_tensor = tf.placeholder(tf.float32, shape=(1, 224, 224, 3))
graph = vgg_16(input_tensor)[0]
super(VGGFace, self).__init__(
    checkpoint_filename=checkpoint_filename,
    input_tensor=input_tensor,
    graph=graph,
    debug=debug,
)
def __call__(self, image):
    if len(image.shape) == 3:
        # Subtracting the average image and converting from RGB to BGR
        R = image[0, :, :] - self.average_img[0]
        G = image[1, :, :] - self.average_img[1]
        B = image[2, :, :] - self.average_img[2]

        # Assembling the BGR image
        bgr_image = numpy.zeros(shape=image.shape)
        bgr_image[0, :, :] = B
        bgr_image[1, :, :] = G
        bgr_image[2, :, :] = R

        # Swapping from CxHxW to HxWxC
        bgr_image = numpy.moveaxis(bgr_image, 0, -1)
        bgr_image = numpy.expand_dims(bgr_image, 0)

        if self.session is None:
            self.session = tf.InteractiveSession()
        return self.session.run(self.graph, feed_dict={self.input_tensor: bgr_image})[0]
    else:
        raise ValueError("Image should have 3 channels")
@staticmethod
def get_vggpath():
import pkg_resources
return pkg_resources.resource_filename(__name__, 'data')
#!/usr/bin/env python
def scratch_network(inputs, end_point="fc1", reuse=False):
import tensorflow as tf
slim = tf.contrib.slim
# Creating a random network
initializer = tf.contrib.layers.xavier_initializer(seed=10)
end_points = dict()
graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1,
scope='conv1', weights_initializer=initializer,
reuse=reuse)
end_points["conv1"] = graph
graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
end_points["pool1"] = graph
graph = slim.flatten(graph, scope='flatten1')
end_points["flatten1"] = graph
graph = slim.fully_connected(graph, 10, activation_fn=None, scope='fc1',
weights_initializer=initializer, reuse=reuse)
end_points["fc1"] = graph
return end_points[end_point]
def get_config():
"""Returns a string containing the configuration information.
"""
import bob.extension
return bob.extension.get_config(__name__)
+from .Extractor import Extractor
 from .FaceNet import FaceNet
-from .DrGanMSU import DrGanMSUExtractor
-from .Vgg16 import VGGFace, vgg_16
 from .MTCNN import MTCNN
-from .Extractor import Extractor
 # gets sphinx autodoc done right - don't remove it
@@ -56,13 +30,7 @@ def __appropriate__(*args):
     obj.__module__ = __name__

-__appropriate__(
-    Extractor,
-    FaceNet,
-    DrGanMSUExtractor,
-    VGGFace,
-    MTCNN,
-)
+__appropriate__(FaceNet, MTCNN, Extractor)

 # gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
model_checkpoint_path: "model.ckp"
all_model_checkpoint_paths: "model.ckp"
import bob.io.base
import bob.io.image
from bob.io.base.test_utils import datafile
import bob.ip.tensorflow_extractor
import tensorflow as tf
import pkg_resources
import numpy
import json
import os
numpy.random.seed(10)
slim = tf.contrib.slim
from . import scratch_network
def test_output():
# Loading MNIST model
filename = os.path.join(
pkg_resources.resource_filename(__name__, "data"), "model.ckp"
)
inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
# Testing the last output
graph = scratch_network(inputs)
extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
data = numpy.random.rand(2, 28, 28, 1).astype("float32")
output = extractor(data)
assert output.shape == (2, 10)
del extractor
# Testing flatten
inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
graph = scratch_network(inputs, end_point="flatten1")
extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
data = numpy.random.rand(2, 28, 28, 1).astype("float32")
output = extractor(data)
assert output.shape == (2, 1690)
del extractor
def test_facenet():
from bob.ip.tensorflow_extractor import FaceNet
@@ -52,24 +16,6 @@ def test_facenet():
     assert output.size == 128, output.shape

-def test_drgan():
-    from bob.ip.tensorflow_extractor import DrGanMSUExtractor
-    extractor = DrGanMSUExtractor()
-    data = numpy.random.rand(3, 96, 96).astype("uint8")
-    output = extractor(data)
-    assert output.size == 320, output.shape

-def test_vgg16():
-    pass
-    # from bob.ip.tensorflow_extractor import VGGFace
-    # extractor = VGGFace()
-    # data = numpy.random.rand(3, 224, 224).astype("uint8")
-    # output = extractor(data)
-    # assert output.size == 4096, output.shape

 def test_mtcnn():
     test_image = datafile("mtcnn/test_image.png", __name__)
     ref_numbers = datafile("mtcnn/mtcnn.hdf5", __name__)
......
@@ -2,50 +2,6 @@
 User guide
 ===========

-Using as a feature extractor
-----------------------------
-
-In this example we'll take a pretrained network using MNIST and
-In this example we take the output of the layer `fc7` of the VGG face model as
-features.
-
-.. doctest:: tensorflowtest
-
-   >>> import numpy
-   >>> import bob.ip.tensorflow_extractor
-   >>> import bob.db.mnist
-   >>> from bob.ip.tensorflow_extractor import scratch_network
-   >>> import os
-   >>> import pkg_resources
-   >>> import tensorflow as tf
-
-   >>> # Loading some samples from mnist
-   >>> db = bob.db.mnist.Database()
-   >>> images = db.data(groups='train', labels=[0,1,2,3,4,5,6,7,8,9])[0][0:3]
-   >>> images = numpy.reshape(images, (3, 28, 28, 1)) * 0.00390625 # Normalizing the data
-
-   >>> # preparing my inputs
-   >>> inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
-   >>> graph = scratch_network(inputs)
-
-   >>> # loading my model and projecting
-   >>> filename = os.path.join(pkg_resources.resource_filename("bob.ip.tensorflow_extractor", 'data'), 'model.ckp')
-   >>> extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
-   >>> extractor(images).shape
-   (3, 10)
-
-.. note::
-
-   The models will automatically download to the data folder of this package as
-   soon as you start using them.
-
-Using as a convolutional filter
--------------------------------
-
-In this example we plot some outputs of the convolutional layer `conv1`.

 Facenet Model
@@ -65,20 +21,4 @@ Check `here for more info <py_api.html#bob.ip.tensorflow_extractor.FaceNet>`_
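A minimal usage sketch for the Facenet extractor; the CxHxW input layout below is an assumption, while the 128-dimensional output matches the package test:

>>> import numpy
>>> from bob.ip.tensorflow_extractor import FaceNet
>>> extractor = FaceNet()  # downloads the model on first use
>>> data = numpy.random.rand(3, 160, 160).astype("uint8")  # CxHxW (assumed)
>>> extractor(data).size
128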
-DRGan from L. Tran @ MSU:
-------------------------
-
-:ref:`bob.bio.base <bob.bio.base>` wrapper to the DRGan model trained by L. Tran @ MSU.
-Check `here <py_api.html#bob.ip.tensorflow_extractor.DrGanMSUExtractor>`_ for more info.
-
-.. note::
-
-   The models will automatically download to the data folder of this package and
-   be saved in ``[env-path]./bob/ip/tensorflow_extractor/data/DR_GAN_model``.
-   If you want to set another path for this model, do::
-
-      $ bob config set bob.ip.tensorflow_extractor.drgan_modelpath /path/to/mydatabase
@@ -5,7 +5,7 @@
 Bob interface for feature extraction using Tensorflow
 =======================================================

-This package contains functionality to extract features from CNNs trained with
+This package contains functionality to extract features from Neural Networks trained with
 Tensorflow http://tensorflow.org/

 Index
......
@@ -34,42 +34,38 @@
 # administrative interventions.
 from setuptools import setup, dist

-dist.Distribution(dict(setup_requires=['bob.extension']))
+dist.Distribution(dict(setup_requires=["bob.extension"]))
 from bob.extension.utils import load_requirements, find_packages

 install_requires = load_requirements()

 # The only thing we do in this file is to call the setup() function with all
 # parameters that define our package.
 setup(
     # This is the basic information about your project. Modify all this
     # information before releasing code publicly.
-    name = 'bob.ip.tensorflow_extractor',
-    version = open("version.txt").read().rstrip(),
-    description = 'Feature extractor using tensorflow CNNs',
-    url = 'https://gitlab.idiap.ch/tiago.pereira/bob.ip.caffe_extractor',
-    license = 'BSD',
-    author = 'Tiago de Freitas Pereira',
-    author_email = 'tiago.pereira@idiap.ch',
-    keywords = 'bob, biometric recognition, evaluation',
+    name="bob.ip.tensorflow_extractor",
+    version=open("version.txt").read().rstrip(),
+    description="Feature extractor using tensorflow CNNs",
+    url="https://gitlab.idiap.ch/tiago.pereira/bob.ip.caffe_extractor",
+    license="BSD",
+    author="Tiago de Freitas Pereira",
+    author_email="tiago.pereira@idiap.ch",
+    keywords="bob, biometric recognition, evaluation",

     # If you have a better, long description of your package, place it on the
     # 'doc' directory and then hook it here
-    long_description = open('README.rst').read(),
+    long_description=open("README.rst").read(),

     # This line is required for any distutils based packaging.
-    packages = find_packages(),
-    include_package_data = True,
+    packages=find_packages(),
+    include_package_data=True,

     # This line defines which packages should be installed when you "install"
     # this package. All packages that are mentioned here, but are not installed
     # on the current system will be installed locally and only visible to the
     # scripts of this package. Don't worry - You won't need administrative
     # privileges when using buildout.
-    install_requires = install_requires,
+    install_requires=install_requires,

     # Your project should be called something like 'bob.<foo>' or
     # 'bob.<foo>.<bar>'. To implement this correctly and still get all your
     # packages to be imported w/o problems, you need to implement namespaces
@@ -80,8 +76,6 @@ setup(
     # Our database packages are good examples of namespace implementations
     # using several layers. You can check them out here:
     # https://github.com/idiap/bob/wiki/Satellite-Packages

     # This entry defines which scripts you will have inside the 'bin' directory
     # once you install the package (or run 'bin/buildout'). The order of each
     # entry under 'console_scripts' is like this:
@@ -93,17 +87,16 @@ setup(
     # installed under 'example/foo.py' that contains a function which
     # implements the 'main()' function of particular script you want to have
     # should be referred as 'example.foo:main'.

     # Classifiers are important if you plan to distribute this package through
     # PyPI. You can find the complete list of classifiers that are valid and
     # useful here (http://pypi.python.org/pypi?%3Aaction=list_classifiers).
-    classifiers = [
-        'Framework :: Bob',
-        'Development Status :: 4 - Beta',
-        'Intended Audience :: Developers',
-        'License :: OSI Approved :: BSD License',
-        'Natural Language :: English',
-        'Programming Language :: Python',
-        'Topic :: Scientific/Engineering :: Artificial Intelligence',
+    classifiers=[
+        "Framework :: Bob",
+        "Development Status :: 4 - Beta",
+        "Intended Audience :: Developers",
+        "License :: OSI Approved :: BSD License",
+        "Natural Language :: English",
+        "Programming Language :: Python",
+        "Topic :: Scientific/Engineering :: Artificial Intelligence",
     ],
 )