Commit c375bfcf authored by Tiago de Freitas Pereira

First commit
*~
*.swp
*.pyc
bin
eggs
parts
.installed.cfg
.mr.developer.cfg
*.egg-info
src
develop-eggs
sphinx
dist
bob/ip/caffe_extractor/data/face_verification_experiment-master/
bob/ip/caffe_extractor/data/vgg_face_caffe/
# This build file heavily uses template features from YAML so it is generic
# enough for any Bob project. Don't modify it unless you know what you're
# doing.
# Definition of our build pipeline
stages:
  - build
  - test
  - docs
  - wheels
  - deploy
# ---------
# Templates
# ---------
# Template for the build stage
# Needs to run on all supported architectures, platforms and python versions
.build_template: &build_job
  stage: build
  before_script:
    - git clean -ffdx
    - mkdir _ci
    - curl --silent "https://gitlab.idiap.ch/bob/bob.admin/raw/master/gitlab/install.sh" > _ci/install.sh
    - chmod 755 _ci/install.sh
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_build.sh
  script:
    - ./_ci/build.sh
  after_script:
    - ./_ci/after_build.sh
  artifacts:
    expire_in: 1 week
    paths:
      - _ci/
      - dist/
      - sphinx/
# Template for the test stage - re-installs from uploaded wheels
# Needs to run on all supported architectures, platforms and python versions
.test_template: &test_job
  stage: test
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_test.sh
  script:
    - ./_ci/test.sh
  after_script:
    - ./_ci/after_test.sh
# Template for the wheel uploading stage
# Needs to run against one supported architecture, platform and python version
.wheels_template: &wheels_job
  stage: wheels
  environment: intranet
  only:
    - master
    - /^v\d+\.\d+\.\d+([abc]\d*)?$/  # PEP-440 compliant version (tags)
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_wheels.sh
  script:
    - ./_ci/wheels.sh
  after_script:
    - ./_ci/after_wheels.sh
# Template for (latest) documentation upload stage
# Only one real job needs to do this
.docs_template: &docs_job
  stage: docs
  environment: intranet
  only:
    - master
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_docs.sh
  script:
    - ./_ci/docs.sh
  after_script:
    - ./_ci/after_docs.sh
# Template for the deployment stage - re-installs from uploaded wheels
# Needs to run on a single architecture only
# Will deploy your package to PyPI and other required services
# Only runs for tags
.deploy_template: &deploy_job
  stage: deploy
  environment: internet
  only:
    - /^v\d+\.\d+\.\d+([abc]\d*)?$/  # PEP-440 compliant version (tags)
  except:
    - branches
  before_script:
    - ./_ci/install.sh _ci #updates
    - ./_ci/before_deploy.sh
  script:
    - ./_ci/deploy.sh
  after_script:
    - ./_ci/after_deploy.sh
# -------------
# Build Targets
# -------------
# Linux + Python 2.7: Builds, tests, uploads wheel and deploys (if needed)
build_linux_27:
  <<: *build_job
  variables: &linux_27_build_variables
    PYTHON_VERSION: "2.7"
    WHEEL_TAG: "py27"
  tags:
    - conda-linux

test_linux_27:
  <<: *test_job
  variables: *linux_27_build_variables
  dependencies:
    - build_linux_27
  tags:
    - conda-linux

wheels_linux_27:
  <<: *wheels_job
  variables: *linux_27_build_variables
  dependencies:
    - build_linux_27
  tags:
    - conda-linux

deploy_linux_27:
  <<: *deploy_job
  variables: *linux_27_build_variables
  dependencies:
    - build_linux_27
  tags:
    - conda-linux
# Linux + Python 3.5: Builds, tests and uploads wheel
build_linux_35:
  <<: *build_job
  variables: &linux_35_build_variables
    PYTHON_VERSION: "3.5"
    WHEEL_TAG: "py3"
  tags:
    - conda-linux

test_linux_35:
  <<: *test_job
  variables: *linux_35_build_variables
  dependencies:
    - build_linux_35
  tags:
    - conda-linux

wheels_linux_35:
  <<: *wheels_job
  variables: *linux_35_build_variables
  dependencies:
    - build_linux_35
  tags:
    - conda-linux

docs_linux_35:
  <<: *docs_job
  variables: *linux_35_build_variables
  dependencies:
    - build_linux_35
  tags:
    - conda-linux
# Linux + Python 3.6: Builds and tests
build_linux_36:
  <<: *build_job
  variables: &linux_36_build_variables
    PYTHON_VERSION: "3.6"
    WHEEL_TAG: "py3"
  tags:
    - conda-linux

test_linux_36:
  <<: *test_job
  variables: *linux_36_build_variables
  dependencies:
    - build_linux_36
  tags:
    - conda-linux
Copyright (c) 2016 Idiap Research Institute, http://www.idiap.ch/
Written by Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
include README.rst buildout.cfg LICENSE version.txt requirements.txt ./bob/ip/tensorflow_extractor/data/checkpoint ./bob/ip/tensorflow_extractor/data/model.ckp.data-00000-of-00001 ./bob/ip/tensorflow_extractor/data/model.ckp.index ./bob/ip/tensorflow_extractor/data/model.ckp.meta
recursive-include doc *.py *.rst
recursive-include bob *.hdf5
.. vim: set fileencoding=utf-8 :
.. Thu Jul 20 12:30:48 CEST 2017
.. image:: https://img.shields.io/badge/docs-stable-yellow.svg
   :target: https://www.idiap.ch/software/bob/docs/bob/bob.ip.tensorflow_extractor/stable/index.html
.. image:: https://img.shields.io/badge/docs-latest-orange.svg
   :target: https://www.idiap.ch/software/bob/docs/bob/bob.ip.tensorflow_extractor/master/index.html
.. image:: https://gitlab.idiap.ch/bob/bob.ip.tensorflow_extractor/badges/master/build.svg
   :target: https://gitlab.idiap.ch/bob/bob.ip.tensorflow_extractor/commits/master
.. image:: https://gitlab.idiap.ch/bob/bob.ip.tensorflow_extractor/badges/master/coverage.svg
   :target: https://gitlab.idiap.ch/bob/bob.ip.tensorflow_extractor/commits/master
.. image:: https://img.shields.io/badge/gitlab-project-0000c0.svg
   :target: https://gitlab.idiap.ch/bob/bob.ip.tensorflow_extractor
.. image:: https://img.shields.io/pypi/v/bob.ip.tensorflow_extractor.svg
   :target: https://pypi.python.org/pypi/bob.ip.tensorflow_extractor
======================================================
Bob interface for feature extraction using TensorFlow
======================================================
This package is part of the signal-processing and machine learning toolbox
Bob_. It provides functionality to extract features from CNNs trained with
TensorFlow (https://www.tensorflow.org/).
Installation
------------
Complete Bob's `installation`_ instructions. Then, to install this package,
run::
  $ conda install bob.ip.tensorflow_extractor
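
Below is a minimal usage sketch. It reuses the package's own
``scratch_network`` helper and a made-up checkpoint path; substitute your own
trained graph and checkpoint in practice::

  import numpy
  import tensorflow as tf
  import bob.ip.tensorflow_extractor
  from bob.ip.tensorflow_extractor import scratch_network

  inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
  graph = scratch_network(inputs)
  extractor = bob.ip.tensorflow_extractor.Extractor(
      "/path/to/model.ckp", inputs, graph)  # placeholder checkpoint path
  data = numpy.random.rand(2, 28, 28, 1).astype("float32")
  features = extractor(data)  # numpy array of shape (2, 10)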
Contact
-------
For questions or reporting issues to this software package, contact our
development `mailing list`_.
.. Place your references here:
.. _bob: https://www.idiap.ch/software/bob
.. _installation: https://www.idiap.ch/software/bob/install
.. _mailing list: https://www.idiap.ch/software/bob/discuss
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Fri 17 Jun 2016 10:41:36 CEST
import tensorflow as tf
import os
class Extractor(object):
    """
    Feature extractor using tensorflow
    """

    def __init__(self, checkpoint_filename, input_tensor, graph):
        """Loads the tensorflow model

        Parameters
        ----------
        checkpoint_filename : str
            Path of your checkpoint. If the ``.meta`` file is provided, the
            last checkpoint will be loaded.
        input_tensor : tf.Tensor
            Tensor used as the data entry point. It can be a
            ``tf.placeholder``, the result of
            ``tf.train.string_input_producer``, etc.
        graph : tf.Tensor
            A ``tf.Tensor`` containing the operations to be executed
        """
        self.input_tensor = input_tensor
        self.graph = graph

        # Initializing the variables of the current graph
        self.session = tf.Session()
        self.session.run(tf.global_variables_initializer())

        # Loading the last checkpoint and overwriting the current variables
        saver = tf.train.Saver()
        if os.path.splitext(checkpoint_filename)[1] == ".meta":
            # A .meta file was given: restore the latest checkpoint found in
            # the same directory
            saver.restore(self.session,
                          tf.train.latest_checkpoint(
                              os.path.dirname(checkpoint_filename)))
        else:
            saver.restore(self.session, checkpoint_filename)

    def __del__(self):
        tf.reset_default_graph()

    def __call__(self, data):
        """
        Forward the data with the loaded neural network

        Parameters
        ----------
        data : numpy.array
            Input data

        Returns
        -------
        numpy.array
            The features.
        """
        return self.session.run(self.graph, feed_dict={self.input_tensor: data})
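
# Usage sketch (illustrative only): passing a ``.meta`` file restores the
# latest checkpoint found in the same directory, as handled in ``__init__``
# above. ``build_graph`` is a hypothetical function recreating the trained
# graph.
#
#   import numpy
#   import tensorflow as tf
#   inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
#   graph = build_graph(inputs)
#   extractor = Extractor("/path/to/model.ckp.meta", inputs, graph)
#   features = extractor(numpy.random.rand(1, 28, 28, 1).astype("float32"))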
#!/usr/bin/env python
def scratch_network(inputs, end_point="fc1", reuse=False):
    import tensorflow as tf
    slim = tf.contrib.slim

    # Creating a random network
    initializer = tf.contrib.layers.xavier_initializer(seed=10)
    end_points = dict()

    graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu,
                        stride=1, scope='conv1',
                        weights_initializer=initializer, reuse=reuse)
    end_points["conv1"] = graph

    graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
    end_points["pool1"] = graph

    graph = slim.flatten(graph, scope='flatten1')
    end_points["flatten1"] = graph

    graph = slim.fully_connected(graph, 10, activation_fn=None, scope='fc1',
                                 weights_initializer=initializer, reuse=reuse)
    end_points["fc1"] = graph

    return end_points[end_point]
def download_file(url, out_file):
    """Downloads a file from a given url

    Parameters
    ----------
    url : str
        The url to download from.
    out_file : str
        Where to save the file.
    """
    import sys
    if sys.version_info[0] < 3:
        # python2 technique for downloading a file
        from urllib2 import urlopen
        with open(out_file, 'wb') as f:
            response = urlopen(url)
            f.write(response.read())
    else:
        # python3 technique for downloading a file
        from urllib.request import urlopen
        from shutil import copyfileobj
        with urlopen(url) as response:
            with open(out_file, 'wb') as f:
                copyfileobj(response, f)
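
# Example (hypothetical URL, shown for illustration only):
#
#   download_file("https://example.com/pretrained/model.ckp.index",
#                 "model.ckp.index")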
from .Extractor import Extractor
def get_config():
    """Returns a string containing the configuration information.
    """
    import bob.extension
    return bob.extension.get_config(__name__)


# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.

    Fixes sphinx warnings of not being able to find classes, when the path is
    shortened.

    Parameters:

        *args: An iterable of objects to modify

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`_
    """
    for obj in args:
        obj.__module__ = __name__


__appropriate__(
    Extractor,
)

# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
model_checkpoint_path: "model.ckp"
all_model_checkpoint_paths: "model.ckp"
import bob.io.base
from bob.io.base.test_utils import datafile
import bob.ip.tensorflow_extractor
import tensorflow as tf
import pkg_resources
import numpy
numpy.random.seed(10)
import os
slim = tf.contrib.slim
from . import scratch_network
def test_output():
    # Loading MNIST model
    filename = os.path.join(
        pkg_resources.resource_filename(__name__, 'data'), 'model.ckp')
    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))

    # Testing the last output
    graph = scratch_network(inputs)
    extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
    data = numpy.random.rand(2, 28, 28, 1).astype("float32")
    output = extractor(data)
    assert output.shape == (2, 10)
    del extractor

    # Testing flatten
    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    graph = scratch_network(inputs, end_point="flatten1")
    extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
    data = numpy.random.rand(2, 28, 28, 1).astype("float32")
    output = extractor(data)
    assert output.shape == (2, 1690)
    del extractor
def test_output_from_meta():
    # Loading MNIST model
    filename = os.path.join(
        pkg_resources.resource_filename(__name__, 'data'), "model.ckp.meta")
    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))

    # Testing the last output
    graph = scratch_network(inputs)
    extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
    data = numpy.random.rand(2, 28, 28, 1).astype("float32")
    output = extractor(data)
    assert output.shape == (2, 10)
    del extractor

    # Testing flatten
    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
    graph = scratch_network(inputs, end_point="flatten1")
    extractor = bob.ip.tensorflow_extractor.Extractor(filename, inputs, graph)
    data = numpy.random.rand(2, 28, 28, 1).astype("float32")
    output = extractor(data)
    assert output.shape == (2, 1690)
    del extractor
; vim: set fileencoding=utf-8 :
; Manuel Guenther <tiago.pereira@idiap.ch>
; Thu Oct 9 16:51:06 CEST 2014
[buildout]
parts = scripts
develop = .
eggs = bob.ip.tensorflow_extractor
extensions = bob.buildout
auto-checkout = *
; options for bob.buildout
debug = false
verbose = true
newest = false
[scripts]
recipe = bob.buildout:scripts
dependent-scripts = true
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import os
import sys
import glob
import pkg_resources
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.ifconfig',
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.doctest',
    'sphinx.ext.graphviz',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'matplotlib.sphinxext.plot_directive',
]
import sphinx
from distutils.version import LooseVersion

# Compare versions numerically, not lexically (a plain string comparison
# would misorder e.g. "1.10" and "1.4.1")
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4.1"):
    extensions.append('sphinx.ext.imgmath')
    imgmath_image_format = 'svg'
else:
    extensions.append('sphinx.ext.pngmath')
# Be picky about warnings
nitpicky = True
# Ignores stuff we can't easily resolve on other project's sphinx manuals
nitpick_ignore = []
# Allows the user to override warnings from a separate file
if os.path.exists('nitpick-exceptions.txt'):
    for line in open('nitpick-exceptions.txt'):
        if line.strip() == "" or line.startswith("#"):
            continue
        dtype, target = line.split(None, 1)
        target = target.strip()
        try:  # python 2.x
            target = unicode(target)
        except NameError:
            pass
        nitpick_ignore.append((dtype, target))
# Always includes todos
todo_include_todos = True
# Generates auto-summary automatically
autosummary_generate = True
# Create numbers on figures with captions
numfig = True
# If we are on OSX, the 'dvipng' path may be different
dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx