Commit c8c06b23 authored by Manuel Günther

Added baselines script; lots of small corrections

parent 337825c9
Showing with 463 additions and 29 deletions
from . import preprocessor
from . import extractor
from . import algorithm
+from . import script
from . import test
...
@@ -105,7 +105,7 @@ class GaborJet (Algorithm):
    return [[bob.ip.gabor.Jet(jets_per_node[n])] for n in range(len(jets_per_node))]

-  def save_model(self, model, model_file):
+  def write_model(self, model, model_file):
    """Saves the enrolled model of Gabor jets to file."""
    f = bob.io.base.HDF5File(model_file, 'w')
    # several model graphs
...
#!/usr/bin/env python

import bob.bio.base
import bob.ip.gabor

similarity_function = bob.ip.gabor.Similarity("PhaseDiffPlusCanberra", bob.ip.gabor.Transform())

def gabor_jet_similarities(f1, f2):
  """Computes the similarity vector between two Gabor graph features"""
  assert len(f1) == len(f2)
  return [similarity_function(f1[i], f2[i]) for i in range(len(f1))]


algorithm = bob.bio.base.algorithm.BIC(
  # measure to compare two features in input space
  comparison_function = gabor_jet_similarities,
  # load and save functions
  read_function = bob.ip.gabor.load_jets,
  write_function = bob.ip.gabor.save_jets,
  # limit the number of training pairs
  maximum_training_pair_count = 1000000,
  # dimensions of the intrapersonal and extrapersonal subspaces
  subspace_dimensions = (20, 20),
  multiple_model_scoring = 'max'
)
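For illustration, the comparison function defined above can also be exercised directly on two lists of Gabor jets; a minimal sketch, where the HDF5 file names are hypothetical and the jets are assumed to have been written earlier with bob.ip.gabor.save_jets:

import bob.io.base
import bob.ip.gabor

# hypothetical feature files, written earlier with bob.ip.gabor.save_jets
jets1 = bob.ip.gabor.load_jets(bob.io.base.HDF5File('features1.hdf5'))
jets2 = bob.ip.gabor.load_jets(bob.io.base.HDF5File('features2.hdf5'))

# one similarity value per node of the grid graph
similarities = gabor_jet_similarities(jets1, jets2)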
...
@@ -3,7 +3,7 @@
import bob.db.xm2vts
import bob.bio.base

-xm2vts_directory = "[YOUR_XM2VTS_IMAGE_DIRECTORY]"
+xm2vts_directory = "[YOUR_XM2VTS_DIRECTORY]"

# setup for XM2VTS
database = bob.bio.base.database.DatabaseBob(
...
#!/usr/bin/env python

import bob.bio.face

# detects the face and the eye landmarks, and crops the face using the detected eye positions
preprocessor = bob.bio.face.preprocessor.FaceDetect(
  face_cropper = 'face-crop-eyes',
  use_flandmark = True
)

# detects the face and crops it without detecting eye landmarks
preprocessor_no_eyes = bob.bio.face.preprocessor.FaceDetect(
  face_cropper = 'face-crop-eyes',
  use_flandmark = False
)
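Both variants are callable like any other preprocessor in this package (cf. the unit tests below); since the face is detected automatically, no annotations need to be passed. A minimal sketch, where 'image.png' is a hypothetical file name:

import bob.io.base

image = bob.io.base.load('image.png')  # hypothetical input image
cropped = preprocessor(image, None)    # detect the face, localize landmarks, and crop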
#!/usr/bin/env python

import bob.bio.face

preprocessor = bob.bio.face.preprocessor.FaceDetect(
  face_cropper = 'face-crop-eyes'
)
...
@@ -3,3 +3,7 @@ import bob.bio.face

preprocessor = bob.bio.face.preprocessor.HistogramEqualization(
  face_cropper = 'face-crop-eyes'
)
+
+preprocessor_no_crop = bob.bio.face.preprocessor.HistogramEqualization(
+  face_cropper = None
+)
import bob.bio.face
import numpy

preprocessor = bob.bio.face.preprocessor.INormLBP(
  face_cropper = 'face-crop-eyes',
  dtype = numpy.float64
)

preprocessor_no_crop = bob.bio.face.preprocessor.INormLBP(
  face_cropper = None,
  dtype = numpy.float64
)
...
import bob.bio.face

preprocessor = bob.bio.face.preprocessor.INormLBP(
  face_cropper = 'face-crop-eyes'
)
...
@@ -3,3 +3,7 @@ import bob.bio.face

preprocessor = bob.bio.face.preprocessor.SelfQuotientImage(
  face_cropper = 'face-crop-eyes'
)
+
+preprocessor_no_crop = bob.bio.face.preprocessor.SelfQuotientImage(
+  face_cropper = None
+)
...
@@ -3,3 +3,7 @@ import bob.bio.face

preprocessor = bob.bio.face.preprocessor.TanTriggs(
  face_cropper = 'face-crop-eyes'
)
+
+preprocessor_no_crop = bob.bio.face.preprocessor.TanTriggs(
+  face_cropper = None
+)
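The new *_no_crop variants apply photometric normalization to images that are already cropped; combined with the detector-based croppers, they split geometric and photometric normalization into two steps, which the updated unit tests below rely on. A minimal sketch, assuming image is an already loaded image array:

import bob.bio.base
import bob.bio.face

# one step: face detection + landmark localization + cropping + Tan&Triggs
chain = bob.bio.face.preprocessor.TanTriggs(face_cropper = 'landmark-detect')
preprocessed = chain(image, None)

# equivalent two steps: crop first, then photometrically normalize
cropper = bob.bio.base.load_resource('landmark-detect', 'preprocessor')
tan_triggs = bob.bio.base.load_resource('tan-triggs', 'preprocessor')
reference = tan_triggs(cropper(image, None))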
#!../bin/python
from __future__ import print_function

import subprocess
import os
import sys
import argparse

import bob.bio.base
import bob.core
logger = bob.core.log.setup("bob.bio.face")

# This is the default set of databases that can be used with this script.
all_databases = bob.bio.base.resource_keys('database')
# check which databases can actually be accessed
available_databases = []
for database in all_databases:
  try:
    bob.bio.base.load_resource(database, 'database')
    available_databases.append(database)
  except:
    pass
# collect all algorithms that we provide baselines for
all_algorithms = ['eigenface', 'lda', 'gabor-graph', 'lgbphs', 'plda', 'bic']
try:
  # check whether the GMM-based algorithms are available
  bob.bio.base.load_resource('gmm', 'algorithm')
  bob.bio.base.load_resource('isv', 'algorithm')
  bob.bio.base.load_resource('ivector', 'algorithm')
  all_algorithms += ['gmm', 'isv', 'ivector']
except:
  print("Could not load the GMM-based algorithms. Did you specify bob.bio.gmm in your config file?")

try:
  # check whether the CSU extension is enabled
  bob.bio.base.load_resource('lrpca', 'algorithm')
  bob.bio.base.load_resource('lda-ir', 'algorithm')
  all_algorithms += ['lrpca', 'lda-ir']
except:
  print("Could not load the algorithms from the CSU resources. Did you specify bob.bio.csu in your config file?")
def command_line_arguments(command_line_parameters):
  """Defines the command line parameters that are accepted."""

  # create parser
  parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)

  # add parameters
  # - the algorithm(s) to execute
  parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('eigenface',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
  parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
  # - the database to choose
  parser.add_argument('-d', '--database', choices = available_databases, default = 'atnt', help = 'The database on which the baseline algorithm is executed.')
  # - the sub-directory where the baseline results are stored
  parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory where the baseline results are stored.')
  # - the directory to write
  parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see ./bin/verify.py --help).')
  # - use the Idiap grid -- option is only useful if you are at Idiap
  parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
  # - run in parallel on the local machine
  parser.add_argument('-l', '--parallel', type = int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads.')
  # - perform ZT-normalization
  parser.add_argument('-z', '--zt-norm', action = 'store_true', help = 'Compute the ZT norm for the files (might not be available for all databases).')
  # - just print?
  parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
  # - evaluate the algorithm (after it has finished)
  parser.add_argument('-e', '--evaluate', nargs = '+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
  # - other parameters that are passed to the underlying script
  parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the ./bin/verify.py script.')

  bob.core.log.add_command_line_option(parser)
  args = parser.parse_args(command_line_parameters)
  if args.all:
    args.algorithms = all_algorithms

  bob.core.log.set_verbosity_level(logger, args.verbose)
  return args
# Here, the default baseline experiments are defined.
# Each experiment consists of several configuration entries:
# - the preprocessor to apply (one variant with, one without eye annotations)
# - the features to be extracted
# - the algorithm to be run
# - the grid configuration that it requires (only used when the --grid option is chosen)
CONFIGURATIONS = {
  'eigenface': dict(
    preprocessor = ('face-crop-eyes', 'base'),
    extractor = 'linearize',
    algorithm = 'pca',
  ),
  'lda': dict(
    preprocessor = ('face-crop-eyes', 'base'),
    extractor = 'eigenface',
    algorithm = 'lda',
  ),
  'plda': dict(
    preprocessor = ('face-crop-eyes', 'base'),
    extractor = 'linearize',
    algorithm = 'pca+plda',
    grid = 'demanding'
  ),
  'gabor-graph': dict(
    preprocessor = ('inorm-lbp-crop', 'inorm-lbp'),
    extractor = 'grid-graph',
    algorithm = 'gabor-jet',
  ),
  'lgbphs': dict(
    preprocessor = ('tan-triggs-crop', 'tan-triggs'),
    extractor = 'lgbphs',
    algorithm = 'lgbphs',
  ),
  'bic': dict(
    preprocessor = ('face-crop-eyes', 'base'),
    extractor = 'grid-graph',
    algorithm = 'bic-jets',
    grid = 'demanding'
  ),
  'gmm': dict(
    preprocessor = ('tan-triggs-crop', 'tan-triggs'),
    extractor = 'dct-blocks',
    algorithm = 'gmm',
    grid = 'demanding',
    script = './bin/verify_gmm.py'
  ),
  'isv': dict(
    preprocessor = ('tan-triggs-crop', 'tan-triggs'),
    extractor = 'dct-blocks',
    algorithm = 'isv',
    grid = 'demanding',
    script = './bin/verify_isv.py'
  ),
  'ivector': dict(
    preprocessor = ('tan-triggs-crop', 'tan-triggs'),
    extractor = 'dct-blocks',
    algorithm = 'ivector',
    grid = 'demanding',
    script = './bin/verify_ivector.py'
  ),
  'lrpca': dict(
    preprocessor = ('lrpca', None),
    extractor = 'lrpca',
    algorithm = 'lrpca'
  ),
  'lda-ir': dict(
    preprocessor = ('lda-ir', None),
    extractor = 'lda-ir',
    algorithm = 'lda-ir'
  )
}
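For illustration, with the default atnt database (which provides no eye annotations, so the second preprocessor of each pair is chosen), the 'eigenface' entry makes main() below assemble a command equivalent to:

./bin/verify.py --database atnt --preprocessor base --extractor linearize --algorithm pca --sub-directory baselines/eigenface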
def main(command_line_parameters = None):

  # Collect command line arguments
  args = command_line_arguments(command_line_parameters)

  # Check the properties of the selected database
  has_eyes = args.database != 'atnt'
  has_zt_norm = args.database in ('banca', 'mobio', 'multipie', 'scface')
  has_eval = args.database in ('banca', 'mobio', 'multipie', 'scface', 'xm2vts')

  if not args.evaluate:
    # execution of the job is requested
    for algorithm in args.algorithms:
      logger.info("Executing algorithm '%s'", algorithm)

      # get the setup for the desired algorithm
      import copy
      setup = copy.deepcopy(CONFIGURATIONS[algorithm])
      if 'grid' not in setup: setup['grid'] = 'grid'
      if 'script' not in setup or (not args.grid and args.parallel is None): setup['script'] = './bin/verify.py'

      # select the preprocessor
      setup['preprocessor'] = setup['preprocessor'][0 if has_eyes else 1]
      if setup['preprocessor'] is None:
        logger.warn("Skipping algorithm '%s' since no preprocessor matches the configuration of database '%s'", algorithm, args.database)
        continue

      # this is the default sub-directory that is used
      sub_directory = os.path.join(args.baseline_directory, algorithm)

      # create the command to the faceverify script
      command = [
        setup['script'],
        '--database', args.database,
        '--preprocessor', setup['preprocessor'],
        '--extractor', setup['extractor'],
        '--algorithm', setup['algorithm'],
        '--sub-directory', sub_directory
      ]

      # add grid argument, if available
      if args.grid:
        command += ['--grid', setup['grid'], '--stop-on-failure']

      if args.parallel is not None:
        command += ['--grid', 'bob.bio.base.grid.Grid("local", number_of_parallel_processes=%d)' % args.parallel, '--run-local-scheduler', '--stop-on-failure']

      # compute ZT-norm if the database provides this setup
      if has_zt_norm and args.zt_norm:
        command += ['--zt-norm']

      # compute results for both 'dev' and 'eval' group if the database provides these
      if has_eval:
        command += ['--groups', 'dev', 'eval']

      # set the directories, if desired; we set both directories to be identical
      if args.directory is not None:
        command += ['--temp-directory', os.path.join(args.directory, args.database), '--result-directory', os.path.join(args.directory, args.database)]

      # set the verbosity level
      if args.verbose:
        command += ['-' + 'v'*args.verbose]

      # add the command line arguments that were specified on command line
      if args.parameters:
        command += args.parameters[1:]

      # print the command so that it can easily be re-issued
      logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command))

      # run the command
      if not args.dry_run:
        subprocess.call(command)
  else:
    # call the evaluate script with the desired parameters

    # get the base directory of the results
    is_idiap = os.path.isdir("/idiap")
    if args.directory is None:
      args.directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results"
    if not os.path.exists(args.directory):
      if not args.dry_run:
        raise IOError("The result directory '%s' cannot be found. Please specify the --directory as it was specified during execution of the algorithms." % args.directory)

    # get the result directory of the database
    result_dir = os.path.join(args.directory, args.baseline_directory)
    if not os.path.exists(result_dir):
      if not args.dry_run:
        raise IOError("The result directory '%s' for the desired database cannot be found. Did you already run the experiments for this database?" % result_dir)

    # iterate over the algorithms and collect the result files
    result_dev = []
    result_eval = []
    result_zt_dev = []
    result_zt_eval = []
    legends = []

    # evaluate the results
    for algorithm in args.algorithms:
      if not os.path.exists(os.path.join(result_dir, algorithm)):
        logger.warn("Skipping algorithm '%s' since the results cannot be found.", algorithm)
        continue
      protocols = [d for d in os.listdir(os.path.join(result_dir, algorithm)) if os.path.isdir(os.path.join(result_dir, algorithm, d))]
      if not len(protocols):
        logger.warn("Skipping algorithm '%s' since the results cannot be found.", algorithm)
        continue
      if len(protocols) > 1:
        logger.warn("There are several protocols found in directory '%s'. Here, we use protocol '%s'.", os.path.join(result_dir, algorithm), protocols[0])

      nonorm_sub_dir = os.path.join(algorithm, protocols[0], 'nonorm')
      ztnorm_sub_dir = os.path.join(algorithm, protocols[0], 'ztnorm')

      # collect the resulting files
      if os.path.exists(os.path.join(result_dir, nonorm_sub_dir, 'scores-dev')):
        result_dev.append(os.path.join(nonorm_sub_dir, 'scores-dev'))
        legends.append(algorithm)
      if has_eval and os.path.exists(os.path.join(result_dir, nonorm_sub_dir, 'scores-eval')):
        result_eval.append(os.path.join(nonorm_sub_dir, 'scores-eval'))
      if has_zt_norm:
        if os.path.exists(os.path.join(result_dir, ztnorm_sub_dir, 'scores-dev')):
          result_zt_dev.append(os.path.join(ztnorm_sub_dir, 'scores-dev'))
        if has_eval and os.path.exists(os.path.join(result_dir, ztnorm_sub_dir, 'scores-eval')):
          result_zt_eval.append(os.path.join(ztnorm_sub_dir, 'scores-eval'))

    # check if we have found some results
    if not result_dev:
      logger.warn("No result files were detected -- skipping evaluation.")
      return

    # call the evaluate script
    base_command = ['./bin/evaluate.py', '--directory', result_dir, '--legends'] + legends
    if 'EER' in args.evaluate:
      base_command += ['--criterion', 'EER']
    elif 'HTER' in args.evaluate:
      base_command += ['--criterion', 'HTER']
    if 'ROC' in args.evaluate:
      base_command += ['--roc', 'ROCxxx.pdf']
    if 'DET' in args.evaluate:
      base_command += ['--det', 'DETxxx.pdf']
    if 'CMC' in args.evaluate:
      base_command += ['--cmc', 'CMCxxx.pdf']
    if 'RR' in args.evaluate:
      base_command += ['--rr']
    if args.verbose:
      base_command += ['-' + 'v'*args.verbose]
    # first, run the nonorm evaluation; if ZT-norm results exist as well,
    # distinguish the plot files by a '_nonorm' suffix
    if result_zt_dev:
      command = [cmd.replace('xxx', '_nonorm') for cmd in base_command]
    else:
      command = [cmd.replace('xxx', '') for cmd in base_command]
    command += ['--dev-files'] + result_dev
    if result_eval:
      command += ['--eval-files'] + result_eval

    logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command))
    if not args.dry_run:
      subprocess.call(command)

    # now, also run the ZT-norm evaluation, if available
    if result_zt_dev:
      command = [cmd.replace('xxx', '_ztnorm') for cmd in base_command]
      command += ['--dev-files'] + result_zt_dev
      if result_zt_eval:
        command += ['--eval-files'] + result_zt_eval

      logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command))
      if not args.dry_run:
        subprocess.call(command)
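Once installed as the baselines.py console script (see the setup.py changes below), typical invocations would look as follows; all options shown are defined in command_line_arguments above:

# run two baselines on the default atnt database, verbosely
./bin/baselines.py -d atnt -a eigenface gabor-graph -vv

# only print the commands without executing them
./bin/baselines.py -a lgbphs --dry-run

# evaluate previously computed results with the EER criterion and ROC plots
./bin/baselines.py -d atnt -a eigenface gabor-graph -e EER ROC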
...
@@ -30,6 +30,7 @@ regenerate_refs = False

import bob.bio.base
import bob.bio.face
+import bob.db.verification.utils

def _compare(data, reference, write_function = bob.bio.base.save, read_function = bob.bio.base.load):
...
@@ -86,10 +87,11 @@ def test_face_crop():

def test_face_detect():
  image, annotation = _image(), None

-  cropper = bob.bio.base.load_resource('face-detect-eyes', 'preprocessor')
+  cropper = bob.bio.base.load_resource('face-detect', 'preprocessor')
  assert isinstance(cropper, bob.bio.face.preprocessor.FaceDetect)
  assert isinstance(cropper, bob.bio.face.preprocessor.Base)
  assert isinstance(cropper, bob.bio.base.preprocessor.Preprocessor)
+  assert cropper.flandmark is None

  # execute face detector
  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/detected.hdf5')
...
@@ -103,11 +105,11 @@ def test_face_detect():
  assert abs(cropper.quality - 33.1136586) < 1e-5

  # execute face detector with tan-triggs
-  cropper = bob.bio.face.preprocessor.TanTriggs(face_cropper='face-detect-eyes')
+  cropper = bob.bio.face.preprocessor.TanTriggs(face_cropper='landmark-detect')
  preprocessed = cropper(image, annotation)
  # load reference and perform Tan-Triggs
-  detected = bob.bio.base.load(pkg_resources.resource_filename('bob.bio.face.test', 'data/detected.hdf5'))
+  detected = bob.bio.base.load(pkg_resources.resource_filename('bob.bio.face.test', 'data/flandmark.hdf5'))
-  tan_triggs = bob.bio.face.preprocessor.TanTriggs(face_cropper=None)
+  tan_triggs = bob.bio.base.load_resource('tan-triggs', 'preprocessor')
  reference = tan_triggs(detected)
  assert numpy.allclose(preprocessed, reference, atol=1e-5)
...
@@ -116,7 +118,7 @@ def test_tan_triggs():
  # read input
  image, annotation = _image(), _annotation()

-  preprocessor = bob.bio.base.load_resource('tan-triggs-eyes', 'preprocessor')
+  preprocessor = bob.bio.base.load_resource('tan-triggs-crop', 'preprocessor')
  assert isinstance(preprocessor, bob.bio.face.preprocessor.TanTriggs)
  assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
  assert isinstance(preprocessor, bob.bio.base.preprocessor.Preprocessor)
...
@@ -135,7 +137,7 @@ def test_inorm_lbp():
  # read input
  image, annotation = _image(), _annotation()

-  preprocessor = bob.bio.base.load_resource('inorm-lbp-eyes', 'preprocessor')
+  preprocessor = bob.bio.base.load_resource('inorm-lbp-crop', 'preprocessor')
  assert isinstance(preprocessor, bob.bio.face.preprocessor.INormLBP)
  assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
  assert isinstance(preprocessor, bob.bio.base.preprocessor.Preprocessor)
...
@@ -148,7 +150,7 @@ def test_heq():
  # read input
  image, annotation = _image(), _annotation()

-  preprocessor = bob.bio.base.load_resource('histogram-eyes', 'preprocessor')
+  preprocessor = bob.bio.base.load_resource('histogram-crop', 'preprocessor')
  assert isinstance(preprocessor, bob.bio.face.preprocessor.HistogramEqualization)
  assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
  assert isinstance(preprocessor, bob.bio.base.preprocessor.Preprocessor)
...
@@ -161,7 +163,7 @@ def test_sqi():
  # read input
  image, annotation = _image(), _annotation()

-  preprocessor = bob.bio.base.load_resource('self-quotient-eyes', 'preprocessor')
+  preprocessor = bob.bio.base.load_resource('self-quotient-crop', 'preprocessor')
  assert isinstance(preprocessor, bob.bio.face.preprocessor.SelfQuotientImage)
  assert isinstance(preprocessor, bob.bio.face.preprocessor.Base)
  assert isinstance(preprocessor, bob.bio.base.preprocessor.Preprocessor)
...
import bob.bio.base.test.utils
import bob.bio.face

@bob.bio.base.test.utils.grid_available
def test_baselines():
  # test that all of the baselines would execute
  from bob.bio.face.script.baselines import available_databases, all_algorithms, main

  for database in available_databases:
    parameters = ['-d', database, '--dry-run', '-vv']
    main(parameters)
    parameters.append('--grid')
    main(parameters)
    parameters.extend(['-e', 'HTER'])
    main(parameters)

  for algorithm in all_algorithms:
    parameters = ['-a', algorithm, '--dry-run', '-vv']
    main(parameters)
    parameters.append('-g')
    main(parameters)
    parameters.extend(['-e', 'HTER'])
    main(parameters)
...
@@ -5,6 +5,7 @@
[buildout]
parts = scripts
eggs = bob.bio.face
+       bob.bio.gmm
       bob.db.arface
       bob.db.banca
       bob.db.caspeal
...
@@ -40,6 +41,7 @@ develop = src/bob.extension
          src/bob.db.verification.filelist
          src/bob.db.atnt
          src/bob.bio.base
+         src/bob.bio.gmm
          src/bob.learn.boosting
          src/bob.ip.facedetect
          src/bob.ip.flandmark
...
@@ -81,6 +83,7 @@ bob.db.verification.utils = git https://github.com/bioidiap/bob.db.verification.
bob.db.verification.filelist = git https://github.com/bioidiap/bob.db.verification.filelist
bob.db.atnt = git https://github.com/bioidiap/bob.db.atnt
bob.bio.base = git https://github.com/bioidiap/bob.bio.base
+bob.bio.gmm = git https://github.com/bioidiap/bob.bio.gmm
bob.learn.boosting = git https://github.com/bioidiap/bob.learn.boosting
bob.ip.facedetect = git https://github.com/bioidiap/bob.ip.facedetect
bob.ip.flandmark = git https://github.com/bioidiap/bob.ip.flandmark
...
...
@@ -33,10 +33,10 @@
# allows you to test your package with new python dependencies w/o requiring
# administrative interventions.

-from setuptools import setup, find_packages, dist
+from setuptools import setup, dist
dist.Distribution(dict(setup_requires=['bob.extension']))

-from bob.extension.utils import load_requirements
+from bob.extension.utils import load_requirements, find_packages
install_requires = load_requirements()

# The only thing we do in this file is to call the setup() function with all
...
@@ -103,6 +103,7 @@ setup(
    # scripts should be declared using this entry:
    'console_scripts' : [
+     'baselines.py = bob.bio.face.script.baselines:main'
    ],

    'bob.bio.database': [
...
@@ -127,12 +128,18 @@ setup(
    'bob.bio.preprocessor': [
      'base = bob.bio.face.config.preprocessor.base:preprocessor', # simple color conversion
      'face-crop-eyes = bob.bio.face.config.preprocessor.face_crop_eyes:preprocessor', # face crop
-     'inorm-lbp-eyes = bob.bio.face.config.preprocessor.inorm_lbp_eyes:preprocessor', # face crop + inorm-lbp
+     'inorm-lbp-crop = bob.bio.face.config.preprocessor.inorm_lbp:preprocessor', # face crop + inorm-lbp
-     'tan-triggs-eyes = bob.bio.face.config.preprocessor.tan_triggs_eyes:preprocessor', # face crop + inorm-lbp
+     'tan-triggs-crop = bob.bio.face.config.preprocessor.tan_triggs:preprocessor', # face crop + Tan&Triggs
-     'histogram-eyes = bob.bio.face.config.preprocessor.histogram_equalization_eyes:preprocessor', # face crop + inorm-lbp
+     'histogram-crop = bob.bio.face.config.preprocessor.histogram_equalization:preprocessor', # face crop + histogram equalization
-     'self-quotient-eyes= bob.bio.face.config.preprocessor.self_quotient_image_eyes:preprocessor', # face crop + inorm-lbp
+     'self-quotient-crop= bob.bio.face.config.preprocessor.self_quotient_image:preprocessor', # face crop + self quotient image
+     'landmark-detect = bob.bio.face.config.preprocessor.face_detect:preprocessor', # face detection + landmark detection + cropping
+     'face-detect = bob.bio.face.config.preprocessor.face_detect:preprocessor_no_eyes', # face detection + cropping
+     'inorm-lbp = bob.bio.face.config.preprocessor.inorm_lbp:preprocessor_no_crop', # inorm-lbp w/o face-crop
+     'tan-triggs = bob.bio.face.config.preprocessor.tan_triggs:preprocessor_no_crop', # Tan&Triggs w/o face-crop
+     'histogram = bob.bio.face.config.preprocessor.histogram_equalization:preprocessor_no_crop', # histogram equalization w/o face-crop
+     'self-quotient = bob.bio.face.config.preprocessor.self_quotient_image:preprocessor_no_crop', # self quotient image w/o face-crop
-     'face-detect-eyes = bob.bio.face.config.preprocessor.face_detect_eyes:preprocessor', # face detection + cropping
    ],

    'bob.bio.extractor': [
...
@@ -145,6 +152,7 @@ setup(
    'bob.bio.algorithm': [
      'gabor-jet = bob.bio.face.config.algorithm.gabor_jet:algorithm', # Gabor jet comparison
      'lgbphs = bob.bio.face.config.algorithm.lgbphs:algorithm', # LGBPHS histograms
+     'bic-jets = bob.bio.face.config.algorithm.bic_jets:algorithm', # BIC on Gabor jets
    ],
  },
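With these entry points registered, the new resources can be instantiated by name through bob.bio.base, as the updated tests above already do; for example:

import bob.bio.base

# the new BIC-on-Gabor-jets algorithm
algorithm = bob.bio.base.load_resource('bic-jets', 'algorithm')
# a renamed cropping preprocessor
preprocessor = bob.bio.base.load_resource('inorm-lbp-crop', 'preprocessor')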
...