Commit fe59dddf authored by Manuel Günther's avatar Manuel Günther

Added single scripts to run preprocessing, extraction, enrollment and scoring separately; fixed the command line scripts to use None as the default parameter list
parent 999724d8
@@ -14,7 +14,7 @@ import logging
logger = logging.getLogger("bob.bio.base")
class LDA (Algorithm):
"""Tool for computing linear discriminant analysis (so-called Fisher faces)"""
"""Computes linear discriminant analysis"""
def __init__(
self,
@@ -14,7 +14,7 @@ import logging
logger = logging.getLogger("bob.bio.base")
class PCA (Algorithm):
"""Tool for computing eigenfaces"""
"""Performs PCA on the given data"""
def __init__(
self,
"""This script can be used to enroll a model from several features using the given algorithm.
"""
import argparse
import bob.core
logger = bob.core.log.setup("bob.bio.base")
import bob.bio.base
def command_line_arguments(command_line_parameters):
"""Parse the program options"""
# set up command line parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-a', '--algorithm', metavar = 'x', nargs = '+', required = True, help = 'Biometric recognition; registered algorithms are: %s' % bob.bio.base.resource_keys('algorithm'))
parser.add_argument('-e', '--extractor', metavar = 'x', nargs = '+', required = True, help = 'Feature extraction; registered feature extractors are: %s' % bob.bio.base.resource_keys('extractor'))
parser.add_argument('-P', '--projector-file', metavar = 'FILE', help = 'The pre-trained projector file, if the algorithm performs projection')
parser.add_argument('-E', '--enroller-file', metavar = 'FILE', help = 'The pre-trained enroller file, if the algorithm requires enroller training')
parser.add_argument('-i', '--input-files', metavar = 'FEATURE', nargs='+', required = True, help = "A list of feature files to enroll the model from")
parser.add_argument('-o', '--output-file', metavar = 'MODEL', default = 'model.hdf5', help = "The file to write the enrolled model into (should be of type HDF5)")
# add verbose option
bob.core.log.add_command_line_option(parser)
# parse arguments
args = parser.parse_args(command_line_parameters)
# set verbosity level
bob.core.log.set_verbosity_level(logger, args.verbose)
return args
def main(command_line_parameters=None):
"""Preprocesses the given image with the given preprocessor."""
args = command_line_arguments(command_line_parameters)
logger.debug("Loading extractor")
extractor = bob.bio.base.load_resource(' '.join(args.extractor), "extractor")
logger.debug("Loading algorithm")
algorithm = bob.bio.base.load_resource(' '.join(args.algorithm), "algorithm")
if algorithm.requires_projector_training:
if args.projector_file is None:
raise ValueError("The desired algorithm requires a pre-trained projector file, but it was not specified")
algorithm.load_projector(args.projector_file)
if algorithm.requires_enroller_training:
if args.enroller_file is None:
raise ValueError("The desired algorithm requires a pre-trained enroller file, but it was not specified")
algorithm.load_enroller(args.enroller_file)
logger.debug("Loading %d features for enrollment", len(args.input_files))
features = [extractor.read_feature(f) for f in args.input_files]
if algorithm.use_projected_features_for_enrollment:
logger.debug("Projecting enrollment features")
features = [algorithm.project(f) for f in features]
logger.debug("Enrolling model")
model = algorithm.enroll(features)
algorithm.write_model(model, args.output_file)
logger.info("Wrote model to file '%s'", args.output_file)
@@ -67,9 +67,7 @@ def command_line_arguments(command_line_parameters):
parser.add_argument('-R', '--roc', help = "If given, ROC curves will be plotted into the given pdf file.")
parser.add_argument('-D', '--det', help = "If given, DET curves will be plotted into the given pdf file.")
parser.add_argument('-C', '--cmc', help = "If given, CMC curves will be plotted into the given pdf file.")
parser.add_argument('-p', '--parser', default = '4column', choices = ('4column', '5column'), help="The style of the resulting score files. The default fits to the usual output of FaceRecLib score files.")
parser.add_argument('--self-test', action='store_true', help=argparse.SUPPRESS)
parser.add_argument('--parser', default = '4column', choices = ('4column', '5column'), help="The style of the resulting score files. The default fits to the usual output of FaceRecLib score files.")
# add verbose option
bob.core.log.add_command_line_option(parser)
"""This script can be used to extract features using the given extractor from the given preprocessed image.
"""
import argparse
import bob.core
logger = bob.core.log.setup("bob.bio.base")
import bob.bio.base
def command_line_arguments(command_line_parameters):
"""Parse the program options"""
# set up command line parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-e', '--extractor', metavar = 'x', nargs = '+', required = True, help = 'Feature extraction; registered feature extractors are: %s' % bob.bio.base.resource_keys('extractor'))
parser.add_argument('-E', '--extractor-file', metavar = 'FILE', help = "The pre-trained extractor file, if the extractor requires training")
parser.add_argument('-p', '--preprocessor', metavar = 'x', nargs = '+', required = True, help = 'Data preprocessing; registered preprocessors are: %s' % bob.bio.base.resource_keys('preprocessor'))
parser.add_argument('-i', '--input-file', metavar = 'PREPROCESSED', required = True, help = "The preprocessed data file to read.")
parser.add_argument('-o', '--output-file', metavar = 'FEATURE', default = 'extracted.hdf5', help = "The file to write the extracted features into (should be of type HDF5)")
# add verbose option
bob.core.log.add_command_line_option(parser)
# parse arguments
args = parser.parse_args(command_line_parameters)
# set verbosity level
bob.core.log.set_verbosity_level(logger, args.verbose)
return args
def main(command_line_parameters=None):
"""Preprocesses the given image with the given preprocessor."""
args = command_line_arguments(command_line_parameters)
logger.debug("Loading preprocessor")
preprocessor = bob.bio.base.load_resource(' '.join(args.preprocessor), "preprocessor")
logger.debug("Loading extractor")
extractor = bob.bio.base.load_resource(' '.join(args.extractor), "extractor")
if extractor.requires_training:
if args.extractor_file is None:
raise ValueError("The desired extractor requires a pre-trained extractor file, but it was not specified")
extractor.load(args.extractor_file)
logger.debug("Loading preprocessed data from file '%s'", args.input_file)
preprocessed = preprocessor.read_data(args.input_file)
logger.info("Extracting features")
extracted = extractor(preprocessed)
extractor.write_feature(extracted, args.output_file)
logger.info("Wrote extracted features to file '%s'", args.output_file)
@@ -18,7 +18,7 @@ global configuration
global place_holder_key
# the extracted command line arguments
global args
# the job ids as returned by the call to the faceverify function
# the job ids as returned by the call to the verify function
global job_ids
# first fake job id (useful for the --dry-run option)
global fake_job_id
@@ -74,7 +74,7 @@ def command_line_options(command_line_parameters):
help = 'Split the gridtk databases after the given level: -1 - never split; 0 - preprocess; 1 - extract; 2 - project; 3 - enroll; 4 - score')
parser.add_argument('-x', '--executable',
help = '(optional) The executable to be executed instead of facereclib/script/faceverify.py (taken *always* from the facereclib, not from the bin directory)')
help = '(optional) The executable to be executed instead of bob/bio/base/verify.py (taken *always* from bob.bio.base, not from the bin directory)')
parser.add_argument('-R', '--result-directory', default = os.path.join("/idiap/user", os.environ["USER"]),
help = 'The directory where to write the resulting score files to.')
@@ -101,7 +101,7 @@ def command_line_options(command_line_parameters):
help = 'Use the given variable instead of the "replace" keyword in the configuration file')
parser.add_argument('parameters', nargs = argparse.REMAINDER,
help = "Parameters directly passed to the face verify script. Use -- to separate this parameters from the parameters of this script. See 'bin/verify.py --help' for a complete list of options.")
help = "Parameters directly passed to the verify script. Use -- to separate this parameters from the parameters of this script. See 'bin/verify.py --help' for a complete list of options.")
bob.core.log.add_command_line_option(parser)
@@ -168,7 +168,7 @@ def replace(string, replacements):
def create_command_line(replacements):
"""Creates the parameters for the function call that will be given to the faceverify script."""
"""Creates the parameters for the function call that will be given to the verify script."""
# get the values to be replaced with
values = {}
for key in configuration.replace:
@@ -200,7 +200,7 @@ dependency_keys = ['DUMMY', 'preprocess', 'extract', 'project', 'enroll']
def directory_parameters(directories):
"""This function generates the faceverify parameters that define the directories, where the data is stored.
"""This function generates the verify parameters that define the directories, where the data is stored.
The directories are set such that data is reused whenever possible, but disjoint if needed."""
def _join_dirs(index, subdir):
# collect sub-directories
@@ -318,7 +318,7 @@ def execute_dependent_task(command_line, directories, dependency_level):
if args.verbose:
print ("Would have executed job", utils.command_line(command_line))
else:
# execute the face verification experiment
# execute the verification experiment
global fake_job_id
new_job_ids = verify.verify(verif_args, command_line, external_fake_job_id = fake_job_id)
else:
@@ -379,7 +379,7 @@ def create_recursive(replace_dict, step_index, directories, dependency_level, ke
new_dependency_level = step_index
def main(command_line_parameters = sys.argv):
def main(command_line_parameters = None):
"""Main entry point for the parameter test. Try --help to see the parameters that can be specified."""
global task_count, job_count, job_ids, score_directories
@@ -388,7 +388,7 @@ def main(command_line_parameters = sys.argv):
job_ids = {}
score_directories = []
command_line_options(command_line_parameters[1:])
command_line_options(command_line_parameters)
global configuration, place_holder_key
configuration = utils.read_config_file(args.configuration_file)
"""This script can be used to preprocess a single data file with a given preprocessor.
"""
import argparse
import bob.core
logger = bob.core.log.setup("bob.bio.base")
import bob.bio.base
import bob.db.verification.utils
import numpy
import bob.core
import bob.io.base
import bob.io.image
def command_line_arguments(command_line_parameters):
"""Parse the program options"""
# set up command line parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--preprocessor', metavar = 'x', nargs = '+', required = True, help = 'Data preprocessing; registered preprocessors are: %s' % bob.bio.base.resource_keys('preprocessor'))
parser.add_argument('-i', '--input-file', metavar = 'FILE', required = True, help = "The data file to be preprocessed.")
# parser.add_argument('-a', '--annotations', nargs='+', help = "Key=value-pairs for the annotations")
parser.add_argument('-a', '--annotation-file', metavar = 'FILE', help = "The annotation file for the given data file, if applicable and/or available; currently the only supported format is the 'named' annotation format.")
parser.add_argument('-o', '--output-file', metavar = 'PREPROCESSED', default = 'preprocessed.hdf5', help = "Write the preprocessed data into this file (should be of type HDF5)")
parser.add_argument('-c', '--convert-as-image', metavar = 'IMAGE', help = "Write the preprocessed data into this image file, converting it to an image, if possible")
# add verbose option
bob.core.log.add_command_line_option(parser)
# parse arguments
args = parser.parse_args(command_line_parameters)
# set verbosity level
bob.core.log.set_verbosity_level(logger, args.verbose)
return args
def main(command_line_parameters=None):
"""Preprocesses the given image with the given preprocessor."""
args = command_line_arguments(command_line_parameters)
logger.debug("Loading preprocessor")
preprocessor = bob.bio.base.load_resource(' '.join(args.preprocessor), "preprocessor")
logger.debug("Loading input data from file '%s'%s", args.input_file, " and '%s'" % args.annotation_file if args.annotation_file is not None else "")
data = preprocessor.read_original_data(args.input_file)
annotations = bob.db.verification.utils.read_annotation_file(args.annotation_file, 'named') if args.annotation_file is not None else None
logger.info("Preprocessing data")
preprocessed = preprocessor(data, annotations)
preprocessor.write_data(preprocessed, args.output_file)
logger.info("Wrote preprocessed data to file '%s'", args.output_file)
if args.convert_as_image is not None:
converted = bob.core.convert(preprocessed, 'uint8', dest_range=(0,255), source_range=(numpy.min(preprocessed), numpy.max(preprocessed)))
bob.io.base.save(converted, args.convert_as_image)
logger.info("Wrote preprocessed data to image file '%s'", args.convert_as_image)
"""This script can be used to compute scores between a list of enrolled models and a list of probe files.
"""
from __future__ import print_function
import argparse
import bob.core
logger = bob.core.log.setup("bob.bio.base")
import bob.bio.base
def command_line_arguments(command_line_parameters):
"""Parse the program options"""
# set up command line parser
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-a', '--algorithm', metavar = 'x', nargs = '+', required = True, help = 'Biometric recognition; registered algorithms are: %s' % bob.bio.base.resource_keys('algorithm'))
parser.add_argument('-P', '--projector-file', metavar = 'FILE', help = 'The pre-trained projector file, if the algorithm performs projection')
parser.add_argument('-E', '--enroller-file', metavar = 'FILE', help = 'The pre-trained enroller file, if the algorithm requires enroller training')
parser.add_argument('-m', '--model-files', metavar = 'MODEL', nargs='+', required = True, help = "A list of enrolled model files")
parser.add_argument('-p', '--probe-files', metavar = 'PROBE', nargs='+', required = True, help = "A list of extracted feature files used as probes")
# add verbose option
bob.core.log.add_command_line_option(parser)
# parse arguments
args = parser.parse_args(command_line_parameters)
# set verbosity level
bob.core.log.set_verbosity_level(logger, args.verbose)
return args
def main(command_line_parameters=None):
"""Preprocesses the given image with the given preprocessor."""
args = command_line_arguments(command_line_parameters)
logger.debug("Loading algorithm")
algorithm = bob.bio.base.load_resource(' '.join(args.algorithm), "algorithm")
if algorithm.requires_projector_training:
if args.projector_file is None:
raise ValueError("The desired algorithm requires a pre-trained projector file, but it was not specified")
algorithm.load_projector(args.projector_file)
if algorithm.requires_enroller_training:
if args.enroller_file is None:
raise ValueError("The desired algorithm requires a pre-trained enroller file, but it was not specified")
algorithm.load_enroller(args.enroller_file)
logger.debug("Loading %d models", len(args.model_files))
models = {m : algorithm.read_model(m) for m in args.model_files}
logger.debug("Loading %d probes", len(args.probe_files))
probes = {p : algorithm.read_probe(p) for p in args.probe_files}
if algorithm.performs_projection:
logger.debug("Projecting %d probes", len(args.probe_files))
probes = {p : algorithm.project(probes[p]) for p in probes}
logger.info("Computing scores")
for p in args.probe_files:
for m in args.model_files:
print("Score between model '%s' and probe '%s' is %3.8f" % (m, p, algorithm.score(models[m], probes[p])))
@@ -34,7 +34,7 @@ def parse_arguments(command_line_parameters, exclude_resources_from = []):
skips = ['preprocessing', 'extractor-training', 'extraction', 'projector-training', 'projection', 'enroller-training', 'enrollment', 'score-computation', 'concatenation', 'calibration'])
def add_jobs(args, submitter = None):
def add_jobs(args, submitter):
"""Adds all (desired) jobs of the tool chain to the grid, or to the local list to be executed."""
# collect the job ids
@@ -378,9 +378,9 @@ def verify(args, command_line_parameters, external_fake_job_id = 0):
return {}
else:
# add jobs
submitter = tools.GridSubmission(args, command_line_parameters, first_fake_job_id = external_fake_job_id) if args.grid else None
submitter = tools.GridSubmission(args, command_line_parameters, first_fake_job_id = external_fake_job_id)
retval = add_jobs(args, submitter)
tools.write_info(args, command_line_parameters)
tools.write_info(args, command_line_parameters, submitter.executable)
if args.grid is not None:
if args.grid.is_local() and args.run_local_scheduler:
@@ -419,11 +419,11 @@ def verify(args, command_line_parameters, external_fake_job_id = 0):
return {}
def main(command_line_parameters = sys.argv):
def main(command_line_parameters = None):
"""Executes the main function"""
try:
# do the command line parsing
args = parse_arguments(command_line_parameters[1:])
args = parse_arguments(command_line_parameters)
# perform the verification test
verify(args, command_line_parameters)
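The change of the default from sys.argv to None works because argparse falls back to sys.argv[1:] on its own when parse_args receives None; the explicit [1:] slicing and the artificial program name in the tests both become unnecessary. A self-contained illustration of the pattern:

import argparse

def main(command_line_parameters=None):
  parser = argparse.ArgumentParser()
  parser.add_argument('-v', '--verbose', action='count', default=0)
  # None -> argparse parses sys.argv[1:]; a list -> parses exactly that list
  return parser.parse_args(command_line_parameters)

main(['-vv'])   # programmatic call, e.g. from a test; plain main() serves the console entry point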
@@ -5,7 +5,6 @@ from __future__ import print_function
import bob.measure
import os
import sys
import shutil
import tempfile
import numpy
@@ -29,7 +28,7 @@ data_dir = pkg_resources.resource_filename('bob.bio.base', 'test/data')
def _verify(parameters, test_dir, sub_dir, ref_modifier="", score_modifier=('scores','')):
from bob.bio.base.script.verify import main
try:
main([sys.argv[0]] + parameters)
main(parameters)
# assert that the score file exists
score_files = [os.path.join(test_dir, sub_dir, 'Default', norm, '%s-dev%s'%score_modifier) for norm in ('nonorm', 'ztnorm')]
@@ -227,7 +226,7 @@ def test_verify_filelist():
try:
from bob.bio.base.script.verify import main
main([sys.argv[0]] + parameters)
main(parameters)
# assert that the score file exists
score_files = [os.path.join(test_dir, 'test_filelist', 'None', norm, 'scores-dev') for norm in ('nonorm', 'ztnorm')]
@@ -300,7 +299,6 @@ def test_grid_search():
try:
# first test without grid option
parameters = [
sys.argv[0],
'-c', os.path.join(dummy_dir, 'grid_search.py'),
'-d', 'dummy',
'-e', 'dummy',
@@ -319,7 +317,6 @@ def test_grid_search():
# now, in the grid...
parameters = [
sys.argv[0],
'-c', os.path.join(dummy_dir, 'grid_search.py'),
'-d', 'dummy',
'-s', 'test_grid_search',
@@ -340,7 +337,6 @@ def test_grid_search():
# and now, finally run locally
parameters = [
sys.argv[0],
'-c', os.path.join(dummy_dir, 'grid_search.py'),
'-d', 'dummy',
'-s', 'test_grid_search',
@@ -359,6 +355,96 @@ def test_grid_search():
# number of jobs in the grid: 36 (including best possible re-use of files; minus preprocessing)
assert bob.bio.base.script.grid_search.job_count == 0
finally:
shutil.rmtree(test_dir)
def test_scripts():
# Tests the bin/preprocess.py, bin/extract.py, bin/enroll.py and bin/score.py scripts
test_dir = tempfile.mkdtemp(prefix='bobtest_')
data_file = os.path.join(test_dir, "data.hdf5")
annotation_file = os.path.join(test_dir, "annotations.txt")
preprocessed_file = os.path.join(test_dir, "preprocessed.hdf5")
preprocessed_image = os.path.join(test_dir, "preprocessed.png")
extractor_file = os.path.join(test_dir, "extractor.hdf5")
extracted_file = os.path.join(test_dir, "extracted.hdf5")
projector_file = os.path.join(test_dir, "projector.hdf5")
enroller_file = os.path.join(test_dir, "enroller.hdf5")
model_file = os.path.join(test_dir, "model.hdf5")
# tests that the preprocess, extract, enroll and score scripts work properly
try:
# create test data
test_data = utils.random_array((20,20), 0., 255., seed=84)
test_data[0,0] = 0.
test_data[19,19] = 255.
bob.io.base.save(test_data, data_file)
with open(annotation_file, 'w') as a:
a.write("leye 100 200\nreye 100 100")
extractor = bob.bio.base.load_resource("dummy", "extractor")
extractor.train([], extractor_file)
algorithm = bob.bio.base.load_resource("dummy", "algorithm")
algorithm.train_projector([], projector_file)
algorithm.train_enroller([], enroller_file)
from bob.bio.base.script.preprocess import main as preprocess
from bob.bio.base.script.extract import main as extract
from bob.bio.base.script.enroll import main as enroll
from bob.bio.base.script.score import main as score
# preprocessing
parameters = [
'-i', data_file,
'-a', annotation_file,
'-p', 'dummy',
'-o', preprocessed_file,
'-c', preprocessed_image
]
preprocess(parameters)
assert os.path.isfile(preprocessed_file)
assert os.path.isfile(preprocessed_image)
assert numpy.allclose(bob.io.base.load(preprocessed_file), test_data)
assert numpy.allclose(bob.io.base.load(preprocessed_image), test_data, rtol=1., atol=1.)
# feature extraction
parameters = [
'-i', preprocessed_file,
'-p', 'dummy',
'-e', 'dummy',
'-E', extractor_file,
'-o', extracted_file,
]
extract(parameters)
assert os.path.isfile(extracted_file)
assert numpy.allclose(bob.io.base.load(extracted_file), test_data.flatten())
# enrollment
parameters = [
'-i', extracted_file, extracted_file,
'-e', 'dummy',
'-a', 'dummy',
'-P', projector_file,
'-E', enroller_file,
'-o', model_file
]
enroll(parameters)
assert os.path.isfile(model_file)
assert numpy.allclose(bob.io.base.load(model_file), test_data.flatten())
# scoring
parameters = [
'-m', model_file, model_file,
'-p', extracted_file, extracted_file,
'-a', 'dummy',
'-P', projector_file,
'-E', enroller_file,
]
score(parameters)
finally:
shutil.rmtree(test_dir)
@@ -25,11 +25,11 @@ def command_line_parser(description=__doc__, exclude_resources_from=[]):
config_group.add_argument('-d', '--database', metavar = 'x', nargs = '+', required = True,
help = 'Database and the protocol; registered databases are: %s' % resource_keys('database', exclude_resources_from))
config_group.add_argument('-p', '--preprocessor', metavar = 'x', nargs = '+', required = True,
help = 'Image preprocessing; registered preprocessors are: %s' % resource_keys('preprocessor', exclude_resources_from))
help = 'Data preprocessing; registered preprocessors are: %s' % resource_keys('preprocessor', exclude_resources_from))
config_group.add_argument('-e', '--extractor', metavar = 'x', nargs = '+', required = True,
help = 'Feature extraction; registered feature extractors are: %s' % resource_keys('extractor', exclude_resources_from))
config_group.add_argument('-a', '--algorithm', metavar = 'x', nargs = '+', required = True,
help = 'Face recognition; registered face recognition algorithms are: %s' % resource_keys('algorithm', exclude_resources_from))
help = 'Biometric recognition; registered algorithms are: %s' % resource_keys('algorithm', exclude_resources_from))
config_group.add_argument('-g', '--grid', metavar = 'x', nargs = '+',
help = 'Configuration for the grid setup; if not specified, the commands are executed sequentially on the local machine.')
config_group.add_argument('--imports', metavar = 'LIB', nargs = '+', default = ['bob.bio.base'],
@@ -249,13 +249,24 @@ def groups(args):
return groups
def write_info(args, command_line_parameters):
def command_line(cmdline):
"""Converts the given options to a string that can be executed on command line."""
c = ""
for cmd in cmdline:
if cmd[0] in '/-':
c += "%s " % cmd
else:
c += "'%s' " % cmd
return c
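For illustration, the quoting rule of command_line above: arguments whose first character is '/' or '-' stay bare, everything else is wrapped in single quotes (the returned string carries a trailing space):

print(command_line(['/usr/bin/verify.py', '--database', 'my database']))
# prints: /usr/bin/verify.py --database 'my database'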
def write_info(args, command_line_parameters, executable):
# write configuration
try:
bob.io.base.create_directories_safe(os.path.dirname(args.info_file))
f = open(args.info_file, 'w')
f.write("Command line:\n")
f.write(" ".join(command_line_parameters) + "\n\n")
f.write(command_line([executable] + command_line_parameters) + "\n\n")
f.write("Configuration:\n")
f.write("Database:\n%s\n\n" % args.database)
f.write("Preprocessing:\n%s\n\n" % args.preprocessor)
@@ -263,14 +274,3 @@ def write_info(args, command_line_parameters):
f.write("Algorithm:\n%s\n\n" % args.algorithm)
except IOError:
logger.error("Could not write the experimental setup into file '%s'", args.info_file)
def command_line(cmdline):
"""Converts the given options to a string that can be executed on command line."""
c = ""
for cmd in cmdline:
if cmd[0] in '/-':
c += "%s " % cmd
else:
c += "'%s' " % cmd
return c
@@ -34,30 +34,37 @@ def indices(list_to_split, number_of_parallel_jobs, task_id=None):
class GridSubmission:
def __init__(self, args, command_line_parameters, executable = 'verify.py', first_fake_job_id = 0):
assert isinstance(args.grid, Grid)
# find, where jman and the executable are installed
# find where the executable is installed
import bob.extension
jmans = bob.extension.find_executable('jman', prefixes = ['bin'])
if not len(jmans):
raise IOError("Could not find the 'jman' executable. Have you installed GridTK?")
executables = bob.extension.find_executable(executable, prefixes = ['bin'])
if not len(executables):
raise IOError("Could not find the '%s' executable." % executable)
jman, executable = jmans[0], executables[0]