Skip to content
Snippets Groups Projects
Commit 325b6368 authored by Manuel Günther's avatar Manuel Günther
Browse files

Small corrections in order to make it compatible with bob.bio.gmm

parent 0e0586d8
No related branches found
No related tags found
No related merge requests found
......@@ -6,6 +6,7 @@ from . import algorithm
from . import tools
from . import grid # only one file, not complete directory
from . import script
from . import test
......
"""Grid configurations for running experiments with local parallel processes.

All three ``GridParameters`` objects use ``grid = 'local'`` (i.e. local
multi-processing rather than an SGE grid — presumably; confirm against the
``bob.bio.base.GridParameters`` documentation) and differ only in the
number of parallel processes.  They are registered as the ``local-p4``,
``local-p8`` and ``local-p16`` entry points under ``bob.bio.grid``.
"""
import bob.bio.base
# default local queue: 4 parallel processes
grid = bob.bio.base.GridParameters(
grid = 'local',
number_of_parallel_processes = 4
)
# more highly parallelized local queue: 8 parallel processes
grid_p8 = bob.bio.base.GridParameters(
grid = 'local',
number_of_parallel_processes = 8
)
# most highly parallelized local queue: 16 parallel processes
grid_p16 = bob.bio.base.GridParameters(
grid = 'local',
number_of_parallel_processes = 16
)
from .utils import File, FileSet
from .Database import Database, DatabaseZT
from .DatabaseBob import DatabaseBob, DatabaseBobZT
from .DatabaseFileList import DatabaseFileList
......
from . import verify
......@@ -86,12 +86,12 @@ def add_jobs(args, submitter = None):
if args.grid is None:
jobs_to_execute.append(('train-projector',))
else:
job_ids['projector_training'] = submitter.submit(
job_ids['projector-training'] = submitter.submit(
'--sub-task train-projector',
name="train-p",
dependencies = deps,
**args.grid.training_queue)
deps.append(job_ids['projector_training'])
deps.append(job_ids['projector-training'])
# feature projection
if not args.skip_projection and args.algorithm.performs_projection:
......@@ -110,12 +110,12 @@ def add_jobs(args, submitter = None):
if args.grid is None:
jobs_to_execute.append(('train-enroller',))
else:
job_ids['enroller_training'] = submitter.submit(
job_ids['enroller-training'] = submitter.submit(
'--sub-task train-enroller',
name = "train-e",
dependencies = deps,
**args.grid.training_queue)
deps.append(job_ids['enroller_training'])
deps.append(job_ids['enroller-training'])
# enroll models
enroll_deps_n = {}
......@@ -129,38 +129,38 @@ def add_jobs(args, submitter = None):
if args.grid is None:
jobs_to_execute.append(('enroll', group, 'N'))
else:
job_ids['enroll_%s_N'%group] = submitter.submit(
job_ids['enroll-%s-N'%group] = submitter.submit(
'--sub-task enroll --group %s --model-type N'%group,
name = "enr-N-%s"%group,
number_of_parallel_jobs = args.grid.number_of_enrollment_jobs,
dependencies = deps,
**args.grid.enrollment_queue)
enroll_deps_n[group].append(job_ids['enroll_%s_N'%group])
enroll_deps_n[group].append(job_ids['enroll-%s-N'%group])
if args.zt_norm:
if args.grid is None:
jobs_to_execute.append(('enroll', group, 'T'))
else:
job_ids['enroll_%s_T'%group] = submitter.submit(
job_ids['enroll-%s-T'%group] = submitter.submit(
'--sub-task enroll --group %s --model-type T'%group,
name = "enr-T-%s"%group,
number_of_parallel_jobs = args.grid.number_of_enrollment_jobs,
dependencies = deps,
**args.grid.enrollment_queue)
enroll_deps_t[group].append(job_ids['enroll_%s_T'%group])
enroll_deps_t[group].append(job_ids['enroll-%s-T'%group])
# compute A,B,C, and D scores
if not args.skip_score_computation:
if args.grid is None:
jobs_to_execute.append(('compute-scores', group, None, 'A'))
else:
job_ids['score_%s_A'%group] = submitter.submit(
job_ids['score-%s-A'%group] = submitter.submit(
'--sub-task compute-scores --group %s --score-type A'%group,
name = "score-A-%s"%group,
number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
dependencies = enroll_deps_n[group],
**args.grid.scoring_queue)
concat_deps[group] = [job_ids['score_%s_A'%group]]
concat_deps[group] = [job_ids['score-%s-A'%group]]
if args.zt_norm:
if args.grid is None:
......@@ -169,21 +169,21 @@ def add_jobs(args, submitter = None):
jobs_to_execute.append(('compute-scores', group, None, 'D'))
jobs_to_execute.append(('compute-scores', group, None, 'Z'))
else:
job_ids['score_%s_B'%group] = submitter.submit(
job_ids['score-%s-B'%group] = submitter.submit(
'--sub-task compute-scores --group %s --score-type B'%group,
name = "score-B-%s"%group,
number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
dependencies = enroll_deps_n[group],
**args.grid.scoring_queue)
job_ids['score_%s_C'%group] = submitter.submit(
job_ids['score-%s-C'%group] = submitter.submit(
'--sub-task compute-scores --group %s --score-type C'%group,
name = "score-C-%s"%group,
number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
dependencies = enroll_deps_t[group],
**args.grid.scoring_queue)
job_ids['score_%s_D'%group] = submitter.submit(
job_ids['score-%s-D'%group] = submitter.submit(
'--sub-task compute-scores --group %s --score-type D'%group,
name = "score-D-%s"%group,
number_of_parallel_jobs = args.grid.number_of_scoring_jobs,
......@@ -191,12 +191,12 @@ def add_jobs(args, submitter = None):
**args.grid.scoring_queue)
# compute zt-norm
score_deps[group] = [job_ids['score_%s_A'%group], job_ids['score_%s_B'%group], job_ids['score_%s_C'%group], job_ids['score_%s_D'%group]]
job_ids['score_%s_Z'%group] = submitter.submit(
score_deps[group] = [job_ids['score-%s-A'%group], job_ids['score-%s-B'%group], job_ids['score-%s-C'%group], job_ids['score-%s-D'%group]]
job_ids['score-%s-Z'%group] = submitter.submit(
'--sub-task compute-scores --group %s --score-type Z'%group,
name = "score-Z-%s"%group,
dependencies = score_deps[group])
concat_deps[group].extend([job_ids['score_%s_B'%group], job_ids['score_%s_C'%group], job_ids['score_%s_D'%group], job_ids['score_%s_Z'%group]])
concat_deps[group].extend([job_ids['score-%s-B'%group], job_ids['score-%s-C'%group], job_ids['score-%s-D'%group], job_ids['score-%s-Z'%group]])
else:
concat_deps[group] = []
......@@ -205,7 +205,7 @@ def add_jobs(args, submitter = None):
if args.grid is None:
jobs_to_execute.append(('concatenate', group))
else:
job_ids['concat_%s'%group] = submitter.submit(
job_ids['concat-%s'%group] = submitter.submit(
'--sub-task concatenate --group %s'%group,
name = "concat-%s"%group,
dependencies = concat_deps[group])
......@@ -215,7 +215,7 @@ def add_jobs(args, submitter = None):
if args.grid is None:
jobs_to_execute.append(('calibrate',))
else:
calib_deps = [job_ids['concat_%s'%g] for g in args.groups if 'concat_%s'%g in job_ids]
calib_deps = [job_ids['concat-%s'%g] for g in args.groups if 'concat-%s'%g in job_ids]
job_ids['calibrate'] = submitter.submit(
'--sub-task calibrate',
dependencies = calib_deps)
......@@ -362,7 +362,8 @@ def verify(args, command_line_parameters, external_fake_job_id = 0):
# as the main entry point, check whether the sub-task is specified
if args.sub_task is not None:
# execute the desired sub-task
execute(args)
if not execute(args):
raise ValueError("The specified --sub-task '%s' is not known to the system" % args.sub_task)
return {}
else:
# add jobs
......@@ -394,7 +395,8 @@ def verify(args, command_line_parameters, external_fake_job_id = 0):
args.group = None if len(job) <= 1 else job[1]
args.model_type = None if len(job) <= 2 else job[2]
args.score_type = None if len(job) <= 3 else job[3]
execute(args)
if not execute(args):
raise ValueError("The current --sub-task '%s' is not known to the system" % args.sub_task)
if args.timer:
end_time = os.times()
......
import bob.db.atnt
import os
from bob.bio.base.database import DatabaseBob, DatabaseBobZT
from bob.bio.base.database import DatabaseBob, DatabaseBobZT, FileSet
from bob.bio.base.test.utils import atnt_database_directory
class FileSetDatabase (DatabaseBobZT):
def __init__(self):
# call base class constructor with useful parameters
facereclib.databases.DatabaseBobZT.__init__(
DatabaseBobZT.__init__(
self,
database = bob.db.atnt.Database(
original_directory = atnt_database_directory(),
......@@ -29,7 +29,7 @@ class FileSetDatabase (DatabaseBobZT):
file_sets = []
for client_files in files:
# generate file set for each client
file_set = facereclib.databases.FileSet(client_files[0].client_id, client_files[0].client_id, client_files[0].path)
file_set = FileSet(client_files[0].client_id, client_files[0].client_id, client_files[0].path)
file_set.files = client_files
file_sets.append(file_set)
return file_sets
......
......@@ -124,7 +124,7 @@ def test_verify_commandline():
@utils.grid_available
def test_verify_parallel():
test_dir = tempfile.mkdtemp(prefix='frltest_')
test_database = os.path.join(test_dir, "database.sql3")
test_database = os.path.join(test_dir, "submitted.sql3")
# define dummy parameters
parameters = [
......
......@@ -89,7 +89,7 @@ def load_resource(resource, keyword, imports = ['bob.bio.base'], preferred_distr
else:
# TODO: extract current package name and use this one, if possible
# Now: check if there are only two entry points, and one is from the facereclib, then use the other one
# Now: check if there are only two entry points, and one is from the bob.bio.base, then use the other one
index = -1
if preferred_distribution:
for i,p in enumerate(entry_points):
......@@ -141,11 +141,11 @@ def read_file_resource(resource, keyword):
else:
# TODO: extract current package name and use this one, if possible
# Now: check if there are only two entry points, and one is from the facereclib, then use the other one
# Now: check if there are only two entry points, and one is from the bob.bio.base, then use the other one
index = -1
if len(entry_points) == 2:
if entry_points[0].dist.project_name == 'facereclib': index = 1
elif entry_points[1].dist.project_name == 'facereclib': index = 0
if entry_points[0].dist.project_name == 'bob.bio.base': index = 1
elif entry_points[1].dist.project_name == 'bob.bio.base': index = 0
if index != -1:
logger.info("RESOURCES: Using the resource '%s' from '%s', and ignoring the one from '%s'" %(resource, entry_points[index].module_name, entry_points[1-index].module_name))
......@@ -168,7 +168,7 @@ def list_resources(keyword, strip=['dummy']):
entry_points = _get_entry_points(keyword, strip)
last_dist = None
retval = ""
for entry_point in entry_points:
for entry_point in sorted(entry_points):
if last_dist != str(entry_point.dist):
retval += "\n- %s: \n" % str(entry_point.dist)
last_dist = str(entry_point.dist)
......
......@@ -125,6 +125,12 @@ setup(
'pca+lda = bob.bio.base.config.algorithm.lda:algorithm',
'bic = bob.bio.base.config.algorithm.bic:algorithm',
],
'bob.bio.grid': [
'local-p4 = bob.bio.base.config.grid.local:grid',
'local-p8 = bob.bio.base.config.grid.local:grid_p8',
'local-p16 = bob.bio.base.config.grid.local:grid_p16'
],
},
# Classifiers are important if you plan to distribute this package through
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment