Commit 79a2177f authored by Theophile GENTILHOMME

Change paths to loading/conversion functions now in bob.bio.base (previously in bob.measure)

parent 48a9c067
2 merge requests: !146 Add 4-5-col files related functionalities and add click commands, !138 Moving biometrics-related functionality from bob.measure
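The change swaps the bob.measure.load namespace for the score module that now ships with bob.bio.base at every call site. A minimal before/after sketch (the file name is hypothetical; inside the package the module is imported as "from .. import score"):

# before this commit: score-file readers lived in bob.measure
import bob.measure
negatives, positives = bob.measure.load.split_four_column('scores-dev')  # hypothetical file

# after this commit: the same readers come from bob.bio.base
from bob.bio.base import score
negatives, positives = score.split_four_column('scores-dev')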
@@ -29,8 +29,9 @@ import sys, os, glob
 import argparse
 import numpy
 import bob.measure
 import bob.core
+from .. import score
 logger = bob.core.log.setup("bob.bio.base")
 def command_line_arguments(command_line_parameters):
@@ -80,9 +81,9 @@ class Result:
   def _calculate(self, dev_file, eval_file = None):
     """Calculates the EER and HTER or FRR based on the threshold criterion."""
     if self.m_args.criterion in ("RR", "DIR"):
-      scores_dev = bob.measure.load.cmc(dev_file)
+      scores_dev = score.cmc(dev_file)
       if eval_file is not None:
-        scores_eval = bob.measure.load.cmc(eval_file)
+        scores_eval = score.cmc(eval_file)
       if self.m_args.criterion == "DIR":
         # get negatives without positives
@@ -110,7 +111,7 @@
     else:
-      dev_neg, dev_pos = bob.measure.load.split(dev_file)
+      dev_neg, dev_pos = score.split(dev_file)
       # switch which threshold function to use
       if self.m_args.criterion == 'EER':
@@ -127,7 +128,7 @@
       dev_hter = (dev_far + dev_frr)/2.0
       if eval_file:
-        eval_neg, eval_pos = bob.measure.load.split(eval_file)
+        eval_neg, eval_pos = score.split(eval_file)
         eval_far, eval_frr = bob.measure.farfrr(eval_neg, eval_pos, threshold)
         eval_hter = (eval_far + eval_frr)/2.0
       else:
......
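The hunk above derives the half total error rate (HTER) from a threshold chosen on the development set. A condensed sketch of that computation, assuming a development score file named 'scores-dev' (hypothetical) and the equal-error-rate criterion, one of the criteria the script supports:

import bob.measure
from bob.bio.base import score

# split the score file into impostor (negative) and genuine (positive) scores
dev_neg, dev_pos = score.split('scores-dev')

# choose the decision threshold on the development set (EER criterion here)
threshold = bob.measure.eer_threshold(dev_neg, dev_pos)

# false acceptance and false rejection rates at that threshold; HTER is their mean
far, frr = bob.measure.farfrr(dev_neg, dev_pos, threshold)
hter = (far + frr) / 2.0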
@@ -17,6 +17,7 @@ import bob, os, sys
 import bob.learn.linear
 import bob.core
+from .. import score
 logger = bob.core.log.setup("bob.bio.base")
 def parse_command_line(command_line_options):
@@ -66,7 +67,7 @@ def main(command_line_options = None):
   for i in range(n_systems):
     logger.info("Loading development set score file '%s'", args.dev_files[i])
     # pythonic way: create inline dictionary "{...}", index with desired value "[...]", execute function "(...)"
-    data.append({'4column' : bob.measure.load.split_four_column, '5column' : bob.measure.load.split_five_column}[args.parser](args.dev_files[i]))
+    data.append({'4column' : score.split_four_column, '5column' : score.split_five_column}[args.parser](args.dev_files[i]))
   import numpy
   trainer = bob.learn.linear.CGLogRegTrainer(0.5, args.convergence_threshold, args.max_iterations, mean_std_norm=not args.no_whitening)
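The comment in the hunk above describes the inline-dictionary dispatch used to pick the right parser. Spelled out step by step (a sketch only; the file name is hypothetical):

from bob.bio.base import score

# 1. build a dictionary that maps format names to loader functions
parsers = {'4column': score.split_four_column, '5column': score.split_five_column}
# 2. index it with the desired format (what args.parser holds in the script)
loader = parsers['4column']
# 3. call the selected loader on the score file
negatives, positives = loader('scores-dev')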
@@ -78,7 +79,7 @@
   gen_data_dev = []
   for i in range(n_systems):
     logger.info("Loading development set score file '%s'", args.dev_files[i])
-    gen_data_dev.append({'4column' : bob.measure.load.four_column, '5column' : bob.measure.load.five_column}[args.parser](args.dev_files[i]))
+    gen_data_dev.append({'4column' : score.four_column, '5column' : score.five_column}[args.parser](args.dev_files[i]))
   logger.info("Writing fused development set score file '%s'", args.fused_dev_file)
   outf = open(args.fused_dev_file, 'w')
@@ -99,7 +100,7 @@
   gen_data_eval = []
   for i in range(n_systems):
     logger.info("Loading evaluation set score file '%s'", args.eval_files[i])
-    gen_data_eval.append({'4column' : bob.measure.load.four_column, '5column' : bob.measure.load.five_column}[args.parser](args.eval_files[i]))
+    gen_data_eval.append({'4column' : score.four_column, '5column' : score.five_column}[args.parser](args.eval_files[i]))
   logger.info("Writing fused evaluation set score file '%s'", args.fused_eval_file)
   outf = open(args.fused_eval_file, 'w')
......
@@ -8,6 +8,7 @@ import nose
 import bob.io.image
 import bob.bio.base
 from . import utils
+from .. import score
 from nose.plugins.skip import SkipTest
@@ -20,7 +21,6 @@ data_dir = pkg_resources.resource_filename('bob.bio.base', 'test/data')
 def _verify(parameters, test_dir, sub_dir, ref_modifier="", score_modifier=('scores',''), counts=3, check_zt=True):
   from bob.bio.base.script.verify import main
-  import bob.measure
   try:
     main(parameters)
@@ -42,7 +42,7 @@ def _verify(parameters, test_dir, sub_dir, ref_modifier="", score_modifier=('scores',''), counts=3, check_zt=True):
       d = []
       # read reference and new data
       for score_file in (score_files[i], reference_files[i]):
-        f = bob.measure.load.open_file(score_file)
+        f = score.open_file(score_file)
         d_ = []
         for line in f:
           if isinstance(line, bytes): line = line.decode('utf-8')
@@ -278,7 +278,6 @@ def test_verify_filelist():
   ]
   from bob.bio.base.script.verify import main
-  import bob.measure
   try:
     main(parameters)
@@ -292,8 +291,8 @@
     for i in (0,1):
       # load scores
-      a1, b1 = bob.measure.load.split_four_column(score_files[i])
-      a2, b2 = bob.measure.load.split_four_column(reference_files[i])
+      a1, b1 = score.split_four_column(score_files[i])
+      a2, b2 = score.split_four_column(reference_files[i])
       # sort scores
       a1 = sorted(a1); a2 = sorted(a2); b1 = sorted(b1); b2 = sorted(b2)
@@ -323,7 +322,6 @@ def test_verify_missing():
   ]
   from bob.bio.base.script.verify import main
-  import bob.measure
   try:
     main(parameters)
@@ -336,7 +334,7 @@
     for i in (0,1):
       # load scores
-      a, b = bob.measure.load.split_four_column(score_files[i])
+      a, b = score.split_four_column(score_files[i])
       assert numpy.all(numpy.isnan(a))
       assert numpy.all(numpy.isnan(b))
@@ -479,15 +477,14 @@ def test_fusion():
   # execute the script
   from bob.bio.base.script.fuse_scores import main
-  import bob.measure
   try:
     main(parameters)
     # assert that we can read the two files, and that they contain the same number of lines as the original file
     for i in (0,1):
       assert os.path.exists(output_files[i])
-      r = bob.measure.load.four_column(reference_files[i])
-      o = bob.measure.load.four_column(output_files[i])
+      r = score.four_column(reference_files[i])
+      o = score.four_column(output_files[i])
       assert len(list(r)) == len(list(o))
   finally:
     shutil.rmtree(test_dir)
......
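Unlike split_four_column, the four_column reader used in the fusion test above yields one parsed line at a time instead of pre-split score arrays, which is why the test wraps it in list() before comparing lengths. A small sketch (hypothetical file name):

from bob.bio.base import score

# four_column yields (claimed_id, real_id, probe_label, score) tuples, one per line
for claimed_id, real_id, probe_label, value in score.four_column('scores-dev'):
    print(claimed_id, real_id, probe_label, value)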
 import bob.io.base
 import bob.learn.em
 import bob.learn.linear
-import bob.measure
 import numpy
 import os, sys
 import tarfile
@@ -12,6 +11,7 @@ logger = logging.getLogger("bob.bio.base")
 from .FileSelector import FileSelector
 from .. import utils
+from .. import score
 def _scores(algorithm, reader, model, probe_objects, allow_missing_files):
   """Compute scores for the given model and a list of probes.
@@ -62,12 +62,12 @@ def _scores(algorithm, reader, model, probe_objects, allow_missing_files):
 def _open_to_read(score_file):
-  """Checks for the existence of the normal and the compressed version of the file, and calls :py:func:`bob.measure.load.open_file` for the existing one."""
+  """Checks for the existence of the normal and the compressed version of the file, and calls :py:func:`score.open_file` for the existing one."""
   if not os.path.exists(score_file):
     score_file += '.tar.bz2'
     if not os.path.exists(score_file):
       raise IOError("The score file '%s' cannot be found. Aborting!" % score_file)
-  return bob.measure.load.open_file(score_file)
+  return score.open_file(score_file)
 def _open_to_write(score_file, write_compressed):
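The helper above looks for the plain score file first and silently falls back to its '.tar.bz2' sibling. Usage is the same in both cases (a sketch with a hypothetical file name):

# returns a readable file object for 'scores-dev' if it exists,
# otherwise for 'scores-dev.tar.bz2'; raises IOError if neither is present
f = _open_to_read('scores-dev')
for line in f:
    print(line)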
@@ -115,7 +115,7 @@ def _delete(score_file, write_compressed):
 def _save_scores(score_file, scores, probe_objects, client_id, write_compressed):
-  """Saves the scores of one model into a text file that can be interpreted by :py:func:`bob.measure.load.split_four_column`."""
+  """Saves the scores of one model into a text file that can be interpreted by :py:func:`score.split_four_column`."""
   assert len(probe_objects) == scores.shape[1]
   # open file for writing
@@ -493,7 +493,7 @@ def _concat(score_files, output, write_compressed, model_ids):
 def concatenate(compute_zt_norm, groups = ['dev', 'eval'], write_compressed = False, add_model_id = False):
   """Concatenates all results into one (or two) score files per group.
-  Score files, which were generated per model, are concatenated into a single score file, which can be interpreted by :py:func:`bob.measure.load.split_four_column`.
+  Score files, which were generated per model, are concatenated into a single score file, which can be interpreted by :py:func:`score.split_four_column`.
   The score files are always re-computed, regardless if they exist or not.
   **Parameters:**
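For reference, a 4-column score file stores one trial per line as claimed identity, real identity, probe label and score; split_four_column separates it into impostor and genuine score arrays. A minimal sketch (hypothetical file name):

from bob.bio.base import score

# each line of the file reads: <claimed_id> <real_id> <probe_label> <score>
negatives, positives = score.split_four_column('scores-dev')
# negatives: scores of trials where claimed_id != real_id (impostors)
# positives: scores of trials where claimed_id == real_id (genuine accesses)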
@@ -563,7 +563,7 @@ def calibrate(compute_zt_norm, groups = ['dev', 'eval'], prior = 0.5, write_comp
       logger.info(" - Calibration: Training calibration for type %s from group %s", norm, groups[0])
       llr_trainer = bob.learn.linear.CGLogRegTrainer(prior, 1e-16, 100000)
-      training_scores = list(bob.measure.load.split_four_column(training_score_file))
+      training_scores = list(score.split_four_column(training_score_file))
       for i in (0,1):
         h = numpy.array(training_scores[i])
         # remove NaN's
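The hunk above trains a linear logistic-regression calibration on the development scores. A condensed sketch of that step (hypothetical file name; the NaN removal done in the surrounding code is omitted here):

import numpy
import bob.learn.linear
from bob.bio.base import score

negatives, positives = score.split_four_column('scores-dev')

# CGLogRegTrainer(prior, convergence_threshold, max_iterations), as used above
llr_trainer = bob.learn.linear.CGLogRegTrainer(0.5, 1e-16, 100000)

# the trainer expects 2D arrays: one row per trial, one column holding the raw score
machine = llr_trainer.train(numpy.array(negatives).reshape(-1, 1),
                            numpy.array(positives).reshape(-1, 1))
# the returned linear machine maps raw scores to calibrated scores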
@@ -582,7 +582,7 @@
       logger.info(" - Calibration: calibrating scores from '%s' to '%s'", score_file, calibrated_file)
       # iterate through the score file and calibrate scores
-      scores = bob.measure.load.four_column(_open_to_read(score_file))
+      scores = score.four_column(_open_to_read(score_file))
       f = _open_to_write(calibrated_file, write_compressed)
......