diff --git a/bob/measure/script/apply_threshold.py b/bob/measure/script/apply_threshold.py index c88acaf0162db00682f6c0d6f36cb5a3c4485d3d..36edcb66236d08f30ab785765baf46d67aeb6331 100644 --- a/bob/measure/script/apply_threshold.py +++ b/bob/measure/script/apply_threshold.py @@ -1,107 +1,72 @@ #!/usr/bin/env python # vim: set fileencoding=utf-8 : -# Andre Anjos <andre.anjos@idiap.ch> -# Wed May 25 13:27:46 2011 +0200 -# -# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland +# Wed 28 Sep 2016 17:55:17 CEST + + +"""Applies a threshold to score file and reports error rates + +Usage: %(prog)s [-v...] [options] <threshold> <scores> + %(prog)s --help + %(prog)s --version + + +Arguments: + <threshold> The threshold value to apply (float) + <scores> Path to the file containing the scores where to apply the + threshold and calculate error rates + +Options: + -h, --help Shows this help message and exits + -V, --version Prints the version and exits + -v, --verbose Increases the output verbosity level -"""This script applies a threshold to score file and reports error rates -""" -__epilog__ = """ Examples: - 1. Standard usage + Applies the threshold of 0.5 to the scores file in scores.txt and reports: + + $ %(prog)s 0.5 scores.txt - $ %(prog)s --scores=my-scores.txt --threshold=0.5 """ + import os import sys -from .. import farfrr, load - -def apthres(neg, pos, thres): - """Prints a single output line that contains all info for the threshold""" - - far, frr = farfrr(neg, pos, thres) - hter = (far + frr)/2.0 - - ni = neg.shape[0] #number of impostors - fa = int(round(far*ni)) #number of false accepts - nc = pos.shape[0] #number of clients - fr = int(round(frr*nc)) #number of false rejects - - print("FAR : %.3f%% (%d/%d)" % (100*far, fa, ni)) - print("FRR : %.3f%% (%d/%d)" % (100*frr, fr, nc)) - print("HTER: %.3f%%" % (100*hter,)) - -def get_options(user_input): - """Parse the program options""" - - usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0]) - - import argparse - parser = argparse.ArgumentParser(usage=usage, - description=(__doc__ % {'prog': os.path.basename(sys.argv[0])}), - epilog=(__epilog__ % {'prog': os.path.basename(sys.argv[0])}), - formatter_class=argparse.RawDescriptionHelpFormatter) - - parser.add_argument('-s', '--scores', dest="ifile", default=None, - help="Name of the file containing the scores (defaults to %(default)s)", - metavar="FILE") - parser.add_argument('-t', '--threshold', dest='thres', default=None, - type=float, help="The threshold value to apply", metavar="FLOAT") - parser.add_argument('-p', '--parser', dest="parser", default="4column", - help="Name of a known parser or of a python-importable function that can parse your input files and return a tuple (negatives, positives) as blitz 1-D arrays of 64-bit floats. Consult the API of bob.measure.load.split_four_column() for details", metavar="NAME.FUNCTION") - - # This option is not normally shown to the user... 
- parser.add_argument("--self-test", - action="store_true", dest="test", default=False, help=argparse.SUPPRESS) - #help="if set, runs an internal verification test and erases any output") - - args = parser.parse_args(args=user_input) - - if args.test: - # then we go into test mode, all input is preset - args.thres = 0.0 - - if args.ifile is None: - parser.error("you should give an input score set with --scores") - - if args.thres is None: - parser.error("you should give a threshold value with --threshold") - - #parse the score-parser - if args.parser.lower() in ('4column', '4col'): - args.parser = load.split_four_column - elif args.parser.lower() in ('5column', '5col'): - args.parser = load.split_five_column - else: #try an import - if args.parser.find('.') == -1: - parser.error("parser module should be either '4column', '5column' or a valid python function identifier in the format 'module.function': '%s' is invalid" % args.parser) - - mod, fct = args.parser.rsplit('.', 2) - import imp - try: - fp, pathname, description = imp.find_module(mod, ['.'] + sys.path) - except Exception as e: - parser.error("import error for '%s': %s" % (args.parser, e)) - - try: - pmod = imp.load_module(mod, fp, pathname, description) - args.parser = getattr(pmod, fct) - except Exception as e: - parser.error("loading error for '%s': %s" % (args.parser, e)) - finally: - fp.close() - - return args + +from .eval_threshold import apthres + def main(user_input=None): - options = get_options(user_input) + if user_input is not None: + argv = user_input + else: + argv = sys.argv[1:] + + import docopt + import pkg_resources + + completions = dict( + prog=os.path.basename(sys.argv[0]), + version=pkg_resources.require('bob.measure')[0].version + ) + + args = docopt.docopt( + __doc__ % completions, + argv=argv, + version=completions['version'], + ) + + # handles threshold validation + try: + args['<threshold>'] = float(args['<threshold>']) + except: + raise docopt.DocoptExit("cannot convert %s into float for threshold" % \ + args['<threshold>']) + + from ..load import load_score, get_negatives_positives + neg, pos = get_negatives_positives(load_score(args['<scores>'])) - neg, pos = options.parser(options.ifile) - apthres(neg, pos, options.thres) + apthres(neg, pos, args['<threshold>']) return 0 diff --git a/bob/measure/script/compute_perf.py b/bob/measure/script/compute_perf.py index 283ec58ad45d21eca748d3a198ab34ba8fe203fe..dcef7c18bbee632290a5dcaea023fb59ef70ee32 100644 --- a/bob/measure/script/compute_perf.py +++ b/bob/measure/script/compute_perf.py @@ -1,35 +1,62 @@ #!/usr/bin/env python # vim: set fileencoding=utf-8 : -# Wed May 25 13:27:46 2011 +0200 +# Wed 28 Sep 2016 15:39:05 CEST + +"""Runs error analysis on score sets -"""This script runs error analysis on the development and test set scores, in a -four column format: 1. Computes the threshold using either EER or min. HTER criteria on develoment set scores; 2. Applies the above threshold on test set scores to compute the HTER 3. Plots ROC, EPC and DET curves to a multi-page PDF file -""" -__epilog__ = """ + +Usage: %(prog)s [-v...] 
[options] <dev-scores> <test-scores> + %(prog)s --help + %(prog)s --version + + +Arguments: + <dev-scores> Path to the file containing the development scores + <test-scores> Path to the file containing the test scores + + +Options: + -h, --help Shows this help message and exits + -V, --version Prints the version and exits + -v, --verbose Increases the output verbosity level + -n <int>, --points=<int> Number of points to use in the curves + [default: 100] + -o <path>, --output=<path> Name of the output file that will contain the + plots [default: curves.pdf] + -x, --no-plot If set, then I'll execute no plotting + + Examples: 1. Specify a different output filename - $ %(prog)s --output=mycurves.pdf --devel=dev.scores --test=test.scores + $ %(prog)s -vv --output=mycurves.pdf dev.scores test.scores 2. Specify a different number of points - $ %(prog)s --points=500 --devel=dev.scores --test=test.scores + $ %(prog)s --points=500 dev.scores test.scores 3. Don't plot (only calculate thresholds) - $ %(prog)s --no-plot --devel=dev.scores --test=test.scores + $ %(prog)s --no-plot dev.scores test.scores + """ import os import sys import numpy +import logging +__logging_format__='[%(levelname)s] %(message)s' +logging.basicConfig(format=__logging_format__) +logger = logging.getLogger('bob') + + def print_crit(dev_neg, dev_pos, test_neg, test_pos, crit): """Prints a single output line that contains all info for a given criterium""" @@ -79,7 +106,8 @@ def print_crit(dev_neg, dev_pos, test_neg, test_pos, crit): print(" HTER | %s | %s" % (fmt(dev_hter_str, -1*dev_max_len), fmt(test_hter_str, -1*test_max_len))) -def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename): + +def plots(dev_neg, dev_pos, test_neg, test_pos, points, filename): """Saves ROC, DET and EPC curves on the file pointed out by filename.""" from .. 
import plot @@ -93,9 +121,9 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename): # ROC fig = mpl.figure() - plot.roc(dev_neg, dev_pos, npoints, color=(0.3,0.3,0.3), + plot.roc(dev_neg, dev_pos, points, color=(0.3,0.3,0.3), linestyle='--', dashes=(6,2), label='development') - plot.roc(test_neg, test_pos, npoints, color=(0,0,0), + plot.roc(test_neg, test_pos, points, color=(0,0,0), linestyle='-', label='test') mpl.axis([0,40,0,40]) mpl.title("ROC Curve") @@ -107,9 +135,9 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename): # DET fig = mpl.figure() - plot.det(dev_neg, dev_pos, npoints, color=(0.3,0.3,0.3), + plot.det(dev_neg, dev_pos, points, color=(0.3,0.3,0.3), linestyle='--', dashes=(6,2), label='development') - plot.det(test_neg, test_pos, npoints, color=(0,0,0), + plot.det(test_neg, test_pos, points, color=(0,0,0), linestyle='-', label='test') plot.det_axis([0.01, 40, 0.01, 40]) mpl.title("DET Curve") @@ -121,7 +149,7 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename): # EPC fig = mpl.figure() - plot.epc(dev_neg, dev_pos, test_neg, test_pos, npoints, + plot.epc(dev_neg, dev_pos, test_neg, test_pos, points, color=(0,0,0), linestyle='-') mpl.title('EPC Curve') mpl.xlabel('Cost') @@ -131,86 +159,53 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename): pp.close() -def get_options(user_input): - """Parse the program options""" - - usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0]) - - import argparse - parser = argparse.ArgumentParser(usage=usage, - description=(__doc__ % {'prog': os.path.basename(sys.argv[0])}), - epilog=(__epilog__ % {'prog': os.path.basename(sys.argv[0])}), - formatter_class=argparse.RawDescriptionHelpFormatter) - - parser.add_argument('-d', '--devel', dest="dev", default=None, - help="Name of the file containing the development scores (defaults to %(default)s)", metavar="FILE") - parser.add_argument('-t', '--test', dest="test", default=None, - help="Name of the file containing the test scores (defaults to %(default)s)", metavar="FILE") - parser.add_argument('-n', '--points', dest="npoints", default=100, type=int, - help="Number of points to use in the curves (defaults to %(default)s)", - metavar="INT(>0)") - parser.add_argument('-o', '--output', dest="plotfile", default="curves.pdf", - help="Name of the output file that will contain the plots (defaults to %(default)s)", metavar="FILE") - parser.add_argument('-x', '--no-plot', dest="doplot", default=True, - action='store_false', help="If set, then I'll execute no plotting") - parser.add_argument('-p', '--parser', dest="parser", default="4column", - help="Name of a known parser or of a python-importable function that can parse your input files and return a tuple (negatives, positives) as blitz 1-D arrays of 64-bit floats. Consult the API of bob.measure.load.split_four_column() for details", metavar="NAME.FUNCTION") - - # This option is not normally shown to the user... 
- parser.add_argument("--self-test", - action="store_true", dest="selftest", default=False, - help=argparse.SUPPRESS) - #help="if set, runs an internal verification test and erases any output") - - args = parser.parse_args(args=user_input) - - if args.selftest: - # then we go into test mode, all input is preset - import tempfile - outputdir = tempfile.mkdtemp(prefix='bobtest_') - args.plotfile = os.path.join(outputdir, "curves.pdf") - - if args.dev is None: - parser.error("you should give a development score set with --devel") - - if args.test is None: - parser.error("you should give a test score set with --test") - - #parse the score-parser - from .. import load - - if args.parser.lower() in ('4column', '4col'): - args.parser = load.split_four_column - elif args.parser.lower() in ('5column', '5col'): - args.parser = load.split_five_column - else: #try an import - if args.parser.find('.') == -1: - parser.error("parser module should be either '4column', '5column' or a valid python function identifier in the format 'module.function': '%s' is invalid" % arg.parser) - - mod, fct = args.parser.rsplit('.', 1) - try: - args.parser = getattr(__import__(mod, fromlist=['*']), fct) - except Exception as e: - parser.error("error importing '%s': %s" % (args.parser, e)) - - return args def main(user_input=None): - options = get_options(user_input) + if user_input is not None: + argv = user_input + else: + argv = sys.argv[1:] + + import docopt + import pkg_resources + + completions = dict( + prog=os.path.basename(sys.argv[0]), + version=pkg_resources.require('bob.measure')[0].version + ) - dev_neg, dev_pos = options.parser(options.dev) - test_neg, test_pos = options.parser(options.test) + args = docopt.docopt( + __doc__ % completions, + argv=argv, + version=completions['version'], + ) + + # Sets-up logging + if args['--verbose'] == 1: logging.getLogger().setLevel(logging.INFO) + elif args['--verbose'] >= 2: logging.getLogger().setLevel(logging.DEBUG) + + # Checks number of points option + try: + args['--points'] = int(args['--points']) + except: + raise docopt.DocoptExit("cannot convert %s into int for points" % \ + args['--points']) + + if args['--points'] <= 0: + raise docopt.DocoptExit('Number of points (--points) should greater ' \ + 'than zero') + + from ..load import load_score, get_negatives_positives + dev_neg, dev_pos = get_negatives_positives(load_score(args['<dev-scores>'])) + test_neg, test_pos = get_negatives_positives(load_score(args['<test-scores>'])) print_crit(dev_neg, dev_pos, test_neg, test_pos, 'EER') print_crit(dev_neg, dev_pos, test_neg, test_pos, 'Min. 
HTER') - if options.doplot: - plots(dev_neg, dev_pos, test_neg, test_pos, options.npoints, - options.plotfile) - print("[Plots] Performance curves => '%s'" % options.plotfile) - - if options.selftest: #remove output file + tmp directory - import shutil - shutil.rmtree(os.path.dirname(options.plotfile)) + + if not args['--no-plot']: + plots(dev_neg, dev_pos, test_neg, test_pos, args['--points'], + args['--output']) + print("[Plots] Performance curves => '%s'" % args['--output']) return 0 diff --git a/bob/measure/script/eval_threshold.py b/bob/measure/script/eval_threshold.py index 1c14f9cace07c2d2dfaf3c0ed87870eb0234b8c6..f3fc54afd45a385116d42edc1117e5a5adbbcda6 100644 --- a/bob/measure/script/eval_threshold.py +++ b/bob/measure/script/eval_threshold.py @@ -1,32 +1,53 @@ #!/usr/bin/env python # vim: set fileencoding=utf-8 : -# Andre Anjos <andre.anjos@idiap.ch> -# Wed May 25 13:27:46 2011 +0200 -# -# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland +# Wed 28 Sep 2016 16:56:52 CEST + + +"""Computes the threshold following a minimization criteria on input scores + +Usage: %(prog)s [-v...] [options] <scores> + %(prog)s --help + %(prog)s --version + + +Arguments: + <scores> Path to the file containing the scores to be used for calculating + the threshold + + +Options: + -h, --help Shows this help message and exits + -V, --version Prints the version and exits + -v, --verbose Increases the output verbosity level + -c <crit>, --criterium=<crit> The minimization criterium to use (choose + between mhter, mwer or eer) [default: eer] + -w <float>, --cost=<float> The value w of the cost when minimizing using + the minimum weighter error rate (mwer) + criterium. This value is ignored for eer or + mhter criteria. [default: 0.5] -"""This script computes the threshold following a certain minimization criteria -on the given input data.""" -__epilog__ = """ Examples: 1. Specify a different criteria (only mhter, mwer or eer accepted): - $ %(prog)s --scores=dev.scores --criterium=mhter + $ %(prog)s --criterium=mhter scores.txt 2. Calculate the threshold that minimizes the weither HTER for a cost of 0.4: - $ %(prog)s --scores=dev.scores --criterium=mwer --cost=0.4 + $ %(prog)s --criterium=mwer --cost=0.4 scores.txt 3. Parse your input using a 5-column format - $ %(prog)s --scores=dev.scores --parser=5column + $ %(prog)s scores.txt + """ + import os import sys + def apthres(neg, pos, thres): """Prints a single output line that contains all info for the threshold""" @@ -44,87 +65,63 @@ def apthres(neg, pos, thres): print("FRR : %.3f%% (%d/%d)" % (100*frr, fr, nc)) print("HTER: %.3f%%" % (100*hter,)) + def calculate(neg, pos, crit, cost): """Returns the threshold given a certain criteria""" - from .. import eer_threshold, min_hter_threshold, min_weighted_error_rate_threshold - if crit == 'eer': + from .. import eer_threshold return eer_threshold(neg, pos) elif crit == 'mhter': + from .. import min_hter_threshold return min_hter_threshold(neg, pos) # defaults to the minimum of the weighter error rate + from .. 
import min_weighted_error_rate_threshold return min_weighted_error_rate_threshold(neg, pos, cost) -def get_options(user_input): - """Parse the program options""" - - usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0]) - - import argparse - parser = argparse.ArgumentParser(usage=usage, - description=(__doc__ % {'prog': os.path.basename(sys.argv[0])}), - epilog=(__epilog__ % {'prog': os.path.basename(sys.argv[0])}), - formatter_class=argparse.RawDescriptionHelpFormatter) - - parser.add_argument('-s', '--scores', dest="ifile", default=None, - help="Name of the file containing the scores (defaults to %(default)s)", - metavar="FILE") - parser.add_argument('-c', '--criterium', dest='crit', default='eer', - choices=('eer', 'mhter', 'mwer'), - help="The minimization criterium to use", metavar="CRITERIUM") - parser.add_argument('-w', '--cost', dest='cost', default=0.5, - type=float, help="The value w of the cost when minimizing using the minimum weighter error rate (mwer) criterium. This value is ignored for eer or mhter criteria.", metavar="FLOAT") - parser.add_argument('-p', '--parser', dest="parser", default="4column", - help="Name of a known parser or of a python-importable function that can parse your input files and return a tuple (negatives, positives) as blitz 1-D arrays of 64-bit floats. Consult the API of bob.measure.load.split_four_column() for details", metavar="NAME.FUNCTION") - - # This option is not normally shown to the user... - parser.add_argument("--self-test", - action="store_true", dest="test", default=False, help=argparse.SUPPRESS) - #help="if set, runs an internal verification test and erases any output") - - args = parser.parse_args(args=user_input) - - if args.ifile is None: - parser.error("you should give an input score set with --scores") - - if args.cost < 0.0 or args.cost > 1.0: - parser.error("cost should lie between 0.0 and 1.0") - - #parse the score-parser - from .. 
import load - if args.parser.lower() in ('4column', '4col'): - args.parser = load.split_four_column - elif args.parser.lower() in ('5column', '5col'): - args.parser = load.split_five_column - else: #try an import - if args.parser.find('.') == -1: - parser.error("parser module should be either '4column', '5column' or a valid python function identifier in the format 'module.function': '%s' is invalid" % args.parser) - - mod, fct = args.parser.rsplit('.', 2) - import imp - try: - fp, pathname, description = imp.find_module(mod, ['.'] + sys.path) - except Exception as e: - parser.error("import error for '%s': %s" % (args.parser, e)) - - try: - pmod = imp.load_module(mod, fp, pathname, description) - args.parser = getattr(pmod, fct) - except Exception as e: - parser.error("loading error for '%s': %s" % (args.parser, e)) - finally: - fp.close() - - return args def main(user_input=None): - options = get_options(user_input) - - neg, pos = options.parser(options.ifile) - t = calculate(neg, pos, options.crit, options.cost) + if user_input is not None: + argv = user_input + else: + argv = sys.argv[1:] + + import docopt + import pkg_resources + + completions = dict( + prog=os.path.basename(sys.argv[0]), + version=pkg_resources.require('bob.measure')[0].version + ) + + args = docopt.docopt( + __doc__ % completions, + argv=argv, + version=completions['version'], + ) + + # validates criterium + valid_criteria = ('eer', 'mhter', 'mwer') + if args['--criterium'] not in valid_criteria: + raise docopt.DocoptExit("--criterium must be one of %s" % \ + ', '.join(valid_criteria)) + + # handles cost validation + try: + args['--cost'] = float(args['--cost']) + except: + raise docopt.DocoptExit("cannot convert %s into float for cost" % \ + args['--cost']) + + if args['--cost'] < 0.0 or args['--cost'] > 1.0: + docopt.DocoptExit("cost should lie between 0.0 and 1.0") + + from ..load import load_score, get_negatives_positives + neg, pos = get_negatives_positives(load_score(args['<scores>'])) + + t = calculate(neg, pos, args['--criterium'], args['--cost']) print("Threshold:", t) apthres(neg, pos, t) diff --git a/bob/measure/script/plot_cmc.py b/bob/measure/script/plot_cmc.py index 71ca794c5aaf5577d06beb6493b379ed4e59f0bd..712fb9285f4b84aa2b704451013f25a203b952f1 100644 --- a/bob/measure/script/plot_cmc.py +++ b/bob/measure/script/plot_cmc.py @@ -1,81 +1,118 @@ #!/usr/bin/env python # vim: set fileencoding=utf-8 : -# Manuel Guenther <manuel.guenther@idiap.ch> -# Tue Jan 8 13:36:12 CET 2013 -# -# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland +# Wed 28 Sep 2016 21:24:46 CEST -from __future__ import print_function +"""Computes and plots a cumulative rank characteristics (CMC) curve -"""This script computes and plot a cumulative rank characteristics (CMC) curve -from a score file in four or five column format. +Usage: %(prog)s [-v...] [options] <scores> + %(prog)s --help + %(prog)s --version -Note: The score file has to contain the exact probe file names as the 3rd -(4column) or 4th (5column) column. 
-""" +Arguments: -import os -import sys + <scores> The score file in 4 or 5 column format to test -def parse_command_line(command_line_options): - """Parse the program options""" - usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0]) +Options: - import argparse - parser = argparse.ArgumentParser(usage=usage, description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter) + -h, --help Shows this help message and exits + -V, --version Prints the version and exits + -v, --verbose Increases the output verbosity level + -o <path>, --output=<path> Name of the output file that will contain the + plots [default: cmc.pdf] + -x, --no-plot If set, then I'll execute no plotting + -l, --log-x-scale If set, plots logarithmic rank axis + -r <int>, --rank=<int> Plot detection & identification rate curve for + the given rank instead of the CMC curve. - # This option is not normally shown to the user... - parser.add_argument('--self-test', action = 'store_true', help = argparse.SUPPRESS) - parser.add_argument('-s', '--score-file', required = True, help = 'The score file in 4 or 5 column format to test.') - parser.add_argument('-o', '--output-pdf-file', default = 'cmc.pdf', help = 'The PDF file to write.') - parser.add_argument('-l', '--log-x-scale', action='store_true', help = 'Plot logarithmic Rank axis.') - parser.add_argument('-r', '--rank', type=int, help = 'Plot Detection & Identification rate curve for the given rank instead of the CMC curve.') - parser.add_argument('-x', '--no-plot', action = 'store_true', help = 'Do not print a PDF file, but only report the results.') - parser.add_argument('-p', '--parser', default = '4column', choices = ('4column', '5column'), help = 'The type of the score file.') - - args = parser.parse_args(command_line_options) +""" - if args.self_test: - # then we go into test mode, all input is preset - import tempfile - temp_dir = tempfile.mkdtemp(prefix="bobtest_") - args.output_pdf_file = os.path.join(temp_dir, "cmc.pdf") - print("temporary using file", args.output_pdf_file) +from __future__ import print_function - return args +import os +import sys -def main(command_line_options = None): - """Computes and plots the CMC curve.""" - from .. import load, plot, recognition_rate +def main(user_input=None): + + if user_input is not None: + argv = user_input + else: + argv = sys.argv[1:] + + import docopt + import pkg_resources + + completions = dict( + prog=os.path.basename(sys.argv[0]), + version=pkg_resources.require('bob.measure')[0].version + ) + + args = docopt.docopt( + __doc__ % completions, + argv=argv, + version=completions['version'], + ) + + # Sets-up logging + if args['--verbose'] == 1: logging.getLogger().setLevel(logging.INFO) + elif args['--verbose'] >= 2: logging.getLogger().setLevel(logging.DEBUG) + + # Validates rank + if args['--rank'] is not None: + try: + args['--rank'] = int(args['--rank']) + except: + raise docopt.DocoptExit("cannot convert %s into int for rank" % \ + args['--rank']) + + if args['--rank'] <= 0: + raise docopt.DocoptExit('Rank (--rank) should greater than zero') + + from .. import load + + # Loads score file + f = load.open_file(args['<scores>']) + try: + line = f.readline() + ncolumns = len(line.split()) + except Exception: + logger.warn('Could not guess the number of columns in file: {}. 
' + 'Assuming 4 column format.'.format(args['<scores>'])) + ncolumns = 4 + finally: + f.close() + + if ncolumns == 4: + data = load.cmc_four_column(args['<scores>']) + else: + data = load.cmc_five_column(args['<scores>']) - args = parse_command_line(command_line_options) + # compute recognition rate + from .. import recognition_rate + rr = recognition_rate(data, args['--rank']) + print("Recognition rate for score file %s is %3.2f%%" % (args['<scores>'], + rr * 100)) - # read data - if not os.path.isfile(args.score_file): raise IOError("The given score file does not exist") - # pythonic way: create inline dictionary "{...}", index with desired value "[...]", execute function "(...)" - data = {'4column' : load.cmc_four_column, '5column' : load.cmc_five_column}[args.parser](args.score_file) + if not args['--no-plot']: - # compute recognition rate - rr = recognition_rate(data, args.rank) - print("Recognition rate for score file", args.score_file, "is %3.2f%%" % (rr * 100)) + from .. import plot - if not args.no_plot: # compute CMC import matplotlib if not hasattr(matplotlib, 'backends'): matplotlib.use('pdf') import matplotlib.pyplot as mpl from matplotlib.backends.backend_pdf import PdfPages - pp = PdfPages(args.output_pdf_file) + pp = PdfPages(args['--output']) # CMC fig = mpl.figure() - if args.rank is None: - max_rank = plot.cmc(data, color=(0,0,1), linestyle='--', dashes=(6,2), logx = args.log_x_scale) + if args['--rank'] is None: + max_rank = plot.cmc(data, color=(0,0,1), linestyle='--', dashes=(6,2), + logx = args['--log-x-scale']) mpl.title("CMC Curve") - if args.log_x_scale: + if args['--log-x-scale']: mpl.xlabel('Rank (log)') else: mpl.xlabel('Rank') @@ -84,10 +121,13 @@ def main(command_line_options = None): ticks = [int(t) for t in mpl.xticks()[0]] mpl.xticks(ticks, ticks) mpl.xlim([1, max_rank]) + else: - plot.detection_identification_curve(data, rank = args.rank, color=(0,0,1), linestyle='--', dashes=(6,2), logx = args.log_x_scale) + plot.detection_identification_curve(data, rank = args['--rank'], + color=(0,0,1), linestyle='--', dashes=(6,2), + logx = args['--log-x-scale']) mpl.title("Detection \& Identification Curve") - if args.log_x_scale: + if args['--log-x-scale']: mpl.xlabel('False Acceptance Rate (log) in %') else: mpl.xlabel('False Acceptance Rate in %') @@ -104,11 +144,4 @@ def main(command_line_options = None): pp.savefig(fig) pp.close() - if args.self_test: #remove output file + tmp directory - import shutil - shutil.rmtree(os.path.dirname(args.output_pdf_file)) - return 0 - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/bob/measure/test_scripts.py b/bob/measure/test_scripts.py index 1408316406e33f42a7decce7975aa11d4f00d8e8..2a6d240ce2db39e93c0875df00cd18ccfad1fad5 100644 --- a/bob/measure/test_scripts.py +++ b/bob/measure/test_scripts.py @@ -1,17 +1,17 @@ #!/usr/bin/env python # vim: set fileencoding=utf-8 : -# Andre Anjos <andre.anjos@idiap.ch> # Tue 21 Aug 2012 12:14:43 CEST -# -# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland """Script tests for bob.measure """ import os +import tempfile + import nose.tools import pkg_resources + def F(f): """Returns the test file on the "data" subdirectory""" return pkg_resources.resource_filename(__name__, os.path.join('data', f)) @@ -27,33 +27,49 @@ SCORES_5COL_CMC = F('scores-cmc-5col.txt') SCORES_4COL_CMC_OS = F('scores-cmc-4col-open-set.txt') + def test_compute_perf(): # sanity checks assert os.path.exists(DEV_SCORES) assert os.path.exists(TEST_SCORES) + tmp_output = 
tempfile.NamedTemporaryFile(prefix=__name__, suffix='.pdf') + + cmdline = [ + DEV_SCORES, + TEST_SCORES, + '--output=' + tmp_output.name, + ] + from .script.compute_perf import main - cmdline = '--devel=%s --test=%s --self-test' % (DEV_SCORES, TEST_SCORES) - nose.tools.eq_(main(cmdline.split()), 0) + nose.tools.eq_(main(cmdline), 0) + def test_eval_threshold(): # sanity checks assert os.path.exists(DEV_SCORES) + cmdline = [DEV_SCORES] + from .script.eval_threshold import main - cmdline = '--scores=%s --self-test' % (DEV_SCORES,) - nose.tools.eq_(main(cmdline.split()), 0) + nose.tools.eq_(main(cmdline), 0) + def test_apply_threshold(): # sanity checks assert os.path.exists(TEST_SCORES) + cmdline = [ + '0.5', + TEST_SCORES, + ] + from .script.apply_threshold import main - cmdline = '--scores=%s --self-test' % (TEST_SCORES,) - nose.tools.eq_(main(cmdline.split()), 0) + nose.tools.eq_(main(cmdline), 0) + def test_compute_perf_5col(): @@ -61,9 +77,17 @@ def test_compute_perf_5col(): assert os.path.exists(DEV_SCORES_5COL) assert os.path.exists(TEST_SCORES_5COL) + tmp_output = tempfile.NamedTemporaryFile(prefix=__name__, suffix='.pdf') + + cmdline = [ + DEV_SCORES_5COL, + TEST_SCORES_5COL, + '--output=' + tmp_output.name, + ] + from .script.compute_perf import main - cmdline = '--devel=%s --test=%s --parser=bob.measure.load.split_five_column --self-test' % (DEV_SCORES_5COL, TEST_SCORES_5COL) - nose.tools.eq_(main(cmdline.split()), 0) + nose.tools.eq_(main(cmdline), 0) + def test_compute_cmc(): @@ -73,6 +97,26 @@ def test_compute_cmc(): assert os.path.exists(SCORES_4COL_CMC_OS) from .script.plot_cmc import main - nose.tools.eq_(main(['--self-test', '--score-file', SCORES_4COL_CMC, '--log-x-scale']), 0) - nose.tools.eq_(main(['--self-test', '--score-file', SCORES_5COL_CMC, '--parser', '5column']), 0) - nose.tools.eq_(main(['--self-test', '--score-file', SCORES_4COL_CMC_OS, '--rank', '1']), 0) + + tmp_output = tempfile.NamedTemporaryFile(prefix=__name__, suffix='.pdf') + + nose.tools.eq_(main([ + SCORES_4COL_CMC, + '--log-x-scale', + '--output=%s' % tmp_output.name, + ]), 0) + + tmp_output = tempfile.NamedTemporaryFile(prefix=__name__, suffix='.pdf') + + nose.tools.eq_(main([ + SCORES_5COL_CMC, + '--output=%s' % tmp_output.name, + ]), 0) + + tmp_output = tempfile.NamedTemporaryFile(prefix=__name__, suffix='.pdf') + + nose.tools.eq_(main([ + SCORES_4COL_CMC_OS, + '--rank=1', + '--output=%s' % tmp_output.name, + ]), 0)
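
Note on the common pattern: all four rewritten scripts share the same docopt skeleton, where the module docstring doubles as the usage message, %(prog)s and the package version are interpolated into it before parsing, and numeric arguments are converted by hand, with docopt.DocoptExit raised on failure. A minimal, self-contained sketch of that skeleton follows; the toy behaviour, the hard-coded '1.0.0' version string and the main() signature are placeholders for illustration, not part of bob.measure.

#!/usr/bin/env python
"""Toy example of the docopt skeleton used by the rewritten scripts

Usage: %(prog)s [-v...] [options] <threshold> <scores>
       %(prog)s --help
       %(prog)s --version

Options:
  -h, --help     Shows this help message and exits
  -V, --version  Prints the version and exits
  -v, --verbose  Increases the output verbosity level
"""

import os
import sys

import docopt


def main(argv=None):

  argv = sys.argv[1:] if argv is None else argv

  completions = dict(
      prog=os.path.basename(sys.argv[0]),
      version='1.0.0',  # placeholder; the real scripts query pkg_resources
      )

  args = docopt.docopt(
      __doc__ % completions,
      argv=argv,
      version=completions['version'],
      )

  # numeric fields arrive as strings and are validated by hand; note that
  # DocoptExit (a SystemExit subclass) must be *raised* to actually abort
  try:
    args['<threshold>'] = float(args['<threshold>'])
  except ValueError:
    raise docopt.DocoptExit("cannot convert %s into float for threshold" %
                            args['<threshold>'])

  print("would apply threshold %f to %s" % (args['<threshold>'],
                                            args['<scores>']))
  return 0


if __name__ == '__main__':
  sys.exit(main())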
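
The error figures printed by apthres() (kept in eval_threshold.py and now imported by apply_threshold.py) come from bob.measure.farfrr. The arithmetic behind that report can be spelled out with plain numpy as below; this is only an illustration of the quantities involved, and tie handling at the threshold may differ from farfrr's exact convention.

import numpy

def report_at_threshold(negatives, positives, threshold):
  """Illustrative FAR/FRR/HTER computation at a fixed threshold"""

  negatives = numpy.asarray(negatives, dtype=float)
  positives = numpy.asarray(positives, dtype=float)

  # false accepts: impostor (negative) scores at or above the threshold
  fa = int((negatives >= threshold).sum())
  # false rejects: genuine (positive) scores below the threshold
  fr = int((positives < threshold).sum())

  far = fa / float(len(negatives))
  frr = fr / float(len(positives))
  hter = (far + frr) / 2.0

  print("FAR : %.3f%% (%d/%d)" % (100 * far, fa, len(negatives)))
  print("FRR : %.3f%% (%d/%d)" % (100 * frr, fr, len(positives)))
  print("HTER: %.3f%%" % (100 * hter,))
  return far, frr, hter

For example, report_at_threshold([-1.2, 0.1, 0.7], [0.3, 0.9, 1.5], 0.5) reports FAR 33.333% (1/3), FRR 33.333% (1/3) and HTER 33.333%.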
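
eval_threshold.py's calculate() picks the threshold through bob.measure's eer_threshold, min_hter_threshold or min_weighted_error_rate_threshold, depending on --criterium. As a rough, self-contained illustration of what the default EER criterion optimizes (not bob's actual algorithm, which operates on the sorted score sets), one can scan candidate thresholds and keep the one where FAR and FRR are closest:

import numpy

def naive_eer_threshold(negatives, positives, n_candidates=1000):
  """Brute-force threshold where |FAR - FRR| is smallest (illustration only)"""

  negatives = numpy.asarray(negatives, dtype=float)
  positives = numpy.asarray(positives, dtype=float)

  lo = min(negatives.min(), positives.min())
  hi = max(negatives.max(), positives.max())

  best_t, best_gap = lo, float('inf')
  for t in numpy.linspace(lo, hi, n_candidates):
    far = (negatives >= t).mean()  # impostors wrongly accepted
    frr = (positives < t).mean()   # genuine scores wrongly rejected
    gap = abs(far - frr)
    if gap < best_gap:
      best_t, best_gap = t, gap
  return best_t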
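
plot_cmc.py no longer takes a --parser option; it peeks at the first line of the score file and counts whitespace-separated columns to decide between cmc_four_column and cmc_five_column, falling back to the 4-column format on failure. Stripped of the bob.measure.load.open_file wrapper used in the actual code, that guess reduces to something like the helper below (guess_ncolumns is a hypothetical name used only here):

def guess_ncolumns(path, default=4):
  """Counts whitespace-separated fields on the first line of a score file"""

  try:
    with open(path, 'rt') as f:
      # an empty first line also falls back to the default column count
      return len(f.readline().split()) or default
  except IOError:
    # mirror the script's fallback: warn upstream and assume 4 columns
    return default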