Commit c29397de authored by André Anjos

Merge branch 'docfix' into 'master'

Docfix

The fixes include migrating from argparse to docopt and greatly simplifying the command line of each application.

It closes #16.

See merge request !15
parents ecc3d483 4b5fa1c1
Pipeline #4153 passed with stages in 14 minutes and 57 seconds
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Manuel Guenther <Manuel.Guenther@idiap.ch>
# Thu May 16 11:41:49 CEST 2013
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
"""Measures for calibration"""
import math
import numpy
def cllr(negatives, positives):
"""cllr(negatives, positives) -> cllr
"""Cost of log likelihood ratio as defined by the Bosaris toolkit
Computes the 'cost of log likelihood ratio' (:math:`C_{llr}`) measure as
given in the Bosaris toolkit
Parameters:
negatives (array): 1D float array that contains the scores of the
"negative" (noise, non-class) samples of your classifier.
Computes the 'cost of log likelihood ratio' (:math:`C_{llr}`) measure as given in the Bosaris toolkit
positives (array): 1D float array that contains the scores of the
"positive" (signal, class) samples of your classifier.
**Parameters:**
``negatives, positives`` : array_like(1D, float)
The scores computed by comparing elements from different classes and the same class, respectively.
Returns:
**Returns**
float: The computed :math:`C_{llr}` value.
``cllr`` : float
The computed :math:`C_{llr}` value.
"""
sum_pos, sum_neg = 0., 0.
for pos in positives:
@@ -34,19 +38,25 @@ def cllr(negatives, positives):
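For reference, the Bosaris definition treats the scores as natural-log likelihood ratios and computes :math:`C_{llr} = \frac{1}{2}\left[\overline{\log_2(1 + e^{-s})}_{pos} + \overline{\log_2(1 + e^{s})}_{neg}\right]`, with the averages taken over the positive and negative scores respectively. The snippet below is a minimal numpy sketch of that formula; it is not the collapsed function body above, and the helper name cllr_sketch is illustrative only.

import numpy

def cllr_sketch(negatives, positives):
  # scores are assumed to be natural-log likelihood ratios
  pos = numpy.asarray(positives, dtype=numpy.float64)
  neg = numpy.asarray(negatives, dtype=numpy.float64)
  pos_term = numpy.mean(numpy.log2(1.0 + numpy.exp(-pos)))  # cost accrued on true-class scores
  neg_term = numpy.mean(numpy.log2(1.0 + numpy.exp(neg)))   # cost accrued on impostor scores
  return 0.5 * (pos_term + neg_term)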
def min_cllr(negatives, positives):
"""min_cllr(negatives, positives) -> min_cllr
"""Minimum cost of log likelihood ratio as defined by the Bosaris toolkit
Computes the 'minimum cost of log likelihood ratio' (:math:`C_{llr}^{min}`)
measure as given in the bosaris toolkit
Parameters:
negatives (array): 1D float array that contains the scores of the
"negative" (noise, non-class) samples of your classifier.
Computes the 'minimum cost of log likelihood ratio' (:math:`C_{llr}^{min}`) measure as given in the bosaris toolkit
positives (array): 1D float array that contains the scores of the
"positive" (signal, class) samples of your classifier.
**Parameters:**
``negatives, positives`` : array_like(1D, float)
The scores computed by comparing elements from different classes and the same class, respectively.
Returns:
**Returns**
float: The computed :math:`C_{llr}^{min}` value.
``min_cllr`` : float
The computed :math:`C_{llr}^{min}` value.
"""
from bob.math import pavx
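:math:`C_{llr}^{min}` is the lowest :math:`C_{llr}` reachable by any order-preserving recalibration of the scores, which this function computes with the pool-adjacent-violators routine bob.math.pavx imported above (the rest of the body is collapsed). The following is only a rough conceptual sketch of that recipe: it substitutes scikit-learn's IsotonicRegression for pavx and reuses cllr_sketch from the previous note; the real implementation differs in details such as tie handling and clipping.

import numpy
from sklearn.isotonic import IsotonicRegression  # stand-in for bob.math.pavx

def min_cllr_sketch(negatives, positives):
  scores = numpy.concatenate((negatives, positives))
  labels = numpy.concatenate((numpy.zeros(len(negatives)), numpy.ones(len(positives))))
  # PAV / isotonic regression of the labels on the scores yields monotone posterior estimates
  post = IsotonicRegression(y_min=1e-7, y_max=1.0 - 1e-7).fit_transform(scores, labels)
  # map posteriors (at the empirical prior) back to log-likelihood ratios
  prior = float(len(positives)) / len(scores)
  llrs = numpy.log(post / (1.0 - post)) - numpy.log(prior / (1.0 - prior))
  return cllr_sketch(llrs[labels == 0], llrs[labels == 1])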
@@ -102,7 +102,7 @@ static auto det_doc = bob::extension::FunctionDoc(
"[0] X axis values in the normal deviate scale for the false-accepts\n\n"
"[1] Y axis values in the normal deviate scale for the false-rejections\n\n"
"You can plot the results using your preferred tool to first create a plot using rows 0 and 1 from the returned value and then replace the X/Y axis annotation using a pre-determined set of tickmarks as recommended by NIST. "
"The derivative scales are computed with the :py:func:`ppndf` function."
"The derivative scales are computed with the :py:func:`bob.measure.ppndf` function."
)
.add_prototype("negatives, positives, n_points", "curve")
.add_parameter("negatives, positives", "array_like(1D, float)", "The list of negative and positive scores to compute the DET for")
@@ -264,7 +264,7 @@ static auto eer_threshold_doc = bob::extension::FunctionDoc(
.add_prototype("negatives, positives, [is_sorted]", "threshold")
.add_parameter("negatives, positives", "array_like(1D, float)", "The set of negative and positive scores to compute the threshold")
.add_parameter("is_sorted", "bool", "[Default: ``False``] Are both sets of scores already in ascendantly sorted order?")
.add_return("threshold", "float", "The threshold (i.e., as used in :py:func:`farfrr`) where FAR and FRR are as close as possible")
.add_return("threshold", "float", "The threshold (i.e., as used in :py:func:`bob.measure.farfrr`) where FAR and FRR are as close as possible")
;
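A small usage sketch of the function documented above, on synthetic scores; the score values and their Gaussian shape are assumptions made purely for illustration.

import numpy
from bob.measure import eer_threshold, farfrr

negatives = numpy.random.normal(-1.0, 1.0, 1000)  # impostor scores
positives = numpy.random.normal(+1.0, 1.0, 1000)  # client scores
threshold = eer_threshold(negatives, positives)
far, frr = farfrr(negatives, positives, threshold)  # roughly equal at the EER point
print("threshold = %.3f | FAR = %.2f%% | FRR = %.2f%%" % (threshold, 100*far, 100*frr))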
static PyObject* eer_threshold(PyObject*, PyObject* args, PyObject* kwds) {
BOB_TRY
@@ -344,7 +344,7 @@ BOB_CATCH_FUNCTION("min_weighted_error_rate_threshold", 0)
static auto min_hter_threshold_doc = bob::extension::FunctionDoc(
"min_hter_threshold",
"Calculates the :py:func:`min_weighted_error_rate_threshold` with ``cost=0.5``"
"Calculates the :py:func:`bob.measure.min_weighted_error_rate_threshold` with ``cost=0.5``"
)
.add_prototype("negatives, positives, [is_sorted]", "threshold")
.add_parameter("negatives, positives", "array_like(1D, float)", "The set of negative and positive scores to compute the threshold")
@@ -390,7 +390,7 @@ static auto precision_recall_doc = bob::extension::FunctionDoc(
"where :math:`tp` are the true positives, :math:`fp` are the false positives and :math:`fn` are the false negatives.\n\n"
"``positives`` holds the score information for samples that are labeled to belong to a certain class (a.k.a., 'signal' or 'client'). "
"``negatives`` holds the score information for samples that are labeled **not** to belong to the class (a.k.a., 'noise' or 'impostor'). "
"For more precise details about how the method considers error rates, see :py:func:`farfrr`."
"For more precise details about how the method considers error rates, see :py:func:`bob.measure.farfrr`."
)
.add_prototype("negatives, positives, threshold", "precision, recall")
.add_parameter("negatives, positives", "array_like(1D, float)", "The set of negative and positive scores to compute the measurements")
@@ -429,7 +429,7 @@ BOB_CATCH_FUNCTION("precision_recall", 0)
static auto f_score_doc = bob::extension::FunctionDoc(
"f_score",
"This method computes the F-score of the accuracy of the classification",
"The F-score is a weighted mean of precision and recall measurements, see :py:func:`precision_recall`. "
"The F-score is a weighted mean of precision and recall measurements, see :py:func:`bob.measure.precision_recall`. "
"It is computed as:\n\n"
".. math::\n\n"
" \\mathrm{f-score} = (1 + w^2)\\frac{\\mathrm{precision}\\cdot{}\\mathrm{recall}}{w^2\\cdot{}\\mathrm{precision} + \\mathrm{recall}}\n\n"
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Wed May 25 13:27:46 2011 +0200
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
# Wed 28 Sep 2016 17:55:17 CEST
"""Applies a threshold to score file and reports error rates
Usage: %(prog)s [-v...] [options] <threshold> <scores>
%(prog)s --help
%(prog)s --version
Arguments:
<threshold> The threshold value to apply (float)
<scores> Path to the file containing the scores where to apply the
threshold and calculate error rates
Options:
-h, --help Shows this help message and exits
-V, --version Prints the version and exits
-v, --verbose Increases the output verbosity level
"""This script applies a threshold to score file and reports error rates
"""
__epilog__ = """
Examples:
1. Standard usage
Applies the threshold of 0.5 to the scores file in scores.txt and reports:
+ $ %(prog)s 0.5 scores.txt
- $ %(prog)s --scores=my-scores.txt --threshold=0.5
"""
import os
import sys
from .. import farfrr, load
def apthres(neg, pos, thres):
"""Prints a single output line that contains all info for the threshold"""
far, frr = farfrr(neg, pos, thres)
hter = (far + frr)/2.0
ni = neg.shape[0] #number of impostors
fa = int(round(far*ni)) #number of false accepts
nc = pos.shape[0] #number of clients
fr = int(round(frr*nc)) #number of false rejects
print("FAR : %.3f%% (%d/%d)" % (100*far, fa, ni))
print("FRR : %.3f%% (%d/%d)" % (100*frr, fr, nc))
print("HTER: %.3f%%" % (100*hter,))
def get_options(user_input):
"""Parse the program options"""
usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0])
import argparse
parser = argparse.ArgumentParser(usage=usage,
description=(__doc__ % {'prog': os.path.basename(sys.argv[0])}),
epilog=(__epilog__ % {'prog': os.path.basename(sys.argv[0])}),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-s', '--scores', dest="ifile", default=None,
help="Name of the file containing the scores (defaults to %(default)s)",
metavar="FILE")
parser.add_argument('-t', '--threshold', dest='thres', default=None,
type=float, help="The threshold value to apply", metavar="FLOAT")
parser.add_argument('-p', '--parser', dest="parser", default="4column",
help="Name of a known parser or of a python-importable function that can parse your input files and return a tuple (negatives, positives) as blitz 1-D arrays of 64-bit floats. Consult the API of bob.measure.load.split_four_column() for details", metavar="NAME.FUNCTION")
# This option is not normally shown to the user...
parser.add_argument("--self-test",
action="store_true", dest="test", default=False, help=argparse.SUPPRESS)
#help="if set, runs an internal verification test and erases any output")
args = parser.parse_args(args=user_input)
if args.test:
# then we go into test mode, all input is preset
args.thres = 0.0
if args.ifile is None:
parser.error("you should give an input score set with --scores")
if args.thres is None:
parser.error("you should give a threshold value with --threshold")
#parse the score-parser
if args.parser.lower() in ('4column', '4col'):
args.parser = load.split_four_column
elif args.parser.lower() in ('5column', '5col'):
args.parser = load.split_five_column
else: #try an import
if args.parser.find('.') == -1:
parser.error("parser module should be either '4column', '5column' or a valid python function identifier in the format 'module.function': '%s' is invalid" % args.parser)
mod, fct = args.parser.rsplit('.', 2)
import imp
try:
fp, pathname, description = imp.find_module(mod, ['.'] + sys.path)
except Exception as e:
parser.error("import error for '%s': %s" % (args.parser, e))
try:
pmod = imp.load_module(mod, fp, pathname, description)
args.parser = getattr(pmod, fct)
except Exception as e:
parser.error("loading error for '%s': %s" % (args.parser, e))
finally:
fp.close()
return args
import logging
__logging_format__='[%(levelname)s] %(message)s'
logging.basicConfig(format=__logging_format__)
logger = logging.getLogger('bob')
from .eval_threshold import apthres
def main(user_input=None):
- options = get_options(user_input)
if user_input is not None:
argv = user_input
else:
argv = sys.argv[1:]
import docopt
import pkg_resources
completions = dict(
prog=os.path.basename(sys.argv[0]),
version=pkg_resources.require('bob.measure')[0].version
)
args = docopt.docopt(
__doc__ % completions,
argv=argv,
version=completions['version'],
)
# Sets-up logging
if args['--verbose'] == 1: logging.getLogger().setLevel(logging.INFO)
elif args['--verbose'] >= 2: logging.getLogger().setLevel(logging.DEBUG)
# handles threshold validation
try:
args['<threshold>'] = float(args['<threshold>'])
except:
raise docopt.DocoptExit("cannot convert %s into float for threshold" % \
args['<threshold>'])
from ..load import load_score, get_negatives_positives
neg, pos = get_negatives_positives(load_score(args['<scores>']))
- neg, pos = options.parser(options.ifile)
- apthres(neg, pos, options.thres)
+ apthres(neg, pos, args['<threshold>'])
return 0
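The migration above leans on two docopt behaviours that are easy to miss: repeated flags declared as ``[-v...]`` come back as a count, and positional arguments always come back as strings (hence the explicit float() conversion). A small, self-contained illustration with a made-up usage string reduced from the one above:

import docopt

usage = """Applies a threshold to a score file

Usage: apply.py [-v...] <threshold> <scores>

Options:
  -v, --verbose  Increases the output verbosity level
"""

args = docopt.docopt(usage, argv=['-vv', '0.5', 'scores.txt'])
print(args['--verbose'])    # 2: repeated flags are counted
print(args['<threshold>'])  # '0.5': still a string, hence the float() conversion above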
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Wed May 25 13:27:46 2011 +0200
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
# Wed 28 Sep 2016 15:39:05 CEST
"""Runs error analysis on score sets
"""This script runs error analysis on the development and test set scores, in a
four column format:
1. Computes the threshold using either EER or min. HTER criteria on
development set scores;
2. Applies the above threshold on test set scores to compute the HTER
3. Plots ROC, EPC and DET curves to a multi-page PDF file
"""
__epilog__ = """
Usage: %(prog)s [-v...] [options] <dev-scores> <test-scores>
%(prog)s --help
%(prog)s --version
Arguments:
<dev-scores> Path to the file containing the development scores
<test-scores> Path to the file containing the test scores
Options:
-h, --help Shows this help message and exits
-V, --version Prints the version and exits
-v, --verbose Increases the output verbosity level
-n <int>, --points=<int> Number of points to use in the curves
[default: 100]
-o <path>, --output=<path> Name of the output file that will contain the
plots [default: curves.pdf]
-x, --no-plot If set, then I'll execute no plotting
Examples:
1. Specify a different output filename
- $ %(prog)s --output=mycurves.pdf --devel=dev.scores --test=test.scores
+ $ %(prog)s -vv --output=mycurves.pdf dev.scores test.scores
2. Specify a different number of points
- $ %(prog)s --points=500 --devel=dev.scores --test=test.scores
+ $ %(prog)s --points=500 dev.scores test.scores
3. Don't plot (only calculate thresholds)
- $ %(prog)s --no-plot --devel=dev.scores --test=test.scores
+ $ %(prog)s --no-plot dev.scores test.scores
"""
import os
import sys
import numpy
import logging
__logging_format__='[%(levelname)s] %(message)s'
logging.basicConfig(format=__logging_format__)
logger = logging.getLogger('bob')
def print_crit(dev_neg, dev_pos, test_neg, test_pos, crit):
"""Prints a single output line that contains all info for a given criterium"""
"""Prints a single output line that contains all info for a given criterion"""
from .. import eer_threshold, min_hter_threshold, farfrr
@@ -49,7 +73,7 @@ def print_crit(dev_neg, dev_pos, test_neg, test_pos, crit):
test_far, test_frr = farfrr(test_neg, test_pos, thres)
test_hter = (test_far + test_frr)/2.0
print("[Min. criterium: %s] Threshold on Development set: %e" % (crit, thres))
print("[Min. criterion: %s] Threshold on Development set: %e" % (crit, thres))
dev_ni = dev_neg.shape[0] #number of impostors
dev_fa = int(round(dev_far*dev_ni)) #number of false accepts
@@ -82,7 +106,8 @@ def print_crit(dev_neg, dev_pos, test_neg, test_pos, crit):
print(" HTER | %s | %s" % (fmt(dev_hter_str, -1*dev_max_len),
fmt(test_hter_str, -1*test_max_len)))
- def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename):
+ def plots(dev_neg, dev_pos, test_neg, test_pos, points, filename):
"""Saves ROC, DET and EPC curves on the file pointed out by filename."""
from .. import plot
@@ -96,9 +121,9 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename):
# ROC
fig = mpl.figure()
- plot.roc(dev_neg, dev_pos, npoints, color=(0.3,0.3,0.3),
+ plot.roc(dev_neg, dev_pos, points, color=(0.3,0.3,0.3),
linestyle='--', dashes=(6,2), label='development')
- plot.roc(test_neg, test_pos, npoints, color=(0,0,0),
+ plot.roc(test_neg, test_pos, points, color=(0,0,0),
linestyle='-', label='test')
mpl.axis([0,40,0,40])
mpl.title("ROC Curve")
@@ -110,9 +135,9 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename):
# DET
fig = mpl.figure()
- plot.det(dev_neg, dev_pos, npoints, color=(0.3,0.3,0.3),
+ plot.det(dev_neg, dev_pos, points, color=(0.3,0.3,0.3),
linestyle='--', dashes=(6,2), label='development')
- plot.det(test_neg, test_pos, npoints, color=(0,0,0),
+ plot.det(test_neg, test_pos, points, color=(0,0,0),
linestyle='-', label='test')
plot.det_axis([0.01, 40, 0.01, 40])
mpl.title("DET Curve")
@@ -124,7 +149,7 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename):
# EPC
fig = mpl.figure()
- plot.epc(dev_neg, dev_pos, test_neg, test_pos, npoints,
+ plot.epc(dev_neg, dev_pos, test_neg, test_pos, points,
color=(0,0,0), linestyle='-')
mpl.title('EPC Curve')
mpl.xlabel('Cost')
@@ -134,86 +159,53 @@ def plots(dev_neg, dev_pos, test_neg, test_pos, npoints, filename):
pp.close()
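The figure set-up and the pp.savefig calls sit in the collapsed hunks, but the multi-page PDF pattern presumably used by plots() is the standard matplotlib one; a hedged sketch:

import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as mpl
from matplotlib.backends.backend_pdf import PdfPages

pp = PdfPages('curves.pdf')
for title in ('ROC Curve', 'DET Curve', 'EPC Curve'):
  fig = mpl.figure()
  mpl.title(title)
  # ...the corresponding plot.roc / plot.det / plot.epc call goes here...
  pp.savefig(fig)  # one page per figure
pp.close()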
def get_options(user_input):
"""Parse the program options"""
usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0])
import argparse
parser = argparse.ArgumentParser(usage=usage,
description=(__doc__ % {'prog': os.path.basename(sys.argv[0])}),
epilog=(__epilog__ % {'prog': os.path.basename(sys.argv[0])}),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-d', '--devel', dest="dev", default=None,
help="Name of the file containing the development scores (defaults to %(default)s)", metavar="FILE")
parser.add_argument('-t', '--test', dest="test", default=None,
help="Name of the file containing the test scores (defaults to %(default)s)", metavar="FILE")
parser.add_argument('-n', '--points', dest="npoints", default=100, type=int,
help="Number of points to use in the curves (defaults to %(default)s)",
metavar="INT(>0)")
parser.add_argument('-o', '--output', dest="plotfile", default="curves.pdf",
help="Name of the output file that will contain the plots (defaults to %(default)s)", metavar="FILE")
parser.add_argument('-x', '--no-plot', dest="doplot", default=True,
action='store_false', help="If set, then I'll execute no plotting")
parser.add_argument('-p', '--parser', dest="parser", default="4column",
help="Name of a known parser or of a python-importable function that can parse your input files and return a tuple (negatives, positives) as blitz 1-D arrays of 64-bit floats. Consult the API of bob.measure.load.split_four_column() for details", metavar="NAME.FUNCTION")
# This option is not normally shown to the user...
parser.add_argument("--self-test",
action="store_true", dest="selftest", default=False,
help=argparse.SUPPRESS)
#help="if set, runs an internal verification test and erases any output")
args = parser.parse_args(args=user_input)
if args.selftest:
# then we go into test mode, all input is preset
import tempfile
outputdir = tempfile.mkdtemp(prefix='bobtest_')
args.plotfile = os.path.join(outputdir, "curves.pdf")
if args.dev is None:
parser.error("you should give a development score set with --devel")
if args.test is None:
parser.error("you should give a test score set with --test")
#parse the score-parser
from .. import load
if args.parser.lower() in ('4column', '4col'):
args.parser = load.split_four_column
elif args.parser.lower() in ('5column', '5col'):
args.parser = load.split_five_column
else: #try an import
if args.parser.find('.') == -1:
parser.error("parser module should be either '4column', '5column' or a valid python function identifier in the format 'module.function': '%s' is invalid" % arg.parser)
mod, fct = args.parser.rsplit('.', 1)
try:
args.parser = getattr(__import__(mod, fromlist=['*']), fct)
except Exception as e:
parser.error("error importing '%s': %s" % (args.parser, e))
return args
def main(user_input=None):
- options = get_options(user_input)
if user_input is not None:
argv = user_input
else:
argv = sys.argv[1:]
import docopt
import pkg_resources
completions = dict(
prog=os.path.basename(sys.argv[0]),
version=pkg_resources.require('bob.measure')[0].version
)
- dev_neg, dev_pos = options.parser(options.dev)
- test_neg, test_pos = options.parser(options.test)
args = docopt.docopt(
__doc__ % completions,
argv=argv,
version=completions['version'],
)
# Sets-up logging
if args['--verbose'] == 1: logging.getLogger().setLevel(logging.INFO)
elif args['--verbose'] >= 2: logging.getLogger().setLevel(logging.DEBUG)
# Checks number of points option
try:
args['--points'] = int(args['--points'])
except:
raise docopt.DocoptExit("cannot convert %s into int for points" % \
args['--points'])
if args['--points'] <= 0:
raise docopt.DocoptExit('Number of points (--points) should be greater ' \
'than zero')
from ..load import load_score, get_negatives_positives
dev_neg, dev_pos = get_negatives_positives(load_score(args['<dev-scores>']))
test_neg, test_pos = get_negatives_positives(load_score(args['<test-scores>']))
print_crit(dev_neg, dev_pos, test_neg, test_pos, 'EER')
print_crit(dev_neg, dev_pos, test_neg, test_pos, 'Min. HTER')
- if options.doplot:
- plots(dev_neg, dev_pos, test_neg, test_pos, options.npoints,
- options.plotfile)
- print("[Plots] Performance curves => '%s'" % options.plotfile)
- if options.selftest: #remove output file + tmp directory
- import shutil
- shutil.rmtree(os.path.dirname(options.plotfile))
+ if not args['--no-plot']:
+ plots(dev_neg, dev_pos, test_neg, test_pos, args['--points'],
+ args['--output'])
+ print("[Plots] Performance curves => '%s'" % args['--output'])
return 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Wed May 25 13:27:46 2011 +0200
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
# Wed 28 Sep 2016 16:56:52 CEST
"""Computes the threshold following a minimization criteria on input scores
Usage: %(prog)s [-v...] [options] <scores>
%(prog)s --help
%(prog)s --version
Arguments:
<scores> Path to the file containing the scores to be used for calculating
the threshold
Options:
-h, --help Shows this help message and exits
-V, --version Prints the version and exits
-v, --verbose Increases the output verbosity level
-c <crit>, --criterion=<crit> The minimization criterion to use (choose
between mhter, mwer or eer) [default: eer]
-w <float>, --cost=<float> The value w of the cost when minimizing using
the minimum weighted error rate (mwer)
criterion. This value is ignored for eer or
mhter criteria. [default: 0.5]
"""This script computes the threshold following a certain minimization criteria
on the given input data."""
__epilog__ = """
Examples:
1. Specify a different criterion (only mhter, mwer or eer accepted):
- $ %(prog)s --scores=dev.scores --criterium=mhter
+ $ %(prog)s --criterion=mhter scores.txt
2. Calculate the threshold that minimizes the weighted HTER for a cost of 0.4:
- $ %(prog)s --scores=dev.scores --criterium=mwer --cost=0.4
+ $ %(prog)s --criterion=mwer --cost=0.4 scores.txt
3. Parse your input using a 5-column format
- $ %(prog)s --scores=dev.scores --parser=5column
+ $ %(prog)s scores.txt
"""
import os
import sys
import logging
__logging_format__='[%(levelname)s] %(message)s'
logging.basicConfig(format=__logging_format__)
logger = logging.getLogger('bob')
def apthres(neg, pos, thres):
"""Prints a single output line that contains all info for the threshold"""
@@ -44,87 +70,67 @@ def apthres(neg, pos, thres):
print("FRR : %.3f%% (%d/%d)" % (100*frr, fr, nc))
print("HTER: %.3f%%" % (100*hter,))
def calculate(neg, pos, crit, cost):
"""Returns the threshold given a certain criteria"""
from .. import eer_threshold, min_hter_threshold, min_weighted_error_rate_threshold
if crit == 'eer':
from .. import eer_threshold
return eer_threshold(neg, pos)
elif crit == 'mhter':
from .. import min_hter_threshold
return min_hter_threshold(neg, pos)
# defaults to the minimum of the weighted error rate
from .. import min_weighted_error_rate_threshold
return min_weighted_error_rate_threshold(neg, pos, cost)
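A hedged usage sketch of calculate() with the three accepted criteria, assuming the function above is in scope and using synthetic scores; the cost argument is only honoured for mwer, as the option help states.

import numpy

neg = numpy.random.normal(-1.0, 1.0, 500)
pos = numpy.random.normal(+1.0, 1.0, 500)
print(calculate(neg, pos, 'eer', None))    # EER threshold; cost is ignored
print(calculate(neg, pos, 'mhter', None))  # minimum-HTER threshold; cost is ignored
print(calculate(neg, pos, 'mwer', 0.4))    # minimum weighted error rate with cost w = 0.4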
def get_options(user_input):
"""Parse the program options"""
usage = 'usage: %s [arguments]' % os.path.basename(sys.argv[0])
import argparse
parser = argparse.ArgumentParser(usage=usage,
description=(__doc__ % {'prog': os.path.basename(sys.argv[0])}),
epilog=(__epilog__ % {'prog': os.path.basename(sys.argv[0])}),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-s', '--scores', dest="ifile", default=None,
help="Name of the file containing the scores (defaults to %(default)s)",
metavar="FILE")
parser.add_argument('-c', '--criterium', dest='crit', default='eer',
choices=('eer', 'mhter', 'mwer'),
help="The minimization criterium to use", metavar="CRITERIUM")
parser.add_argument('-w', '--cost', dest='cost', default=0.5,
type=float, help="The value w of the cost when minimizing using the minimum weighter error rate (mwer) criterium. This value is ignored for eer or mhter criteria.", metavar="FLOAT")
parser.add_argument('-p', '--parser', dest="parser", default="4column",
help="Name of a known parser or of a python-importable function that can parse your input files and return a tuple (negatives, positives) as blitz 1-D arrays of 64-bit floats. Consult the API of bob.measure.load.split_four_column() for details", metavar="NAME.FUNCTION")
# This option is not normally shown to the user...
parser.add_argument("--self-test",
action="store_true", dest="test", default=False, help=argparse.SUPPRESS)
#help="if set, runs an internal verification test and erases any output")
args = parser.parse_args(args=user_input)
if args.ifile is None:
parser.error("you should give an input score set with --scores")
if args.cost < 0.0 or args.cost > 1.0:
parser.error("cost should lie between 0.0 and 1.0")
#parse the score-parser
from .. import load
if args.parser.lower() in ('4column', '4col'):
args.parser = load.split_four_column
elif args.parser.lower() in ('5column', '5col'):
args.parser = load.split_five_column
else: #try an import
if args.parser.find('.') == -1:
parser.error("parser module should be either '4column', '5column' or a valid python function identifier in the format 'module.function': '%s' is invalid" % args.parser)
mod, fct = args.parser.rsplit('.', 2)
import imp
try:
fp, pathname, description = imp.find_module(mod, ['.'] + sys.path)
except Exception as e:
parser.error("import error for '%s': %s" % (args.parser, e))
try:
pmod = imp.load_module(mod, fp, pathname, description)
args.parser = getattr(pmod, fct)
except Exception as e:
parser.error("loading error for '%s': %s" % (args.parser, e))
finally: