Commit b6b0b0f0 authored by Manuel Günther's avatar Manuel Günther
Browse files

Fixed issue with --directory flag in evaluate.py

parent 8d7a645c
Pipeline #13480 passed with stages
in 16 minutes and 9 seconds
...@@ -75,8 +75,9 @@ def command_line_arguments(command_line_parameters): ...@@ -75,8 +75,9 @@ def command_line_arguments(command_line_parameters):
# some sanity checks: # some sanity checks:
for f in args.dev_files + (args.eval_files or []): for f in args.dev_files + (args.eval_files or []):
if not os.path.exists(f): real_file = os.path.join(args.directory, f)
raise ValueError("The provided score file '%s' does not exist", f) if not os.path.exists(real_file):
raise ValueError("The provided score file '%s' does not exist", real_file)
if args.eval_files is not None and len(args.dev_files) != len(args.eval_files): if args.eval_files is not None and len(args.dev_files) != len(args.eval_files):
logger.error("The number of --dev-files (%d) and --eval-files (%d) are not identical", len(args.dev_files), len(args.eval_files)) logger.error("The number of --dev-files (%d) and --eval-files (%d) are not identical", len(args.dev_files), len(args.eval_files))
......
...@@ -478,12 +478,12 @@ def test_fusion(): ...@@ -478,12 +478,12 @@ def test_fusion():
def test_evaluate_closedset(): def test_evaluate_closedset():
# tests our 'evaluate' script using the reference files # tests our 'evaluate' script using the reference files
test_dir = tempfile.mkdtemp(prefix='bobtest_') test_dir = tempfile.mkdtemp(prefix='bobtest_')
reference_files = [os.path.join(data_dir, s) for s in ('scores-nonorm-dev', 'scores-ztnorm-dev')] reference_files = ('scores-nonorm-dev', 'scores-ztnorm-dev')
plots = [os.path.join(test_dir, '%s.pdf')%f for f in ['roc', 'cmc', 'det', 'epc']] plots = [os.path.join(test_dir, '%s.pdf')%f for f in ['roc', 'cmc', 'det', 'epc']]
parameters = [ parameters = [
'--dev-files', reference_files[0], reference_files[1], '--dev-files', reference_files[0], reference_files[1],
'--eval-files', reference_files[0], reference_files[1], '--eval-files', reference_files[0], reference_files[1],
'--directory', os.path.join(data_dir), '--directory', data_dir, # will not be ignored since reference files are relative
'--legends', 'no norm', 'ZT norm', '--legends', 'no norm', 'ZT norm',
'--criterion', 'HTER', '--criterion', 'HTER',
'--roc', plots[0], '--roc', plots[0],
...@@ -516,7 +516,7 @@ def test_evaluate_openset(): ...@@ -516,7 +516,7 @@ def test_evaluate_openset():
parameters = [ parameters = [
'--dev-files', reference_file, '--dev-files', reference_file,
'--eval-files', reference_file, '--eval-files', reference_file,
'--directory', os.path.join(data_dir), '--directory', "/non/existing/directory", # will be ignored since reference_file is absolute
'--legends', 'Test', '--legends', 'Test',
'--dir', plot, '--dir', plot,
'--min-far-value', '1e-6', '--min-far-value', '1e-6',
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment