From 02e18e578322971b1e5ba75876709d66e62cb7dd Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Tue, 24 Jan 2023 23:26:10 +0100
Subject: [PATCH] [tests] Use LFS ci assets for tests

---
 doc/index.rst            |  2 +-
 doc/usage/evaluation.rst |  4 +--
 tests/__init__.py        | 55 -----------------------------
 tests/conftest.py        | 32 +++++++++++++++++
 tests/data/lfs           |  2 +-
 tests/test_cli.py        | 75 ++++++++++++++++++----------------
 tests/test_config.py     | 21 +++------
 7 files changed, 78 insertions(+), 113 deletions(-)
 delete mode 100644 tests/__init__.py

diff --git a/doc/index.rst b/doc/index.rst
index 591e5da1..7757bedb 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -37,7 +37,7 @@ Please use the BibTeX reference below to cite this work:
       number = {Idiap-Com-01-2021},
       year = {2021},
       institution = {Idiap},
-      url = {https://gitlab.idiap.ch/bob/bob.med.tb},
+      url = {https://gitlab.idiap.ch/biosignal/software/ptbench},
       pdf = {https://publidiap.idiap.ch/downloads/reports/2021/Raposo_Idiap-Com-01-2021.pdf}
    }
 
diff --git a/doc/usage/evaluation.rst b/doc/usage/evaluation.rst
index c712585d..d8b15040 100644
--- a/doc/usage/evaluation.rst
+++ b/doc/usage/evaluation.rst
@@ -53,13 +53,13 @@ summaries that help analysis of a trained model.
 Evaluation is done using the dataset configuration, and the path to the
 pretrained weights via the ``--weight`` argument.
 
-Use ``bob tb evaluate --help`` for more information.
+Use ``ptbench evaluate --help`` for more information.
 
 E.g. run evaluation on predictions from the Montgomery set, do the following:
 
 .. code:: sh
 
-   bob tb evaluate -vv montgomery -p /predictions/folder -o /eval/results/folder
+   ptbench evaluate -vv montgomery -p /predictions/folder -o /eval/results/folder
 
 
 Comparing Systems
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index 0652fd27..00000000
--- a/tests/__init__.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-"""Unit tests."""
-
-import logging
-import tempfile
-
-logger = logging.getLogger(__name__)
-
-TESTDB_TMPDIR = None
-_URL = "http://www.idiap.ch/software/bob/data/bob/bob.med.tb/master/_testdb.zip"
-_RCKEY = "datadir.montgomery"
-_FOLDER_NAME = "MontgomerySet_Light"
-
-
-def teardown_package():
-    global TESTDB_TMPDIR
-    if TESTDB_TMPDIR is not None:
-        logger.info(f"Removing temporary directory {TESTDB_TMPDIR.name}...")
-        TESTDB_TMPDIR.cleanup()
-
-
-def mock_dataset():
-    from ptbench.utils.rc import load_rc
-
-    global TESTDB_TMPDIR
-
-    rc = load_rc()
-
-    if (TESTDB_TMPDIR is not None) or (_RCKEY in rc.get("datadir", {})):
-        logger.info("Test database already set up - not downloading")
-    else:
-        logger.info("Test database not available, downloading...")
-        import urllib.request
-        import zipfile
-
-        # Download the file from `url` and save it locally under `file_name`:
-        with urllib.request.urlopen(_URL) as r, tempfile.TemporaryFile() as f:
-            f.write(r.read())
-            f.flush()
-            f.seek(0)
-            TESTDB_TMPDIR = tempfile.TemporaryDirectory(prefix=__name__ + "-")
-            print(f"Creating test database at {TESTDB_TMPDIR.name}...")
-            logger.info(f"Creating test database at {TESTDB_TMPDIR.name}...")
-            with zipfile.ZipFile(f) as zf:
-                zf.extractall(TESTDB_TMPDIR.name)
-
-    if TESTDB_TMPDIR is None:
-        # if the user has the Montgomery directory ready, then we do a normal return
-        return rc["datadir.montgomery"]
-
-    # else, we do a "mock" return
-    return TESTDB_TMPDIR.name + "/" + _FOLDER_NAME + "/"
diff --git a/tests/conftest.py b/tests/conftest.py
index 5b106359..933dcae9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -67,3 +67,35 @@ def rc_variable_set(name):
 @pytest.fixture(scope="session")
 def temporary_basedir(tmp_path_factory):
     return tmp_path_factory.mktemp("test-cli")
+
+
+@pytest.fixture(scope="session")
+def montgomery_datadir(tmp_path_factory, request) -> pathlib.Path:
+    from ptbench.utils.rc import load_rc
+
+    database_dir = load_rc().get("datadir.montgomery")
+    if database_dir is not None:
+        return pathlib.Path(database_dir)
+
+    # else, we must extract the LFS component
+    archive = (
+        pathlib.Path(request.module.__file__).parents[0]
+        / "data"
+        / "lfs"
+        / "test-database.zip"
+    )
+    assert archive.exists(), (
+        f"datadir.montgomery is not set in the global configuration "
+        f"(typically ~/.config/ptbench.toml), and {archive} cannot be "
+        f"found (did you run git submodule update --init on this "
+        f"repository?)"
+    )
+
+    database_dir = tmp_path_factory.mktemp("montgomery_datadir")
+
+    import zipfile
+
+    with zipfile.ZipFile(archive) as zf:
+        zf.extractall(database_dir)
+
+    return database_dir / "MontgomerySet_Light"
diff --git a/tests/data/lfs b/tests/data/lfs
index fd74cbe6..64c25ecf 160000
--- a/tests/data/lfs
+++ b/tests/data/lfs
@@ -1 +1 @@
-Subproject commit fd74cbe68829e13d5d16ff56c2d22668603fab77
+Subproject commit 64c25ecf20b6f6ac2f250772fcb5338c1196a950
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 188d963c..32d24f94 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -13,16 +13,6 @@ import tomli_w
 
 from click.testing import CliRunner
 
-from . import mock_dataset
-
-# Download test data and get their location if needed
-montgomery_datadir = mock_dataset()
-
-_pasa_checkpoint_URL = "https://www.idiap.ch/software/bob/data/biosignal/ptbench/main/_test_fpasa_checkpoint.pth"
-_signstotb_checkpoint_URL = "https://www.idiap.ch/software/bob/data/biosignal/ptbench/main/_test_signstotb_checkpoint.pth"
-_logreg_checkpoint_URL = "https://www.idiap.ch/software/bob/data/biosignal/ptbench/main/_test_logreg_checkpoint.pth"
-# _densenetrs_checkpoint_URL = "http://www.idiap.ch/software/bob/data/biosignal/ptbench/main/_test_densenetrs_checkpoint.pth"
-
 
 @contextlib.contextmanager
 def rc_context(**new_config):
@@ -197,9 +187,9 @@ def test_compare_help():
     _check_help(compare)
 
 
-def test_train_pasa_montgomery(temporary_basedir):
-    # Temporarily modifies Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+def test_train_pasa_montgomery(temporary_basedir, montgomery_datadir):
+    # Temporarily modifies Montgomery datadir if need be
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.scripts.train import train
@@ -259,9 +249,12 @@ def test_train_pasa_montgomery(temporary_basedir):
     )
 
 
-def test_predict_pasa_montgomery(temporary_basedir):
-    # Temporarily modifies Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+def test_predict_pasa_montgomery(
+    temporary_basedir, montgomery_datadir, datadir
+):
+
+    # Temporarily modifies Montgomery datadir if need be
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.scripts.predict import predict
@@ -279,7 +272,7 @@ def test_predict_pasa_montgomery(temporary_basedir):
                 "-vv",
                 "--batch-size=1",
                 "--relevance-analysis",
-                f"--weight={_pasa_checkpoint_URL}",
+                f"--weight={str(datadir / 'lfs' / 'models' / 'pasa.pth')}",
                 f"--output-folder={output_folder}",
            ],
        )
@@ -315,9 +308,9 @@ def test_predict_pasa_montgomery(temporary_basedir):
     )
 
 
-def test_predtojson(datadir, temporary_basedir):
-    # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+def test_predtojson(datadir, temporary_basedir, montgomery_datadir):
+    # Temporarily modify Montgomery datadir if need be
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.scripts.predtojson import predtojson
@@ -360,9 +353,9 @@ def test_predtojson(datadir, temporary_basedir):
     )
 
 
-def test_evaluate_pasa_montgomery(temporary_basedir):
-    # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+def test_evaluate_pasa_montgomery(temporary_basedir, montgomery_datadir):
+    # Temporarily modify Montgomery datadir if need be
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.scripts.evaluate import evaluate
@@ -414,9 +407,9 @@ def test_evaluate_pasa_montgomery(temporary_basedir):
     )
 
 
-def test_compare_pasa_montgomery(temporary_basedir):
-    # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+def test_compare_pasa_montgomery(temporary_basedir, montgomery_datadir):
+    # Temporarily modify Montgomery datadir if need be
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.scripts.compare import compare
@@ -462,7 +455,7 @@ def test_compare_pasa_montgomery(temporary_basedir):
     )
 
 
-def test_train_signstotb_montgomery_rs(temporary_basedir):
+def test_train_signstotb_montgomery_rs(temporary_basedir, datadir):
     from ptbench.scripts.train import train
 
     runner = CliRunner()
@@ -478,7 +471,7 @@ def test_train_signstotb_montgomery_rs(temporary_basedir):
                 "-vv",
                 "--epochs=1",
                 "--batch-size=1",
-                f"--weight={_signstotb_checkpoint_URL}",
+                f"--weight={str(datadir / 'lfs' / 'models' / 'signstotb.pth')}",
                 f"--output-folder={output_folder}",
             ],
         )
@@ -515,7 +508,7 @@ def test_train_signstotb_montgomery_rs(temporary_basedir):
     )
 
 
-def test_predict_signstotb_montgomery_rs():
+def test_predict_signstotb_montgomery_rs(datadir):
     from ptbench.scripts.predict import predict
 
     runner = CliRunner()
@@ -531,7 +524,7 @@ def test_predict_signstotb_montgomery_rs():
                 "-vv",
                 "--batch-size=1",
                 "--relevance-analysis",
-                f"--weight={_signstotb_checkpoint_URL}",
+                f"--weight={str(datadir / 'lfs' / 'models' / 'signstotb.pth')}",
                 f"--output-folder={output_folder}",
             ],
         )
@@ -564,7 +557,7 @@ def test_predict_signstotb_montgomery_rs():
     )
 
 
-def test_train_logreg_montgomery_rs(temporary_basedir):
+def test_train_logreg_montgomery_rs(temporary_basedir, datadir):
     from ptbench.scripts.train import train
 
     runner = CliRunner()
@@ -580,7 +573,7 @@ def test_train_logreg_montgomery_rs(temporary_basedir):
                 "-vv",
                 "--epochs=1",
                 "--batch-size=1",
-                f"--weight={_logreg_checkpoint_URL}",
+                f"--weight={str(datadir / 'lfs' / 'models' / 'logreg.pth')}",
                 f"--output-folder={output_folder}",
             ],
         )
@@ -617,7 +610,7 @@ def test_train_logreg_montgomery_rs(temporary_basedir):
     )
 
 
-def test_predict_logreg_montgomery_rs(temporary_basedir):
+def test_predict_logreg_montgomery_rs(temporary_basedir, datadir):
     from ptbench.scripts.predict import predict
 
     runner = CliRunner()
@@ -632,7 +625,7 @@ def test_predict_logreg_montgomery_rs(temporary_basedir):
                 "montgomery_rs",
                 "-vv",
                 "--batch-size=1",
-                f"--weight={_logreg_checkpoint_URL}",
+                f"--weight={str(datadir / 'lfs' / 'models' / 'logreg.pth')}",
f"--output-folder={output_folder}", ], ) @@ -660,9 +653,9 @@ def test_predict_logreg_montgomery_rs(temporary_basedir): ) -def test_aggregpred(temporary_basedir): - # Temporarily modify Montgomery datadir - new_value = {"datadir.montgomery": montgomery_datadir} +def test_aggregpred(temporary_basedir, montgomery_datadir): + # Temporarily modify Montgomery datadir if need be + new_value = {"datadir.montgomery": str(montgomery_datadir)} with rc_context(**new_value): from ptbench.scripts.aggregpred import aggregpred @@ -706,10 +699,10 @@ def test_aggregpred(temporary_basedir): # Not enough RAM available to do this test -# def test_predict_densenetrs_montgomery(temporary_basedir): +# def test_predict_densenetrs_montgomery(temporary_basedir, montgomery_datadir, datadir): -# # Temporarily modify Montgomery datadir -# new_value = {"datadir.montgomery": montgomery_datadir} +# # Temporarily modify Montgomery datadir if need be +# new_value = {"datadir.montgomery": str(montgomery_datadir)} # with rc_context(**new_value): # from ptbench.scripts.predict import predict @@ -726,7 +719,7 @@ def test_aggregpred(temporary_basedir): # "montgomery_f0_rgb", # "-vv", # "--batch-size=1", -# f"--weight={_densenetrs_checkpoint_URL}", +# f"--weight={str(datadir / 'lfs' / 'models' / 'densenetrs.pth')}", # f"--output-folder={output_folder}", # "--grad-cams" # ], diff --git a/tests/test_config.py b/tests/test_config.py index c6cacef5..f20d7ba6 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -15,11 +15,6 @@ from torch.utils.data import ConcatDataset from ptbench.configs.datasets import get_positive_weights, get_samples_weights -from . import mock_dataset - -# Download test data and get their location if needed -montgomery_datadir = mock_dataset() - # we only iterate over the first N elements at most - dataset loading has # already been checked on the individual datset tests. 
 # testing for the extra tools wrapping the dataset
@@ -65,9 +60,9 @@ def test_montgomery():
     _check_subset(dataset["test"], 28)
 
 
-def test_get_samples_weights():
+def test_get_samples_weights(montgomery_datadir):
     # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.configs.datasets.montgomery.default import dataset
@@ -93,9 +88,9 @@ def test_get_samples_weights_multi():
     )
 
 
-def test_get_samples_weights_concat():
+def test_get_samples_weights_concat(montgomery_datadir):
     # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.configs.datasets.montgomery.default import dataset
@@ -134,9 +129,9 @@ def test_get_samples_weights_multi_concat():
     np.testing.assert_equal(train_samples_weights, ref_samples_weights)
 
 
-def test_get_positive_weights():
+def test_get_positive_weights(montgomery_datadir):
     # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.configs.datasets.montgomery.default import dataset
@@ -212,9 +207,9 @@ def test_get_positive_weights_multi():
     )
 
 
-def test_get_positive_weights_concat():
+def test_get_positive_weights_concat(montgomery_datadir):
     # Temporarily modify Montgomery datadir
-    new_value = {"datadir.montgomery": montgomery_datadir}
+    new_value = {"datadir.montgomery": str(montgomery_datadir)}
 
     with rc_context(**new_value):
         from ptbench.configs.datasets.montgomery.default import dataset
-- 
GitLab
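
For reference, the consumption pattern that the updated tests follow with the
new montgomery_datadir fixture is sketched below.  This is illustrative only:
the test name and the final assertion are hypothetical, while rc_context is
the helper already used by the test modules touched above and the fixture is
the one added to tests/conftest.py in this patch.

    # A minimal sketch, assuming it lives alongside the other tests so that
    # rc_context and the session-scoped montgomery_datadir fixture resolve.
    def test_montgomery_sketch(montgomery_datadir):
        # Point datadir.montgomery at the fixture-provided directory for the
        # duration of this test.  The fixture returns either the configured
        # datadir.montgomery or a temporary extraction of
        # tests/data/lfs/test-database.zip, hence the str() conversion.
        new_value = {"datadir.montgomery": str(montgomery_datadir)}

        with rc_context(**new_value):
            from ptbench.configs.datasets.montgomery.default import dataset

            # Hypothetical sanity check; the real tests assert subset sizes
            # and sample/positive weights.
            assert len(dataset["test"]) > 0

When datadir.montgomery is not configured, run
git submodule update --init tests/data/lfs beforehand, so that the
test-database.zip archive the fixture extracts is actually present.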