# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Tests for our CLI applications."""
import contextlib
import glob
import os
import re

import pytest

from click.testing import CliRunner


@contextlib.contextmanager
def stdout_logging():
    # copy logging messages to stdout
    import io
    import logging

    buf = io.StringIO()
    ch = logging.StreamHandler(buf)
    ch.setFormatter(logging.Formatter("%(message)s"))
    ch.setLevel(logging.INFO)

    logger = logging.getLogger("ptbench")
    logger.addHandler(ch)
    yield buf
    logger.removeHandler(ch)
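

# Typical use of stdout_logging() in the tests below:
#
#   with stdout_logging() as buf:
#       result = runner.invoke(...)
#       ...
#       buf.seek(0)
#       logging_output = buf.read()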


def _assert_exit_0(result):
    assert (
        result.exit_code == 0
    ), f"Exit code {result.exit_code} != 0 -- Output:\n{result.output}"


def _check_help(entry_point):
    runner = CliRunner()
    result = runner.invoke(entry_point, ["--help"])
    _assert_exit_0(result)
    assert result.output.startswith("Usage:")


def test_config_help():
    from ptbench.scripts.config import config

    _check_help(config)


def test_config_list_help():
    from ptbench.scripts.config import list

    _check_help(list)


def test_config_list():
    from ptbench.scripts.config import list

    runner = CliRunner()
    result = runner.invoke(list)
    _assert_exit_0(result)
    assert "module: ptbench.config.data" in result.output
    assert "module: ptbench.config.models" in result.output


def test_config_list_v():
    from ptbench.scripts.config import list

    result = CliRunner().invoke(list, ["--verbose"])
    _assert_exit_0(result)
    assert "module: ptbench.config.data" in result.output
    assert "module: ptbench.config.models" in result.output


def test_config_describe_help():
    from ptbench.scripts.config import describe

    _check_help(describe)


@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_config_describe_montgomery():
    from ptbench.scripts.config import describe

    runner = CliRunner()
    result = runner.invoke(describe, ["montgomery"])
    _assert_exit_0(result)
    assert "Montgomery datamodule for TB detection." in result.output


def test_database_help():
    from ptbench.scripts.database import database

    _check_help(database)


def test_database_list_help():
    from ptbench.scripts.database import list

    _check_help(list)


def test_database_list():
    from ptbench.scripts.database import list

    runner = CliRunner()
    result = runner.invoke(list)
    _assert_exit_0(result)
    assert result.output.startswith("Available databases:")


def test_database_check_help():
    from ptbench.scripts.database import check

    _check_help(check)


@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_database_check():
    from ptbench.scripts.database import check

    runner = CliRunner()
    result = runner.invoke(check, ["--verbose", "--limit=1", "montgomery"])
    _assert_exit_0(result)


def test_main_help():
    from ptbench.scripts.cli import cli

    _check_help(cli)


def test_train_help():
    from ptbench.scripts.train import train

    _check_help(train)


def _str_counter(substr, s):
    # counts how many times a (multi-line) regular expression matches a string
    return sum(1 for _ in re.finditer(substr, s, re.MULTILINE))


def test_predict_help():
    from ptbench.scripts.predict import predict

    _check_help(predict)


def test_evaluate_help():
    from ptbench.scripts.evaluate import evaluate

    _check_help(evaluate)
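

# The integration tests below all follow the same pattern: invoke a CLI
# command through CliRunner while capturing ptbench log messages with
# stdout_logging(), check that the expected output files were produced, and
# finally count expected log lines with _str_counter() against a ``keywords``
# dictionary mapping regular expressions to expected occurrence counts.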


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_train_pasa_montgomery(temporary_basedir):
    from ptbench.scripts.train import train

    runner = CliRunner()

    with stdout_logging() as buf:
        output_folder = str(temporary_basedir / "results")
        result = runner.invoke(
            train,
            [
                "pasa",
                "montgomery",
                "-vv",
                "--epochs=1",
                "--batch-size=1",
                f"--output-folder={output_folder}",
            ],
        )
        _assert_exit_0(result)

        assert os.path.exists(
            os.path.join(output_folder, "model_final_epoch.ckpt")
        )
        assert os.path.exists(
            os.path.join(output_folder, "model_lowest_valid_loss.ckpt")
        )
        assert os.path.exists(os.path.join(output_folder, "constants.csv"))
        assert (
            len(
                glob.glob(
                    os.path.join(output_folder, "logs", "events.out.tfevents.*")
                )
            )
            == 1
        )
        assert os.path.exists(os.path.join(output_folder, "model_summary.txt"))

        # expected log messages and how many times each must appear
        keywords = {
            r"^Writing command-line for reproduction at .*$": 1,
            r"^Loading dataset:`train` without caching. Trade-off: CPU RAM: less | Disk: more.$": 1,
            r"^Loading dataset:`validation` without caching. Trade-off: CPU RAM: less | Disk: more.$": 1,
            r"^Applying datamodule train sampler balancing...$": 1,
            r"^Balancing samples from dataset using metadata targets `label`$": 1,
            r"^Training for at most 1 epochs.$": 1,
            r"^Uninitialised pasa model - computing z-norm factors from train dataloader.$": 1,
            r"^Saving model summary at.*$": 1,
            r"^Dataset `train` is already setup. Not re-instantiating it.$": 1,
            r"^Dataset `validation` is already setup. Not re-instantiating it.$": 1,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_train_pasa_montgomery_from_checkpoint(temporary_basedir):
    from ptbench.scripts.train import train

    runner = CliRunner()

    output_folder = str(temporary_basedir / "results/pasa_checkpoint")
    result0 = runner.invoke(
        train,
        [
            "pasa",
            "montgomery",
            "-vv",
            "--epochs=1",
            "--batch-size=1",
            f"--output-folder={output_folder}",
        ],
    )
    _assert_exit_0(result0)

    assert os.path.exists(os.path.join(output_folder, "model_final_epoch.ckpt"))
    assert os.path.exists(
        os.path.join(output_folder, "model_lowest_valid_loss.ckpt")
    )
    assert os.path.exists(os.path.join(output_folder, "constants.csv"))
    assert (
        len(
            glob.glob(
                os.path.join(output_folder, "logs", "events.out.tfevents.*")
            )
        )
        == 1
    )
    assert os.path.exists(os.path.join(output_folder, "model_summary.txt"))

    with stdout_logging() as buf:
        result = runner.invoke(
            train,
            [
                "pasa",
                "montgomery",
                "-vv",
                "--epochs=2",
                "--batch-size=1",
                f"--output-folder={output_folder}",
            ],
        )
        _assert_exit_0(result)

        assert os.path.exists(
            os.path.join(output_folder, "model_lowest_valid_loss.ckpt")
        )
        assert os.path.exists(os.path.join(output_folder, "constants.csv"))
        assert (
            len(
                glob.glob(
                    os.path.join(output_folder, "logs", "events.out.tfevents.*")
                )
            )
            == 2
        )
        assert os.path.exists(os.path.join(output_folder, "model_summary.txt"))

        keywords = {
            r"^Writing command-line for reproduction at .*$": 1,
            r"^Loading dataset:`train` without caching. Trade-off: CPU RAM: less | Disk: more.$": 1,
            r"^Loading dataset:`validation` without caching. Trade-off: CPU RAM: less | Disk: more.$": 1,
            r"^Applying datamodule train sampler balancing...$": 1,
            r"^Balancing samples from dataset using metadata targets `label`$": 1,
            r"^Training for at most 2 epochs.$": 1,
            r"^Resuming from epoch 0...$": 1,
            r"^Saving model summary at.*$": 1,
            r"^Dataset `train` is already setup. Not re-instantiating it.$": 1,
            r"^Dataset `validation` is already setup. Not re-instantiating it.$": 1,
            r"^Restoring normalizer from checkpoint.$": 1,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_predict_pasa_montgomery(temporary_basedir, datadir):
    from ptbench.scripts.predict import predict

    runner = CliRunner()

    with stdout_logging() as buf:
        output_folder = str(temporary_basedir / "predictions")
        result = runner.invoke(
            predict,
            [
                "pasa",
                "montgomery",
                "-vv",
                "--batch-size=1",
                f"--weight={str(datadir / 'lfs' / 'models' / 'pasa.ckpt')}",
                f"--output-folder={output_folder}",
            ],
        )
        _assert_exit_0(result)

        # check predictions are there
        train_predictions_file = os.path.join(output_folder, "train.csv")
        validation_predictions_file = os.path.join(
            output_folder, "validation.csv"
        )
        test_predictions_file = os.path.join(output_folder, "test.csv")
        assert os.path.exists(train_predictions_file)
        assert os.path.exists(validation_predictions_file)
        assert os.path.exists(test_predictions_file)

        keywords = {
            r"^Restoring normalizer from checkpoint.$": 1,
            r"^Output folder: .*$": 1,
            r"^Loading dataset: * without caching. Trade-off: CPU RAM: less | Disk: more": 3,
            r"^Saving predictions in .*$": 3,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_evaluate_pasa_montgomery(temporary_basedir):
    from ptbench.scripts.evaluate import evaluate

    runner = CliRunner()

    with stdout_logging() as buf:
        prediction_folder = str(temporary_basedir / "predictions")
        output_folder = str(temporary_basedir / "evaluations")
        result = runner.invoke(
            evaluate,
            [
                "-vv",
                "montgomery",
                f"--predictions-folder={prediction_folder}",
                f"--output-folder={output_folder}",
                "--steps=2000",
            ],
        )
        _assert_exit_0(result)

        assert os.path.exists(os.path.join(output_folder, "scores.pdf"))
        assert os.path.exists(os.path.join(output_folder, "plots.pdf"))
        assert os.path.exists(os.path.join(output_folder, "table.txt"))

        keywords = {
            r"^Evaluating threshold on.*$": 1,
            r"^Maximum F1-score of.*$": 4,
            r"^Set --f1_threshold=.*$": 1,
            r"^Set --eer_threshold=.*$": 1,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_train_mlp_montgomery_rs(temporary_basedir, datadir):
    from ptbench.scripts.train import train

    runner = CliRunner()

    with stdout_logging() as buf:
        output_folder = str(temporary_basedir / "results/mlp")
        result = runner.invoke(
            train,
            [
                "mlp",
                "montgomery_rs",
                "-vv",
                "--batch-size=1",
                f"--output-folder={output_folder}",
            ],
        )
        _assert_exit_0(result)

        assert os.path.exists(
            os.path.join(output_folder, "model_lowest_valid_loss.ckpt")
        )
        assert os.path.exists(os.path.join(output_folder, "constants.csv"))
        assert os.path.exists(
            os.path.join(output_folder, "logs_csv", "version_0", "metrics.csv")
        )
        assert os.path.exists(
            os.path.join(output_folder, "logs_tensorboard", "version_0")
        )
        assert os.path.exists(os.path.join(output_folder, "model_summary.txt"))

        keywords = {
            r"^Found \(dedicated\) '__train__' set for training$": 1,
            r"^Found \(dedicated\) '__valid__' set for validation$": 1,
            r"^Continuing from epoch 0$": 1,
            r"^Saving model summary at.*$": 1,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_predict_mlp_montgomery_rs(temporary_basedir, datadir):
    from ptbench.scripts.predict import predict

    runner = CliRunner()

    with stdout_logging() as buf:
        output_folder = str(temporary_basedir / "predictions")
        result = runner.invoke(
            predict,
            [
                "mlp",
                "montgomery_rs",
                "-vv",
                "--batch-size=1",
                "--relevance-analysis",
                f"--weight={str(datadir / 'lfs' / 'models' / 'mlp.ckpt')}",
f"--output-folder={output_folder}",
],
)
_assert_exit_0(result)
# check predictions are there
predictions_file = os.path.join(output_folder, "train/predictions.csv")
RA1 = os.path.join(output_folder, "train_RA.pdf")
RA2 = os.path.join(output_folder, "validation_RA.pdf")
RA3 = os.path.join(output_folder, "test_RA.pdf")
assert os.path.exists(predictions_file)
assert os.path.exists(RA1)
assert os.path.exists(RA2)
assert os.path.exists(RA3)
keywords = {
r"^Loading checkpoint from.*$": 1,
r"^Starting relevance analysis for subset.*$": 3,
r"^Creating and saving plot at.*$": 3,
}
buf.seek(0)
logging_output = buf.read()
for k, v in keywords.items():
assert _str_counter(k, logging_output) == v, (
f"Count for string '{k}' appeared "
f"({_str_counter(k, logging_output)}) "
f"instead of the expected {v}:\nOutput:\n{logging_output}"
)


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_train_logreg_montgomery_rs(temporary_basedir, datadir):
    from ptbench.scripts.train import train

    runner = CliRunner()

    with stdout_logging() as buf:
        output_folder = str(temporary_basedir / "results/logreg")
        result = runner.invoke(
            train,
            [
                "logistic_regression",
                "montgomery_rs",
                "-vv",
                "--batch-size=1",
                f"--output-folder={output_folder}",
            ],
        )
        _assert_exit_0(result)

        assert os.path.exists(
            os.path.join(output_folder, "model_lowest_valid_loss.ckpt")
        )
        assert os.path.exists(os.path.join(output_folder, "constants.csv"))
        assert os.path.exists(
            os.path.join(output_folder, "logs_csv", "version_0", "metrics.csv")
        )
        assert os.path.exists(
            os.path.join(output_folder, "logs_tensorboard", "version_0")
        )
        assert os.path.exists(os.path.join(output_folder, "model_summary.txt"))

        keywords = {
            r"^Found \(dedicated\) '__train__' set for training$": 1,
            r"^Found \(dedicated\) '__valid__' set for validation$": 1,
            r"^Continuing from epoch 0$": 1,
            r"^Saving model summary at.*$": 1,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


@pytest.mark.skip(reason="Test needs to be updated")
@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
def test_predict_logreg_montgomery_rs(temporary_basedir, datadir):
    from ptbench.scripts.predict import predict

    runner = CliRunner()

    with stdout_logging() as buf:
        output_folder = str(temporary_basedir / "predictions")
        result = runner.invoke(
            predict,
            [
                "logistic_regression",
                "montgomery_rs",
                "-vv",
                "--batch-size=1",
                f"--weight={str(datadir / 'lfs' / 'models' / 'logreg.ckpt')}",
                f"--output-folder={output_folder}",
            ],
        )
        _assert_exit_0(result)

        # check predictions are there
        predictions_file = os.path.join(output_folder, "train/predictions.csv")
        wfile = os.path.join(output_folder, "LogReg_Weights.pdf")
        assert os.path.exists(predictions_file)
        assert os.path.exists(wfile)

        keywords = {
            r"^Loading checkpoint from.*$": 1,
            r"^Logistic regression identified: saving model weights.*$": 1,
        }
        buf.seek(0)
        logging_output = buf.read()

        for k, v in keywords.items():
            assert _str_counter(k, logging_output) == v, (
                f"Count for string '{k}' appeared "
                f"({_str_counter(k, logging_output)}) "
                f"instead of the expected {v}:\nOutput:\n{logging_output}"
            )


# @pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
# def test_predict_densenetrs_montgomery(temporary_basedir, datadir):
#     from ptbench.scripts.predict import predict
#
#     runner = CliRunner()
#
#     with stdout_logging() as buf:
#         output_folder = str(temporary_basedir / "predictions")
#         result = runner.invoke(
#             predict,
#             [
#                 "densenet_rs",
#                 "montgomery_f0_rgb",
#                 "-vv",
#                 "--batch-size=1",
#                 f"--weight={str(datadir / 'lfs' / 'models' / 'densenetrs.pth')}",
#                 f"--output-folder={output_folder}",
#                 "--grad-cams",
#             ],
#         )
#         _assert_exit_0(result)
#
#         # check predictions are there
#         predictions_file1 = os.path.join(output_folder, "train/predictions.csv")
#         predictions_file2 = os.path.join(output_folder, "validation/predictions.csv")
#         predictions_file3 = os.path.join(output_folder, "test/predictions.csv")
#         assert os.path.exists(predictions_file1)
#         assert os.path.exists(predictions_file2)
#         assert os.path.exists(predictions_file3)
#
#         # check some grad cams are there
#         cam1 = os.path.join(output_folder, "train/cams/MCUCXR_0002_0_cam.png")
#         cam2 = os.path.join(output_folder, "train/cams/MCUCXR_0126_1_cam.png")
#         cam3 = os.path.join(output_folder, "train/cams/MCUCXR_0275_1_cam.png")
#         cam4 = os.path.join(output_folder, "validation/cams/MCUCXR_0399_1_cam.png")
#         cam5 = os.path.join(output_folder, "validation/cams/MCUCXR_0113_1_cam.png")
#         cam6 = os.path.join(output_folder, "validation/cams/MCUCXR_0013_0_cam.png")
#         cam7 = os.path.join(output_folder, "test/cams/MCUCXR_0027_0_cam.png")
#         cam8 = os.path.join(output_folder, "test/cams/MCUCXR_0094_0_cam.png")
#         cam9 = os.path.join(output_folder, "test/cams/MCUCXR_0375_1_cam.png")
#         assert os.path.exists(cam1)
#         assert os.path.exists(cam2)
#         assert os.path.exists(cam3)
#         assert os.path.exists(cam4)
#         assert os.path.exists(cam5)
#         assert os.path.exists(cam6)
#         assert os.path.exists(cam7)
#         assert os.path.exists(cam8)
#         assert os.path.exists(cam9)
#
#         keywords = {
#             r"^Loading checkpoint from.*$": 1,
#             r"^Total time:.*$": 3,
#             r"^Grad cams folder:.*$": 3,
#         }
#         buf.seek(0)
#         logging_output = buf.read()
#
#         for k, v in keywords.items():
#             assert _str_counter(k, logging_output) == v, (
#                 f"Count for string '{k}' appeared "
#                 f"({_str_counter(k, logging_output)}) "
#                 f"instead of the expected {v}:\nOutput:\n{logging_output}"
#             )