Commit d606f37e authored by André Anjos

[tests] Remove tests for ptbench compare for now

parent cc0b6c3b
Merge request !6: Making use of LightningDataModule and simplification of data loading
Pipeline #77537 passed
@@ -154,12 +154,6 @@ def test_evaluate_help():
     _check_help(evaluate)


-def test_compare_help():
-    from ptbench.scripts.compare import compare
-
-    _check_help(compare)
-
-
 @pytest.mark.skip(reason="Test need to be updated")
 @pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
 def test_train_pasa_montgomery(temporary_basedir):

@@ -410,51 +404,6 @@ def test_evaluate_pasa_montgomery(temporary_basedir):
             )


-@pytest.mark.skip(reason="Test need to be updated")
-@pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
-def test_compare_pasa_montgomery(temporary_basedir):
-    from ptbench.scripts.compare import compare
-
-    runner = CliRunner()
-
-    with stdout_logging() as buf:
-        predictions_folder = str(temporary_basedir / "predictions")
-        output_folder = str(temporary_basedir / "comparisons")
-        result = runner.invoke(
-            compare,
-            [
-                "-vv",
-                "train",
-                f"{predictions_folder}/train/predictions.csv",
-                "test",
-                f"{predictions_folder}/test/predictions.csv",
-                f"--output-figure={output_folder}/compare.pdf",
-                f"--output-table={output_folder}/table.txt",
-                "--threshold=0.5",
-            ],
-        )
-        _assert_exit_0(result)
-
-        # check comparisons are there
-        assert os.path.exists(os.path.join(output_folder, "compare.pdf"))
-        assert os.path.exists(os.path.join(output_folder, "table.txt"))
-
-        keywords = {
-            r"^Dataset '\*': threshold =.*$": 1,
-            r"^Loading predictions from.*$": 2,
-            r"^Tabulating performance summary...": 1,
-        }
-        buf.seek(0)
-        logging_output = buf.read()
-
-        for k, v in keywords.items():
-            assert _str_counter(k, logging_output) == v, (
-                f"Count for string '{k}' appeared "
-                f"({_str_counter(k, logging_output)}) "
-                f"instead of the expected {v}:\nOutput:\n{logging_output}"
-            )
-
-
 @pytest.mark.skip(reason="Test need to be updated")
 @pytest.mark.skip_if_rc_var_not_set("datadir.montgomery")
 def test_train_mlp_montgomery_rs(temporary_basedir, datadir):