Commit 0e198fc4 authored by Daniel CARRON, committed by André Anjos

[results] Hardcode trainlog and evaluation file names

parent 014bcf0b
Merge request !31: Experiment and model upload to GitLab via MLflow interface
Changes to the evaluate command:

@@ -25,13 +25,13 @@ logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
     .. code:: sh
 
-       mednet evaluate -vv --predictions=path/to/predictions.json --output=evaluation.json
+       mednet evaluate -vv --predictions=path/to/predictions.json
 
  2. Run evaluation on an existing prediction output, tune threshold a priori on the `validation` set:
 
     .. code:: sh
 
-       mednet evaluate -vv --predictions=path/to/predictions.json --output=evaluation.json --threshold=validation
+       mednet evaluate -vv --predictions=path/to/predictions.json --threshold=validation
 """,
 )
 @click.option(
@@ -48,19 +48,18 @@ logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
     cls=ResourceOption,
 )
 @click.option(
-    "--output",
+    "--output-folder",
     "-o",
-    help="""Path to a JSON file in which to save evaluation results
-    (leading directories are created if they do not exist).""",
+    help="Directory in which to store results (created if does not exist)",
     required=True,
-    default="evaluation.json",
-    cls=ResourceOption,
     type=click.Path(
-        file_okay=True,
-        dir_okay=False,
+        file_okay=False,
+        dir_okay=True,
         writable=True,
         path_type=pathlib.Path,
     ),
+    default="results",
+    cls=ResourceOption,
 )
 @click.option(
     "--threshold",
@@ -106,7 +105,7 @@ logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
 @verbosity_option(logger=logger, cls=ResourceOption, expose_value=False)
 def evaluate(
     predictions: pathlib.Path,
-    output: pathlib.Path,
+    output_folder: pathlib.Path,
     threshold: str | float,
     binning: str,
     plot: bool,
@@ -128,13 +127,16 @@ def evaluate(
     )
     from .utils import execution_metadata, save_json_with_backup
 
+    evaluation_filename = "evaluation.json"
+    evaluation_file = pathlib.Path(output_folder) / evaluation_filename
+
     with predictions.open("r") as f:
         predict_data = json.load(f)
 
     # register metadata
     json_data: dict[str, typing.Any] = execution_metadata()
     json_data = {k.replace("_", "-"): v for k, v in json_data.items()}
-    save_json_with_backup(output.with_suffix(".meta.json"), json_data)
+    save_json_with_backup(evaluation_file.with_suffix(".meta.json"), json_data)
 
     if threshold in predict_data:
         # it is the name of a split
@@ -166,8 +168,8 @@
     )
 
     # records full result analysis to a JSON file
-    logger.info(f"Saving evaluation results at `{output}`...")
-    with output.open("w") as f:
+    logger.info(f"Saving evaluation results at `{evaluation_file}`...")
+    with evaluation_file.open("w") as f:
         json.dump(results, f, indent=2, cls=NumpyJSONEncoder)
 
     # dump evaluation results in RST format to screen and file
@@ -181,7 +183,7 @@
     table = tabulate_results(table_data, fmt="rst")
     click.echo(table)
-    table_path = output.with_suffix(".rst")
+    table_path = evaluation_file.with_suffix(".rst")
     logger.info(
         f"Saving evaluation results in table format at `{table_path}`...",
     )
@@ -189,7 +191,7 @@
         f.write(table)
 
     # dump evaluation plots in file
-    figure_path = output.with_suffix(".pdf")
+    figure_path = evaluation_file.with_suffix(".pdf")
     logger.info(f"Saving evaluation figures at `{figure_path}`...")
     if plot:
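Taken together, the hunks above mean the evaluate command now derives every output name from a hardcoded base inside --output-folder. A minimal sketch of the resulting paths, assuming the default folder name results:

import pathlib

output_folder = pathlib.Path("results")            # value of --output-folder
evaluation_file = output_folder / "evaluation.json"

print(evaluation_file)                             # results/evaluation.json
print(evaluation_file.with_suffix(".meta.json"))   # results/evaluation.meta.json (execution metadata)
print(evaluation_file.with_suffix(".rst"))         # results/evaluation.rst (tabulated results)
print(evaluation_file.with_suffix(".pdf"))         # results/evaluation.pdf (figures, only when plotting is enabled)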
Changes to the experiment command:

@@ -101,11 +101,10 @@ def experiment(
     from .train_analysis import train_analysis
 
     logdir = train_output_folder / "logs"
-    output_pdf = train_output_folder / "trainlog.pdf"
     ctx.invoke(
         train_analysis,
         logdir=logdir,
-        output=output_pdf,
+        output_folder=train_output_folder,
     )
     logger.info("Ended train analysis")
@@ -139,12 +138,10 @@
     from .evaluate import evaluate
 
-    evaluation_output = output_folder / "evaluation.json"
-
     ctx.invoke(
         evaluate,
         predictions=predictions_output,
-        output=evaluation_output,
+        output=output_folder,
         threshold="validation",
     )
Changes to the train_analysis command:

@@ -5,7 +5,7 @@
 import pathlib
 
 import click
-from clapper.click import verbosity_option
+from clapper.click import ResourceOption, verbosity_option
 from clapper.logging import setup
 
 # avoids X11/graphical desktop requirement when creating plots
@@ -119,18 +119,23 @@ def create_figures(
     type=click.Path(dir_okay=True, exists=True, path_type=pathlib.Path),
 )
 @click.option(
-    "--output",
+    "--output-folder",
     "-o",
-    help="Name of the output file to create (multi-page .pdf)",
+    help="Directory in which to store results (created if does not exist)",
     required=True,
-    show_default=True,
-    default="trainlog.pdf",
-    type=click.Path(dir_okay=False, file_okay=True, path_type=pathlib.Path),
+    type=click.Path(
+        file_okay=False,
+        dir_okay=True,
+        writable=True,
+        path_type=pathlib.Path,
+    ),
+    default="results",
+    cls=ResourceOption,
 )
 @verbosity_option(logger=logger, expose_value=False)
 def train_analysis(
     logdir: pathlib.Path,
-    output: pathlib.Path,
+    output_folder: pathlib.Path,
 ) -> None:  # numpydoc ignore=PR01
     """Create a plot for each metric in the training logs and saves them in a .pdf file."""
     import matplotlib.pyplot as plt
@@ -138,11 +143,14 @@ def train_analysis(
     from ..utils.tensorboard import scalars_to_dict
 
+    train_log_filename = "trainlog.pdf"
+    train_log_file = pathlib.Path(output_folder) / train_log_filename
+
     data = scalars_to_dict(logdir)
 
-    output.parent.mkdir(parents=True, exist_ok=True)
-    with PdfPages(output) as pdf:
+    train_log_file.parent.mkdir(parents=True, exist_ok=True)
+    with PdfPages(train_log_file) as pdf:
         for figure in create_figures(data):
             pdf.savefig(figure)
             plt.close(figure)
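For reference, a minimal sketch of the multi-page PDF pattern used by train_analysis, with made-up metric data standing in for the tensorboard scalars:

import pathlib

import matplotlib

matplotlib.use("agg")  # avoid any X11/display requirement, as the script does
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

train_log_file = pathlib.Path("results") / "trainlog.pdf"
train_log_file.parent.mkdir(parents=True, exist_ok=True)

with PdfPages(train_log_file) as pdf:
    # one figure (and thus one PDF page) per logged metric
    for name, values in {"loss": [1.0, 0.6, 0.4], "lr": [0.1, 0.1, 0.05]}.items():
        figure = plt.figure()
        plt.plot(values)
        plt.title(name)
        pdf.savefig(figure)
        plt.close(figure)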