Commit 67ca29f4 authored by André Anjos
[scripts.experiment] Reflect changes from evaluation; closes #44 after noticing train-analysis is performed
parent 1ad21347
1 merge request: !6 Making use of LightningDataModule and simplification of data loading
Pipeline #77146 failed
@@ -12,8 +12,6 @@ from clapper.logging import setup

 logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")

-from .utils import save_sh_command
-

 @click.command(
     entry_point_group="ptbench.config",
@@ -21,12 +19,13 @@ from .utils import save_sh_command
     epilog="""Examples:

 \b
- 1. Trains a pasa model with shenzhen dataset, on the CPU, for only two epochs, then runs inference and
-    evaluation on stock datasets, report performance as a table and a figure:
+ 1. Trains a pasa model with montgomery dataset, on the CPU, for only two
+    epochs, then runs inference and evaluation on stock datasets, report
+    performance as a table and a figure:

 .. code:: sh

-    $ ptbench experiment -vv pasa shenzhen --epochs=2
+    $ ptbench experiment -vv pasa montgomery --epochs=2
 """,
 )
 @click.option(
@@ -199,36 +198,12 @@ from .utils import save_sh_command
     "-B/-N",
     help="""If set, then balances weights of the random sampler during
     training, so that samples from all sample classes are picked picked
-    equitably. It also sets the training (and validation) losses to account
-    for the populations of each class.""",
+    equitably.""",
     required=True,
     show_default=True,
     default=True,
     cls=ResourceOption,
 )
-@click.option(
-    "--steps",
-    "-S",
-    help="This number is used to define the number of threshold steps to "
-    "consider when evaluating the highest possible F1-score on test data.",
-    default=1000,
-    show_default=True,
-    required=True,
-    cls=ResourceOption,
-)
-@click.option(
-    "--plot-limits",
-    "-L",
-    help="""If set, this option affects the performance comparison plots. It
-    must be a 4-tuple containing the bounds of the plot for the x and y axis
-    respectively (format: x_low, x_high, y_low, y_high]). If not set, use
-    normal bounds ([0, 1, 0, 1]) for the performance curve.""",
-    default=[0.0, 1.0, 0.0, 1.0],
-    show_default=True,
-    nargs=4,
-    type=float,
-    cls=ResourceOption,
-)
 @verbosity_option(logger=logger, cls=ResourceOption)
 @click.pass_context
 def experiment(
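For context on what -B/--balance-classes does: balancing of this kind is usually implemented by re-weighting PyTorch's random sampler by inverse class frequency. A minimal sketch under that assumption (the helper below is illustrative, not ptbench's actual code):

.. code:: python

   # Sketch: build a class-balanced sampler; each sample is weighted by the
   # inverse frequency of its class, so all classes are drawn equitably.
   import torch
   from torch.utils.data import DataLoader, WeightedRandomSampler

   def make_balanced_sampler(labels: list[int]) -> WeightedRandomSampler:
       targets = torch.tensor(labels)
       counts = torch.bincount(targets)         # samples per class
       weights = 1.0 / counts[targets].float()  # inverse class frequency
       return WeightedRandomSampler(
           weights, num_samples=len(weights), replacement=True
       )

   # Usage: hand the sampler to the training loader instead of shuffle=True:
   # loader = DataLoader(dataset, batch_size=32,
   #                     sampler=make_balanced_sampler(labels))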
@@ -248,8 +223,7 @@ def experiment(
     monitoring_interval,
     resume_from,
     balance_classes,
-    steps,
-    **kwargs,
+    **_,
 ):
     """Runs a complete experiment, from training, to prediction and evaluation.
@@ -260,10 +234,11 @@ def experiment(
     \b
     └─ <output-folder>/
        ├── command
-       ├── model/  #the generated model will be here
-       ├── predictions/  #the prediction outputs for the sets
-       └── evaluations/  #the outputs of the evaluations for the sets
+       ├── model/  # the generated model will be here
+       ├── predictions/  # the prediction outputs for the sets
+       └── evaluation/  # the outputs of the evaluations for the sets
     """
+    from .utils import save_sh_command

     command_sh = os.path.join(output_folder, "command.sh")
     if os.path.exists(command_sh):
@@ -342,15 +317,13 @@ def experiment(
     from .evaluate import evaluate

-    evaluations_folder = os.path.join(output_folder, "evaluations")
+    evaluations_folder = os.path.join(output_folder, "evaluation")

     ctx.invoke(
         evaluate,
         output_folder=evaluations_folder,
-        predictions_folder=predictions_folder,
-        datamodule=datamodule,
-        threshold="train",
-        steps=steps,
+        predictions=os.path.join(predictions_folder, "predictions.json"),
+        threshold="validation",
     )

     logger.info("Ended evaluating")
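Note the behavioural change in this hunk: predictions are now read from a single predictions.json file, and the decision threshold is chosen on the validation split ("validation") rather than the training split ("train"). The general pattern behind that choice, sketched by reusing best_f1_threshold from above (variable names are assumptions):

.. code:: python

   # Sketch: tune the decision threshold on validation data only, then apply
   # it unchanged to the test split -- no re-tuning on test data.
   _, threshold = best_f1_threshold(val_labels, val_scores, steps=1000)
   test_decisions = test_scores >= threshold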
@@ -194,8 +194,7 @@ logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
     "-B/-N",
     help="""If set, then balances weights of the random sampler during
     training, so that samples from all sample classes are picked picked
-    equitably. It also sets the training (and validation) losses to account
-    for the populations of each class.""",
+    equitably.""",
     required=True,
     show_default=True,
     default=True,
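This second hunk applies the same help-text trim to the training script. Since the commit lands in merge request !6 ("Making use of LightningDataModule..."), the natural home for the balancing behaviour is the data module's training loader. A hedged sketch of that wiring (class and attribute names are invented for illustration, not ptbench's real module):

.. code:: python

   # Sketch: a LightningDataModule that applies class balancing only to the
   # training loader; illustrative names, not ptbench's actual code.
   import lightning.pytorch as pl
   from torch.utils.data import DataLoader

   class SketchDataModule(pl.LightningDataModule):
       def __init__(self, dataset, labels, balance_classes: bool = True):
           super().__init__()
           self.dataset = dataset
           self.labels = labels
           self.balance_classes = balance_classes

       def train_dataloader(self) -> DataLoader:
           if self.balance_classes:
               # make_balanced_sampler() as sketched earlier on this page
               sampler = make_balanced_sampler(self.labels)
               return DataLoader(self.dataset, batch_size=32, sampler=sampler)
           return DataLoader(self.dataset, batch_size=32, shuffle=True)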