Skip to content
Snippets Groups Projects
Commit 20364461 authored by André Anjos's avatar André Anjos :speech_balloon:
Browse files

[engine.trainer] Move creation of output_folder into engine; Call train...

[engine.trainer] Move creation of output_folder into engine; Call train argument output-path -> output-folder to make it more explicit
parent 48ebdccd
No related branches found
No related tags found
1 merge request!12Streamlining
......@@ -213,6 +213,10 @@ def run(
start_epoch = arguments["epoch"]
max_epoch = arguments["max_epoch"]
if not os.path.exists(output_folder):
logger.debug(f"Creating output directory '{output_folder}'...")
os.makedirs(output_folder)
# Log to file
logfile_name = os.path.join(output_folder, "trainlog.csv")
......
......@@ -72,6 +72,10 @@ def run(
start_epoch = arguments["epoch"]
max_epoch = arguments["max_epoch"]
if not os.path.exists(output_folder):
logger.debug(f"Creating output directory '{output_folder}'...")
os.makedirs(output_folder)
# Log to file
logfile_name = os.path.join(output_folder, "trainlog.csv")
......
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# coding=utf-8
import os
import pkg_resources
import click
from click_plugins import with_plugins
......@@ -20,7 +19,6 @@ from bob.extension.scripts.click_helper import (
from ..utils.checkpointer import DetectronCheckpointer
import logging
logger = logging.getLogger(__name__)
......@@ -50,7 +48,7 @@ logger = logging.getLogger(__name__)
""",
)
@click.option(
"--output-path",
"--output-folder",
"-o",
help="Path where to store the generated model (created if does not exist)",
required=True,
......@@ -193,7 +191,7 @@ def train(
model,
optimizer,
scheduler,
output_path,
output_folder,
epochs,
pretrained_backbone,
batch_size,
......@@ -217,8 +215,6 @@ def train(
abruptly.
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
torch.manual_seed(seed)
# PyTorch dataloader
......@@ -232,7 +228,7 @@ def train(
# Checkpointer
checkpointer = DetectronCheckpointer(
model, optimizer, scheduler, save_dir=output_path, save_to_disk=True
model, optimizer, scheduler, save_dir=output_folder, save_to_disk=True
)
arguments = {}
......@@ -256,7 +252,7 @@ def train(
checkpoint_period,
device,
arguments,
output_path,
output_folder,
)
else:
......@@ -271,6 +267,6 @@ def train(
checkpoint_period,
device,
arguments,
output_path,
output_folder,
rampup,
)
......@@ -33,6 +33,7 @@ card, for supervised training of baselines. Use it like this:
# change <model> and <dataset> to one of the items below
$ bob binseg train -vv <model> <dataset> --batch-size=<see-table> --device="cuda:0"
# check results in the "results" folder
.. list-table::
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment