diff --git a/bob/ip/binseg/engine/ssltrainer.py b/bob/ip/binseg/engine/ssltrainer.py
index 3ba5e46acd2fb8028e2e6f3aa1e9619428a1d23e..4d1d1c2c2d778b01b7ffc7c31c8b2b7b1cbeb7cc 100644
--- a/bob/ip/binseg/engine/ssltrainer.py
+++ b/bob/ip/binseg/engine/ssltrainer.py
@@ -213,6 +213,10 @@ def run(
     start_epoch = arguments["epoch"]
     max_epoch = arguments["max_epoch"]
 
+    if not os.path.exists(output_folder):
+        logger.debug(f"Creating output directory '{output_folder}'...")
+        os.makedirs(output_folder)
+
     # Log to file
     logfile_name = os.path.join(output_folder, "trainlog.csv")
 
diff --git a/bob/ip/binseg/engine/trainer.py b/bob/ip/binseg/engine/trainer.py
index 501105a9036f35f522bf52bf3dc3b1650813e557..0d575bc99759cc52c479e58da4485857604778bb 100644
--- a/bob/ip/binseg/engine/trainer.py
+++ b/bob/ip/binseg/engine/trainer.py
@@ -72,6 +72,10 @@ def run(
     start_epoch = arguments["epoch"]
     max_epoch = arguments["max_epoch"]
 
+    if not os.path.exists(output_folder):
+        logger.debug(f"Creating output directory '{output_folder}'...")
+        os.makedirs(output_folder)
+
     # Log to file
     logfile_name = os.path.join(output_folder, "trainlog.csv")
 
diff --git a/bob/ip/binseg/script/train.py b/bob/ip/binseg/script/train.py
index ac1ba76987d0e6a2cf6d85161ea91d18d734ad81..3302e9ea59539ef6cf33fbbc6d34ee3f0e46cab2 100644
--- a/bob/ip/binseg/script/train.py
+++ b/bob/ip/binseg/script/train.py
@@ -1,8 +1,7 @@
 #!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
+# coding=utf-8
 
 import os
-import pkg_resources
 
 import click
 from click_plugins import with_plugins
@@ -20,7 +19,6 @@ from bob.extension.scripts.click_helper import (
 from ..utils.checkpointer import DetectronCheckpointer
 
 import logging
-
 logger = logging.getLogger(__name__)
 
 
@@ -50,7 +48,7 @@ logger = logging.getLogger(__name__)
     """,
 )
 @click.option(
-    "--output-path",
+    "--output-folder",
     "-o",
     help="Path where to store the generated model (created if does not exist)",
     required=True,
@@ -193,7 +191,7 @@ def train(
     model,
     optimizer,
     scheduler,
-    output_path,
+    output_folder,
     epochs,
     pretrained_backbone,
     batch_size,
@@ -217,8 +215,6 @@ def train(
     abruptly.
     """
-    if not os.path.exists(output_path):
-        os.makedirs(output_path)
 
     torch.manual_seed(seed)
 
     # PyTorch dataloader
@@ -232,7 +228,7 @@ def train(
 
     # Checkpointer
     checkpointer = DetectronCheckpointer(
-        model, optimizer, scheduler, save_dir=output_path, save_to_disk=True
+        model, optimizer, scheduler, save_dir=output_folder, save_to_disk=True
     )
 
     arguments = {}
@@ -256,7 +252,7 @@ def train(
             checkpoint_period,
             device,
             arguments,
-            output_path,
+            output_folder,
         )
 
     else:
@@ -271,6 +267,6 @@ def train(
             checkpoint_period,
             device,
             arguments,
-            output_path,
+            output_folder,
             rampup,
         )
diff --git a/doc/training.rst b/doc/training.rst
index 7096bb8ad14d0c82ce12f7c65fd750bd1482018b..65cf666937b5751dd4c5e3bf70d924d32ad96dc6 100644
--- a/doc/training.rst
+++ b/doc/training.rst
@@ -33,6 +33,7 @@
 card, for supervised training of baselines.  Use it like this:
 
    # change <model> and <dataset> by one of items bellow
    $ bob binseg train -vv <model> <dataset> --batch-size=<see-table> --device="cuda:0"
+   # check results in the "results" folder
 
 .. list-table::