Commit fcb54756 authored by André Anjos

[predictor] Multiple fixes so it works comfortably

parent 3f99e03c
1 merge request: !12 (Streamlining)
Pipeline #38785 passed
@@ -4,10 +4,13 @@
import os
import time
import datetime
-import numpy as np
+import numpy
import torch
from tqdm import tqdm
import bob.io.base
import logging
logger = logging.getLogger(__name__)
@@ -35,9 +38,11 @@ def save_hdf5(predictions, names, output_folder):
        img = predictions.cpu().data[j].squeeze(0).numpy()
        filename = "{}.hdf5".format(names[j].split(".")[0])
        fullpath = os.path.join(output_folder, filename)
-        logger.info(f"saving {filename}")
+        tqdm.write(f"Saving {fullpath}...")
        fulldir = os.path.dirname(fullpath)
        if not os.path.exists(fulldir):
+            tqdm.write(f"Creating directory {fulldir}...")
+            # protect against concurrent access - exist_ok=True
            os.makedirs(fulldir, exist_ok=True)
        bob.io.base.save(img, fullpath)
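
The switch from logger.info() to tqdm.write() in this hunk presumably keeps the progress bar readable while files are being written. A minimal, self-contained sketch (not part of the commit) of that behaviour:

    import time
    from tqdm import tqdm

    # tqdm.write() prints above an active progress bar without corrupting its
    # in-place redraw, which a plain print() to the same console typically does
    for batch in tqdm(range(3), desc="predicting"):
        tqdm.write(f"Saving outputs for batch {batch}...")
        time.sleep(0.1)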
@@ -62,8 +67,13 @@ def run(model, data_loader, device, output_folder):
"""
logger.info("Start prediction")
logger.info(f"Output folder: {output_folder}, Device: {device}")
os.makedirs(output_folder, exist_ok=True)
logger.info(f"Output folder: {output_folder}")
logger.info(f"Device: {device}")
if not os.path.exists(output_folder):
logger.debug(f"Creating output directory '{output_folder}'...")
# protect against concurrent access - exist_ok=True
os.makedirs(output_folder, exist_ok=True)
model.eval().to(device)
# Sigmoid for probabilities
@@ -105,8 +115,8 @@ def run(model, data_loader, device, output_folder):
    total_time = datetime.timedelta(seconds=int(time.time() - start_total_time))
    logger.info(f"Total time: {total_time}")
-    average_batch_time = np.mean(times)
+    average_batch_time = numpy.mean(times)
    logger.info(f"Average batch time: {average_batch_time:g}s\n")
-    average_image_time = np.sum(times * len_samples) / float(sum(len_samples))
+    average_image_time = numpy.sum(numpy.array(times) * len_samples) / float(sum(len_samples))
    logger.info(f"Average image time: {average_image_time:g}s\n")
#!/usr/bin/env python
# coding=utf-8
import os
import click
from click_plugins import with_plugins
@@ -15,6 +17,7 @@ from bob.extension.scripts.click_helper import (
)
from ..engine.predictor import run
from ..utils.checkpointer import DetectronCheckpointer
import logging
logger = logging.getLogger(__name__)
@@ -27,7 +30,7 @@ logger = logging.getLogger(__name__)
\b
1. Runs prediction on an existing dataset configuration:
\b
$ bob binseg predict -vv m2unet drive-test --weight=path/to/model_final.pth --output-path=path/to/predictions
\b
2. To run prediction on a folder with your own images, you must first
@@ -36,10 +39,10 @@ logger = logging.getLogger(__name__)
performance. To figure out such specifications, you must consult the
dataset configuration used for **training** the provided model. Once
you figured this out, do the following:
-$ bob binseg config copy image-folder myfolder.py
-# modify "myfolder.py" to include the base path and required transforms
-$ bob binseg predict -vv m2unet myfolder.py --weight=path/to/model_final.pth --output-path=path/to/predictions
\b
+$ bob binseg config copy folder-dataset-example mydataset.py
+# modify "mydataset.py" to include the base path and required transforms
+$ bob binseg predict -vv m2unet mydataset.py --weight=path/to/model_final.pth --output-path=path/to/predictions
""",
)
@click.option(
@@ -102,8 +105,11 @@ def predict(output_path, model, dataset, batch_size, device, weight, **kwargs):
    )
    # checkpointer, loads pre-fit model
-    checkpointer = DetectronCheckpointer(model, save_dir=output_path,
+    weight_fullpath = os.path.abspath(weight)
+    weight_path = os.path.dirname(weight_fullpath)
+    weight_name = os.path.basename(weight_fullpath)
+    checkpointer = DetectronCheckpointer(model, save_dir=weight_path,
                                          save_to_disk=False)
-    checkpointer.load(weight)
+    checkpointer.load(weight_name)
    run(model, data_loader, device, output_path)
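
A sketch of what the new weight handling computes, using a hypothetical --weight value: the checkpointer is now rooted at the directory that actually contains the weights file, and the file is loaded by name, instead of rooting the checkpointer at the output path:

    import os

    weight = "experiments/m2unet/model_final.pth"    # hypothetical --weight value

    weight_fullpath = os.path.abspath(weight)        # e.g. /home/user/experiments/m2unet/model_final.pth
    weight_path = os.path.dirname(weight_fullpath)   # .../experiments/m2unet  -> save_dir for the checkpointer
    weight_name = os.path.basename(weight_fullpath)  # model_final.pth         -> argument to checkpointer.load()
    print(weight_path, weight_name)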
@@ -6,13 +6,22 @@
Inference and Evaluation
==========================
+This guide explains how to run inference or a complete evaluation using
+command-line tools. Inference produces probability maps for input images,
+while evaluation will analyze such output against existing annotations and
+produce performance figures.
Inference
---------
You may use one of your trained models (or :ref:`one of ours
<bob.ip.binseg.models>`) to run inference on existing datasets or your own
-dataset.
+dataset. In inference (or prediction) mode, we input data, the trained model,
+and output HDF5 files containing the prediction outputs for every input image.
+Each HDF5 file contains a single object with a 2-dimensional matrix of floating
+point numbers indicating the vessel probability (``[0.0,1.0]``) for each pixel
+in the input image.
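
For a quick sanity check of one such file, something along these lines should work (a sketch; the file name is hypothetical, and ``bob.io.base.load`` mirrors the ``bob.io.base.save`` call used by the predictor):

.. code-block:: python

   import bob.io.base

   # hypothetical output file written by "bob binseg predict"
   probabilities = bob.io.base.load("path/to/predictions/image01.hdf5")

   # a 2D array of floats: one vessel probability in [0.0, 1.0] per pixel
   print(probabilities.shape, probabilities.min(), probabilities.max())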
Inference on existing datasets
@@ -43,7 +52,6 @@ you need to instantiate one of:
Read the appropriate module documentation for details.

.. code-block:: bash

   $ bob binseg config copy folder-dataset-example mydataset.py
@@ -53,16 +61,18 @@ Read the appropriate module documentation for details.
$ bob binseg predict -vv <model> -w <path/to/model.pth> ./mydataset.py
Inference typically consumes fewer resources than training, but you may speed
things up using ``--device='cuda:0'`` in case you have a GPU.
Evaluation
----------
-To evaluate trained models use our CLI interface. ``bob binseg evaluate``
-followed by the model and the dataset configuration, and the path to the
-pretrained model via the argument ``--weight``.
-Alternatively point to the output folder used during training via the
-``--output-path`` argument. The Checkpointer will load the model as indicated
-in the file: ``last_checkpoint``.
+In evaluation we input an **annotated** dataset and a pre-trained model to
+output a complete set of performance figures that help analyze model
+performance. Evaluation is done using ``bob binseg evaluate`` followed by the
+model and the dataset configuration, and the path to the pretrained model via
+the ``--weight`` argument.
Use ``bob binseg evaluate --help`` for more information.