diff --git a/src/mednet/scripts/upload.py b/src/mednet/scripts/upload.py
index 0b0f1417c5c45ebda7a1e9e2da8d429cc3d96271..0b4f01e74dd97befdc6f7a48694b0e125e1cbf60 100644
--- a/src/mednet/scripts/upload.py
+++ b/src/mednet/scripts/upload.py
@@ -2,17 +2,16 @@
 #
 # SPDX-License-Identifier: GPL-3.0-or-later
 
-import os
+import configparser
 import json
+import os
 import pathlib
 import shutil
 import tempfile
-import configparser
 
+import click
 import gitlab
 import mlflow
-
-import click
 from clapper.click import ResourceOption, verbosity_option
 from clapper.logging import setup
 
@@ -20,15 +19,16 @@ from .click import ConfigCommand
 
 logger = setup(__name__.split(".")[0], format="%(levelname)s: %(message)s")
 
+
 def get_config():
     """Return an instance of the Gitlab object for remote operations.
-    
+
     Returns
     -------
         Gitlab entry and credential.
     """
 
-    cfg = pathlib.Path(os.path.expanduser("~/.python-gitlab.cfg"))
+    cfg = pathlib.Path("~/.python-gitlab.cfg").expanduser()
     if cfg.exists():
         gl = gitlab.Gitlab.from_config("idiap", [str(cfg)])
         config = configparser.ConfigParser()
@@ -41,8 +41,9 @@ def get_config():
 
     return gl, config
 
+
 def create_temp_copy(source, target):
-    """ Create a copy of original file in temp folder.
+    """Create a copy of the original file in the temp folder.
 
     Parameters
     ----------
@@ -56,11 +57,12 @@ def create_temp_copy(source, target):
         Path to target file in temp folder.
     """
 
-    temp_dir = tempfile.gettempdir()
-    target = os.path.join(temp_dir, target)
+    temp_dir = pathlib.Path(tempfile.gettempdir())
+    target = temp_dir / target
     shutil.copy2(source, target)
     return target
 
+
 @click.command(
     entry_point_group="mednet.config",
     cls=ConfigCommand,
@@ -86,30 +88,30 @@ def create_temp_copy(source, target):
 """,
 )
 @click.option(
-        "--output-folder",
-        "-o",
-        help="Directory in which to upload results from",
-        required=True,
-        type=click.Path(
-            file_okay=False,
-            dir_okay=True,
-            path_type=pathlib.Path,
-        ),
-        default="results",
-        cls=ResourceOption,
-    )
+    "--output-folder",
+    "-o",
+    help="Directory from which to upload results",
+    required=True,
+    type=click.Path(
+        file_okay=False,
+        dir_okay=True,
+        path_type=pathlib.Path,
+    ),
+    default="results",
+    cls=ResourceOption,
+)
 @click.option(
-        "--experiment-name",
-        "-e",
-        help='A string indicating the experiment name (e.g. "exp-pasa_mc" or "exp-densenet_mc-ch")',
-        cls=ResourceOption,
-    )
+    "--experiment-name",
+    "-e",
+    help='A string indicating the experiment name (e.g. "exp-pasa_mc" or "exp-densenet_mc-ch")',
+    cls=ResourceOption,
+)
 @click.option(
-        "--run-name",
-        "-r",
-        help='A string indicating the run name (e.g. "run-1")',
-        cls=ResourceOption,
-    )
+    "--run-name",
+    "-r",
+    help='A string indicating the run name (e.g. "run-1")',
+    cls=ResourceOption,
+)
 @verbosity_option(logger=logger, cls=ResourceOption, expose_value=False)
 def upload(
     output_folder: pathlib.Path,
@@ -119,10 +121,9 @@ def upload(
 ) -> None:  # numpydoc ignore=PR01
     """Upload results from an experiment output folder."""
 
-    logger.info(f"Getting Gitlab credentials for accessing to MLFlow server...")
+    logger.info("Getting Gitlab credentials for accessing the MLFlow server...")
     gitlab, config = get_config()
-    project = gitlab.projects.get('biosignal/software/mednet')
-    tracking_uri = gitlab.api_url + f"/projects/{project.id}/ml/mlflow"
+    project = gitlab.projects.get("biosignal/software/mednet")
     os.environ["MLFLOW_TRACKING_TOKEN"] = config["idiap"]["private_token"]
     os.environ["MLFLOW_TRACKING_URI"] = (
         gitlab.api_url + f"/projects/{project.id}/ml/mlflow"
@@ -132,12 +133,14 @@ def upload(
     train_output_folder = output_folder / "model"
     train_meta_file = train_output_folder / "meta.json"
     train_log_file = train_output_folder / "trainlog.pdf"
-    train_model_file =  [str(f) for f in train_output_folder.glob("*lowest*")][0]
-    train_model_temp_file = train_model_file.split(os.sep)[-1].replace("=", "_")
-    train_model_temp_file = create_temp_copy(train_model_file, train_model_temp_file)
+    train_model_file = [f for f in train_output_folder.glob("*lowest*")][0]
+    train_model_temp_file = train_model_file.parts[-1].replace("=", "_")
+    train_model_temp_file = create_temp_copy(
+        train_model_file, train_model_temp_file
+    )
     with train_meta_file.open("r") as f:
         meta_data = json.load(f)
-    
+
     # prepare evaluation files
     evaluation_file = output_folder / "evaluation.json"
     evaluation_meta_file = output_folder / "evaluation.meta.json"
@@ -147,29 +150,34 @@ def upload(
     test_data = evaluation_data["test"]
 
     # prepare experiment and run names
-    experiment_name = experiment_name if experiment_name else f'{meta_data["model-name"]}_{meta_data["database-name"]}'
+    experiment_name = (
+        experiment_name
+        if experiment_name
+        else f'{meta_data["model-name"]}_{meta_data["database-name"]}'
+    )
     run_name = run_name if run_name else meta_data["datetime"]
 
-    logger.info(f"Setting experiment and run names on the MLFlow server...")
+    logger.info("Setting experiment and run names on the MLFlow server...")
     mlflow.set_experiment(experiment_name=experiment_name)
-    with mlflow.start_run(run_name=run_name) as run:
+    with mlflow.start_run(run_name=run_name):
         # upload metrics
-        logger.info(f"Uploading metrics to MLFlow server...")
+        logger.info("Uploading metrics to MLFlow server...")
         mlflow.log_metric("threshold", test_data["threshold"])
         mlflow.log_metric("precision", test_data["precision"])
         mlflow.log_metric("recall", test_data["recall"])
         mlflow.log_metric("f1_score", test_data["f1_score"])
-        mlflow.log_metric("average_precision_score", test_data["average_precision_score"])
+        mlflow.log_metric(
+            "average_precision_score", test_data["average_precision_score"]
+        )
         mlflow.log_metric("specificity", test_data["specificity"])
         mlflow.log_metric("auc_score", test_data["auc_score"])
         mlflow.log_metric("accuracy", test_data["accuracy"])
         mlflow.log_param("version", meta_data["package-version"])
         # upload artifacts
-        logger.info(f"Uploading artifacts to MLFlow server...")
+        logger.info("Uploading artifacts to MLFlow server...")
         mlflow.log_artifact(train_meta_file)
         mlflow.log_artifact(train_log_file)
         mlflow.log_artifact(train_model_temp_file)
         mlflow.log_artifact(evaluation_file)
         mlflow.log_artifact(evaluation_meta_file)
         mlflow.log_artifact(evaluation_log_file)
-