diff --git a/src/ptbench/configs/datasets/__init__.py b/src/ptbench/configs/datasets/__init__.py
index d5b0352ab4317b08057ee21750ad3bfe95ef54c3..400d54231f7f5dabe9dd5d5426f2f77dfbc21362 100644
--- a/src/ptbench/configs/datasets/__init__.py
+++ b/src/ptbench/configs/datasets/__init__.py
@@ -136,7 +136,6 @@ def make_dataset(
     t_transforms += post_transforms
 
     for key in subsets.keys():
-
         retval[key] = make_subset(
             subsets[key], transforms=transforms, suffixes=post_transforms
         )
@@ -187,10 +186,8 @@ def get_samples_weights(dataset):
 
     if isinstance(dataset, torch.utils.data.ConcatDataset):
         for ds in dataset.datasets:
-
             # Weighting only for binary labels
             if isinstance(ds._samples[0].label, int):
-
                 # Groundtruth
                 targets = []
                 for s in ds._samples:
@@ -272,7 +269,6 @@ def get_positive_weights(dataset):
     targets = []
 
     if isinstance(dataset, torch.utils.data.ConcatDataset):
-
         for ds in dataset.datasets:
             for s in ds._samples:
                 targets.append(s.label)
diff --git a/src/ptbench/data/transforms.py b/src/ptbench/data/transforms.py
index 19b1cb6b3ab5972e25fb8d29540d8163399a91dd..c1f1f7d0a4457fb441ddfefa5bd53c9e57bd2a60 100644
--- a/src/ptbench/data/transforms.py
+++ b/src/ptbench/data/transforms.py
@@ -78,7 +78,6 @@ class ElasticDeformation:
 
     def __call__(self, img):
         if random.random() < self.p:
-
             img = numpy.asarray(img)
 
             assert img.ndim == 2
diff --git a/src/ptbench/engine/evaluator.py b/src/ptbench/engine/evaluator.py
index 5e86ee855aad524ab942b054952afac3000816b7..acbf7f3641b219e9212e024d0a4b1afe6ddf44f7 100644
--- a/src/ptbench/engine/evaluator.py
+++ b/src/ptbench/engine/evaluator.py
@@ -292,7 +292,6 @@ def run(
         fig.savefig(fullpath)
 
     if f1_thresh is not None and eer_thresh is not None:
-
         # get the closest possible threshold we have
         index = int(round(steps * f1_thresh))
         f1_a_priori = data_df["f1_score"][index]
diff --git a/src/ptbench/engine/predictor.py b/src/ptbench/engine/predictor.py
index 27d9d2142f761b401f1709e85132fd01fde50349..6c4dd4af73bcd1ff868a2f20964f9d7980ddd2a5 100644
--- a/src/ptbench/engine/predictor.py
+++ b/src/ptbench/engine/predictor.py
@@ -114,7 +114,6 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
             leave=False,
             disable=None,
         ):
-
             names = samples[0]
             images = samples[1].to(
                 device=device, non_blocking=torch.cuda.is_available()
@@ -133,10 +132,8 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
                 topk = 1
 
                 for i in range(topk):
-
                     # Keep only "positive" signs
                     if probs[:, [i]] > 0.5:
-
                         # Grad-CAM
                         b = ids[:, [i]]
                         gcam.backward(ids=ids[:, [i]])
@@ -145,7 +142,6 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
                         )
 
                         for j in range(len(images)):
-
                             current_cam = regions[j, 0].cpu().numpy()
                             current_cam[current_cam < 0.75] = 0.0
                             current_cam[current_cam >= 0.75] = 1.0
@@ -158,14 +154,12 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
                             ]
 
                 if len(cams_img) > 0:
-
                     # Convert original image tensor into PIL Image
                     original_image = transforms.ToPILImage(mode="RGB")(
                         images[0]
                     )
 
                     for sign_id, label_prob in cams_img.items():
-
                         label = label_prob[0]
 
                         # Create the colored overlay for current sign
@@ -222,7 +216,6 @@ def run(model, data_loader, name, device, output_folder, grad_cams=False):
                     fig.savefig(cam_filename)
 
             with torch.no_grad():
-
                 start_time = time.perf_counter()
                 outputs = model(images)
                 probabilities = torch.sigmoid(outputs)
diff --git a/src/ptbench/engine/trainer.py b/src/ptbench/engine/trainer.py
index 2e3d91a60387edffea0abdcf85eea54cc97fc32d..8cc72514f863e1f7d8447ebd964429c6fbeeef55 100644
--- a/src/ptbench/engine/trainer.py
+++ b/src/ptbench/engine/trainer.py
@@ -225,7 +225,6 @@ def train_epoch(loader, model, optimizer, device, criterion, batch_chunk_count):
     for idx, samples in enumerate(
         tqdm(loader, desc="train", leave=False, disable=None)
     ):
-
         images = samples[1].to(
             device=device, non_blocking=torch.cuda.is_available()
         )
@@ -310,7 +309,6 @@ def validate_epoch(loader, model, device, criterion, pbar_desc):
     samples_in_batch = []
 
     with torch.no_grad(), torch_evaluation(model):
-
         for samples in tqdm(loader, desc=pbar_desc, leave=False, disable=None):
             images = samples[1].to(
                 device=device,
@@ -599,7 +597,6 @@ def run(
             leave=False,
             disable=None,
         ):
-
             with ResourceMonitor(
                 interval=monitoring_interval,
                 has_gpu=(device.type == "cuda"),
diff --git a/src/ptbench/scripts/aggregpred.py b/src/ptbench/scripts/aggregpred.py
index 9c7a082a21e7f0faa0fa6dbeb33779d78438d4f2..a6d1974769f707292ee41e0f0faba9017183414e 100644
--- a/src/ptbench/scripts/aggregpred.py
+++ b/src/ptbench/scripts/aggregpred.py
@@ -46,7 +46,6 @@ def aggregpred(label_path, output_folder) -> None:
     # loads all data
     series = []
     for predictions_path in label_path:
-
         # Load predictions
         logger.info(f"Loading predictions from {predictions_path}...")
         pred_data = pandas.read_csv(predictions_path)
diff --git a/src/ptbench/scripts/compare.py b/src/ptbench/scripts/compare.py
index f5ff1725cd38cbaff7a90df97387df2534204d4c..95223a37a6ecb0ef5fdfa42a255786ed1642e132 100644
--- a/src/ptbench/scripts/compare.py
+++ b/src/ptbench/scripts/compare.py
@@ -62,7 +62,6 @@ def _load(data, threshold):
     # loads all data
     retval = {}
     for name, predictions_path in data.items():
-
         # Load predictions
         logger.info(f"Loading predictions from {predictions_path}...")
         pred_data = pandas.read_csv(predictions_path)
diff --git a/src/ptbench/scripts/config.py b/src/ptbench/scripts/config.py
index 6ecd6a8aeeaa63cf60ae009c81e5f7602fe60f03..13ef09df8a182707448b7df47b9175a3306d112a 100644
--- a/src/ptbench/scripts/config.py
+++ b/src/ptbench/scripts/config.py
@@ -96,7 +96,6 @@ def list(verbose) -> None:
                 entry_points_by_module[k][name] = ep
 
     for config_type in sorted(entry_points_by_module):
-
         # calculates the longest config name so we offset the printing
         longest_name_length = max(
             len(k) for k in entry_points_by_module[config_type].keys()
diff --git a/src/ptbench/scripts/predict.py b/src/ptbench/scripts/predict.py
index e57e227f46fc90a89fd393558b8987aaa7e5d148..18e57c3d5c6f5e019536e902cb338b4f04481073 100644
--- a/src/ptbench/scripts/predict.py
+++ b/src/ptbench/scripts/predict.py
@@ -150,7 +150,6 @@ def predict(
         pdf.close()
 
     for k, v in dataset.items():
-
         if k.startswith("_"):
             logger.info(f"Skipping dataset '{k}' (not to be evaluated)")
             continue
@@ -186,7 +185,6 @@ def predict(
 
                 all_mse = []
                 for f in range(nb_features):
-
                     v_original = copy.deepcopy(v)
 
                     # Randomly permute feature values from all samples
diff --git a/src/ptbench/scripts/predtojson.py b/src/ptbench/scripts/predtojson.py
index 361d2316887c972e6f573fb3e5acfe8967b744d1..549aa3a9c38c7ce41f9f0d3c1846d362541ec824 100644
--- a/src/ptbench/scripts/predtojson.py
+++ b/src/ptbench/scripts/predtojson.py
@@ -44,7 +44,6 @@ def _load(data):
     # loads all data
     retval = {}
     for name, predictions_path in data.items():
-
         # Load predictions
         logger.info(f"Loading predictions from {predictions_path}...")
         pred_data = pandas.read_csv(predictions_path)
@@ -110,7 +109,6 @@ def predtojson(label_path, output_folder) -> None:
 
     logger.info("Saving JSON file...")
     with open(output_file, "a+", newline="") as f:
-
         f.write("{")
         for i, (name, value) in enumerate(data.items()):
             if i > 0:
diff --git a/src/ptbench/scripts/train.py b/src/ptbench/scripts/train.py
index e69e81cf1fbcedc7f5c57e74e74975e64e8aa2ee..e3d9b04345fc43a7959ec6b5169383066d348a35 100644
--- a/src/ptbench/scripts/train.py
+++ b/src/ptbench/scripts/train.py
@@ -458,7 +458,6 @@ def train(
 
     valid_loader = None
     if validation_dataset is not None:
-
         # Redefine a weighted valid criterion if possible
         if (
             isinstance(criterion_valid, torch.nn.BCEWithLogitsLoss)
diff --git a/src/ptbench/scripts/train_analysis.py b/src/ptbench/scripts/train_analysis.py
index 5baf36383e97c171d9ff20fc2445ff8b12edf461..8eddb18aa1446cf7f32179f454e044d9fa7d4144 100644
--- a/src/ptbench/scripts/train_analysis.py
+++ b/src/ptbench/scripts/train_analysis.py
@@ -185,7 +185,6 @@ def train_analysis(
 
     # now, do the analysis
     with PdfPages(output_pdf) as pdf:
-
         figure = _loss_evolution(data)
         pdf.savefig(figure)
         plt.close(figure)
diff --git a/src/ptbench/utils/plot.py b/src/ptbench/utils/plot.py
index fd6bc1e8b1246082960c3ab153f7d2d4f32aa84f..410a086af4e6a48c6f4cc96f260569f165b82d2b 100644
--- a/src/ptbench/utils/plot.py
+++ b/src/ptbench/utils/plot.py
@@ -149,11 +149,9 @@ def precision_recall_f1iso(data):
     linecycler = cycle(lines)
 
     with _precision_recall_canvas(title=None) as (fig, axes):
-
         legend = []
 
         for name, value in data.items():
-
             df = value["df"]
 
             # plots Recall/Precision curve
@@ -249,7 +247,6 @@ def roc_curve(data, title=None):
     legend = []
 
     for name, value in data.items():
-
         df = value["df"]
 
         # plots roc curve
diff --git a/src/ptbench/utils/resources.py b/src/ptbench/utils/resources.py
index d9a1baf63eb6a9dc39ed69c08344d8a0cae292c6..be23ee452a1823555220c5d92d80a2f7c6a9223f 100644
--- a/src/ptbench/utils/resources.py
+++ b/src/ptbench/utils/resources.py
@@ -54,7 +54,6 @@ def run_nvidia_smi(query, rename=None):
         memory information is transformed to gigabytes (floating-point).
     """
     if _nvidia_smi is not None:
-
         if rename is None:
             rename = query
         else:
diff --git a/src/ptbench/utils/summary.py b/src/ptbench/utils/summary.py
index 9b8156457080808a284da53d547ff9559a7a5df7..2f7d468c5a26439c3415326be383c9df9af66a09 100644
--- a/src/ptbench/utils/summary.py
+++ b/src/ptbench/utils/summary.py
@@ -9,6 +9,46 @@ from functools import reduce
 from torch.nn.modules.module import _addindent
 
 
+def _repr(model):
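+    """Recursively builds a representation of ``model``, returning the
+    representation string and the total number of parameters."""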
+    # We treat the extra repr like the sub-module, one item per line
+    extra_lines = []
+    extra_repr = model.extra_repr()
+    # empty string will be split into list ['']
+    if extra_repr:
+        extra_lines = extra_repr.split("\n")
+    child_lines = []
+    total_params = 0
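+    # recurse into sub-modules, accumulating their parameter counts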
+    for key, module in model._modules.items():
+        mod_str, num_params = _repr(module)
+        mod_str = _addindent(mod_str, 2)
+        child_lines.append("(" + key + "): " + mod_str)
+        total_params += num_params
+    lines = extra_lines + child_lines
+
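+    # count parameters defined directly on this module (``_parameters`` may
+    # contain ``None`` entries, which the ``dtype`` check skips); the ``1``
+    # initializer keeps ``reduce`` safe for scalar (0-d) parameters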
+    for _, p in model._parameters.items():
+        if hasattr(p, "dtype"):
+            total_params += reduce(lambda x, y: x * y, p.shape, 1)
+
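+    # assemble the final string in the same style as ``torch.nn.Module``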
+    main_str = model._get_name() + "("
+    if lines:
+        # simple one-liner info, which most builtin Modules will use
+        if len(extra_lines) == 1 and not child_lines:
+            main_str += extra_lines[0]
+        else:
+            main_str += "\n  " + "\n  ".join(lines) + "\n"
+
+    main_str += ")"
+    main_str += f", {total_params:,} params"
+    return main_str, total_params
+
+
 def summary(model):
     """Counts the number of parameters in each model layer.
 
@@ -27,37 +67,4 @@ def summary(model):
     nparam : int
         number of parameters
     """
-    # ignore this space!
-    def _repr(model):
-        # We treat the extra repr like the sub-module, one item per line
-        extra_lines = []
-        extra_repr = model.extra_repr()
-        # empty string will be split into list ['']
-        if extra_repr:
-            extra_lines = extra_repr.split("\n")
-        child_lines = []
-        total_params = 0
-        for key, module in model._modules.items():
-            mod_str, num_params = _repr(module)
-            mod_str = _addindent(mod_str, 2)
-            child_lines.append("(" + key + "): " + mod_str)
-            total_params += num_params
-        lines = extra_lines + child_lines
-
-        for _, p in model._parameters.items():
-            if hasattr(p, "dtype"):
-                total_params += reduce(lambda x, y: x * y, p.shape)
-
-        main_str = model._get_name() + "("
-        if lines:
-            # simple one-liner info, which most builtin Modules will use
-            if len(extra_lines) == 1 and not child_lines:
-                main_str += extra_lines[0]
-            else:
-                main_str += "\n  " + "\n  ".join(lines) + "\n"
-
-        main_str += ")"
-        main_str += f", {total_params:,} params"
-        return main_str, total_params
-
     return _repr(model)
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 5a95c42a5282baca805af7ebcef9a3f377055ac2..6507e3d09e2959bbf952a01a44a9a65b4147b413 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -191,13 +191,11 @@ def test_train_pasa_montgomery(temporary_basedir, montgomery_datadir):
     # Temporarily modifies Montgomery datadir if need be
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.scripts.train import train
 
         runner = CliRunner()
 
         with stdout_logging() as buf:
-
             output_folder = str(temporary_basedir / "results")
             result = runner.invoke(
                 train,
@@ -252,17 +250,14 @@ def test_train_pasa_montgomery(temporary_basedir, montgomery_datadir):
 def test_predict_pasa_montgomery(
     temporary_basedir, montgomery_datadir, datadir
 ):
-
     # Temporarily modifies Montgomery datadir if need be
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.scripts.predict import predict
 
         runner = CliRunner()
 
         with stdout_logging() as buf:
-
             output_folder = str(temporary_basedir / "predictions")
             result = runner.invoke(
                 predict,
@@ -312,13 +307,11 @@ def test_predtojson(datadir, temporary_basedir, montgomery_datadir):
     # Temporarily modify Montgomery datadir if need be
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.scripts.predtojson import predtojson
 
         runner = CliRunner()
 
         with stdout_logging() as buf:
-
             predictions = str(datadir / "test_predictions.csv")
             output_folder = str(temporary_basedir / "pred_to_json")
             result = runner.invoke(
@@ -357,13 +350,11 @@ def test_evaluate_pasa_montgomery(temporary_basedir, montgomery_datadir):
     # Temporarily modify Montgomery datadir if need be
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.scripts.evaluate import evaluate
 
         runner = CliRunner()
 
         with stdout_logging() as buf:
-
             prediction_folder = str(temporary_basedir / "predictions")
             output_folder = str(temporary_basedir / "evaluations")
             result = runner.invoke(
@@ -411,13 +402,11 @@ def test_compare_pasa_montgomery(temporary_basedir, montgomery_datadir):
     # Temporarily modify Montgomery datadir if need be
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.scripts.compare import compare
 
         runner = CliRunner()
 
         with stdout_logging() as buf:
-
             predictions_folder = str(temporary_basedir / "predictions")
             output_folder = str(temporary_basedir / "comparisons")
             result = runner.invoke(
@@ -461,7 +450,6 @@ def test_train_signstotb_montgomery_rs(temporary_basedir, datadir):
     runner = CliRunner()
 
     with stdout_logging() as buf:
-
         output_folder = str(temporary_basedir / "results")
         result = runner.invoke(
             train,
@@ -514,7 +502,6 @@ def test_predict_signstotb_montgomery_rs(temporary_basedir, datadir):
     runner = CliRunner()
 
     with stdout_logging() as buf:
-
         output_folder = str(temporary_basedir / "predictions")
         result = runner.invoke(
             predict,
@@ -563,7 +550,6 @@ def test_train_logreg_montgomery_rs(temporary_basedir, datadir):
     runner = CliRunner()
 
     with stdout_logging() as buf:
-
         output_folder = str(temporary_basedir / "results")
         result = runner.invoke(
             train,
@@ -616,7 +602,6 @@ def test_predict_logreg_montgomery_rs(temporary_basedir, datadir):
     runner = CliRunner()
 
     with stdout_logging() as buf:
-
         output_folder = str(temporary_basedir / "predictions")
         result = runner.invoke(
             predict,
@@ -657,13 +642,11 @@ def test_aggregpred(temporary_basedir, montgomery_datadir):
     # Temporarily modify Montgomery datadir if need be
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.scripts.aggregpred import aggregpred
 
         runner = CliRunner()
 
         with stdout_logging() as buf:
-
             predictions = str(
                 temporary_basedir / "predictions" / "train" / "predictions.csv"
             )
diff --git a/tests/test_config.py b/tests/test_config.py
index f20d7ba6d7b6e273c59a6a8bc97ad1318ea659f8..6493892baea443210cc8c24410b07ffdb753d2d1 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -64,7 +64,6 @@ def test_get_samples_weights(montgomery_datadir):
     # Temporarily modify Montgomery datadir
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.configs.datasets.montgomery.default import dataset
 
         train_samples_weights = get_samples_weights(
@@ -92,7 +91,6 @@ def test_get_samples_weights_concat(montgomery_datadir):
     # Temporarily modify Montgomery datadir
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.configs.datasets.montgomery.default import dataset
 
         train_dataset = ConcatDataset(
@@ -133,7 +131,6 @@ def test_get_positive_weights(montgomery_datadir):
     # Temporarily modify Montgomery datadir
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.configs.datasets.montgomery.default import dataset
 
         train_positive_weights = get_positive_weights(
@@ -211,7 +208,6 @@ def test_get_positive_weights_concat(montgomery_datadir):
     # Temporarily modify Montgomery datadir
     new_value = {"datadir.montgomery": str(montgomery_datadir)}
     with rc_context(**new_value):
-
         from ptbench.configs.datasets.montgomery.default import dataset
 
         train_dataset = ConcatDataset(
diff --git a/tests/test_data_utils.py b/tests/test_data_utils.py
index 557b74b58b2bd98a5ae89df0148826a74cf7d2a1..d21d050af41d3fb2bad01c773e3fe71bfad0cb64 100644
--- a/tests/test_data_utils.py
+++ b/tests/test_data_utils.py
@@ -26,7 +26,6 @@ def test_random_permute():
     nb_equal = 0.0
 
     for k, s in enumerate(test_set._samples):
-
         if original[k] == s.data["data"][2]:
             nb_equal += 1
         else: