From 51e2e13e114a14a1d2ca2bc2822ed2b085367a2e Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Wed, 2 Aug 2023 12:25:22 +0200
Subject: [PATCH] [models.densenet_rs] Remove outdated module (functionality is
 now incorporated into the stock densenet model)

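The removed DensenetRS module differed from the stock densenet model only
in loading ImageNet-pretrained weights and re-heading the classifier with
a 14-output linear layer.  For reference, a minimal sketch of that same
adaptation on a plain torchvision densenet121, using the calls the removed
module itself made (the exact option exposed by the stock ptbench densenet
config may differ):

    import torch.nn as nn
    import torchvision.models as models

    # Load the ImageNet-pretrained backbone, as DensenetRS did.
    model = models.densenet121(weights=models.DenseNet121_Weights.DEFAULT)

    # Replace the classifier head to produce 14 outputs, matching the
    # removed module's nn.Linear(num_ftrs, 14).
    model.classifier = nn.Linear(model.classifier.in_features, 14)
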
---
 pyproject.toml                    |   1 -
 src/ptbench/models/densenet_rs.py | 107 ------------------------------
 2 files changed, 108 deletions(-)
 delete mode 100644 src/ptbench/models/densenet_rs.py

diff --git a/pyproject.toml b/pyproject.toml
index 30531b7c..ded50f8f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -72,7 +72,6 @@ ptbench = "ptbench.scripts.cli:cli"
 pasa = "ptbench.configs.models.pasa"
 signs-to-tb = "ptbench.configs.models.signs_to_tb"
 logistic-regression = "ptbench.configs.models.logistic_regression"
-densenet-rs = "ptbench.configs.models_datasets.densenet_rs"
 alexnet = "ptbench.configs.models.alexnet"
 alexnet-pretrained = "ptbench.configs.models.alexnet_pretrained"
 densenet = "ptbench.configs.models.densenet"
diff --git a/src/ptbench/models/densenet_rs.py b/src/ptbench/models/densenet_rs.py
deleted file mode 100644
index 0fbf2b25..00000000
--- a/src/ptbench/models/densenet_rs.py
+++ /dev/null
@@ -1,107 +0,0 @@
-# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
-#
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import lightning.pytorch as pl
-import torch
-import torch.nn as nn
-import torchvision.models as models
-
-from .normalizer import TorchVisionNormalizer
-
-
-class DensenetRS(pl.LightningModule):
-    """Densenet121 module for radiological extraction."""
-
-    def __init__(
-        self,
-        criterion,
-        criterion_valid,
-        optimizer,
-        optimizer_configs,
-    ):
-        super().__init__()
-
-        self.save_hyperparameters(ignore=["criterion", "criterion_valid"])
-
-        self.name = "DensenetRS"
-
-        self.normalizer = TorchVisionNormalizer()
-
-        # Load pretrained model
-        self.model_ft = models.densenet121(
-            weights=models.DenseNet121_Weights.DEFAULT
-        )
-
-        # Adapt output features
-        num_ftrs = self.model_ft.classifier.in_features
-        self.model_ft.classifier = nn.Linear(num_ftrs, 14)
-
-    def forward(self, x):
-        x = self.normalizer(x)
-        x = self.model_ft(x)
-        return x
-
-    def training_step(self, batch, batch_idx):
-        images = batch[1]
-        labels = batch[2]
-
-        # Increase label dimension if too low
-        # Allows single and multiclass usage
-        if labels.ndim == 1:
-            labels = torch.reshape(labels, (labels.shape[0], 1))
-
-        # Forward pass on the network
-        outputs = self(images)
-
-        # Manually move criterion to selected device, since not part of the model.
-        self.hparams.criterion = self.hparams.criterion.to(self.device)
-        training_loss = self.hparams.criterion(outputs, labels.float())
-
-        return {"loss": training_loss}
-
-    def validation_step(self, batch, batch_idx, dataloader_idx=0):
-        images = batch[1]
-        labels = batch[2]
-
-        # Increase label dimension if too low
-        # Allows single and multiclass usage
-        if labels.ndim == 1:
-            labels = torch.reshape(labels, (labels.shape[0], 1))
-
-        # data forwarding on the existing network
-        outputs = self(images)
-
-        # Manually move criterion to selected device, since not part of the model.
-        self.hparams.criterion_valid = self.hparams.criterion_valid.to(
-            self.device
-        )
-        validation_loss = self.hparams.criterion_valid(outputs, labels.float())
-
-        if dataloader_idx == 0:
-            return {"validation_loss": validation_loss}
-        else:
-            return {f"extra_validation_loss_{dataloader_idx}": validation_loss}
-
-    def predict_step(self, batch, batch_idx, dataloader_idx=0, grad_cams=False):
-        names = batch[0]
-        images = batch[1]
-
-        outputs = self(images)
-        probabilities = torch.sigmoid(outputs)
-
-        # necessary check for HED architecture that uses several outputs
-        # for loss calculation instead of just the last concatfuse block
-        if isinstance(outputs, list):
-            outputs = outputs[-1]
-
-        return names[0], torch.flatten(probabilities), torch.flatten(batch[2])
-
-    def configure_optimizers(self):
-        # Dynamically instantiates the optimizer given the configs
-        optimizer = getattr(torch.optim, self.hparams.optimizer)(
-            filter(lambda p: p.requires_grad, self.model_ft.parameters()),
-            **self.hparams.optimizer_configs,
-        )
-
-        return optimizer
-- 
GitLab