Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
mednet
Manage
Activity
Members
Labels
Plan
Issues
Issue boards
Milestones
Code
Merge requests
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Build
Pipelines
Jobs
Pipeline schedules
Artifacts
Deploy
Releases
Package Registry
Model registry
Operate
Environments
Terraform modules
Monitor
Incidents
Analyze
Value stream analytics
Contributor analytics
CI/CD analytics
Repository analytics
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
Community forum
Contribute to GitLab
Provide feedback
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
medai
software
mednet
Commits
f31df6a2
Commit
f31df6a2
authored
9 months ago
by
Daniel CARRON
Committed by
André Anjos
8 months ago
Browse files
Options
Downloads
Patches
Plain Diff
[segmentation.models] Add hed model
parent
10d78c5a
No related branches found
No related tags found
1 merge request
!46
Create common library
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
pyproject.toml
+1
-0
1 addition, 0 deletions
pyproject.toml
src/mednet/libs/segmentation/config/models/hed.py
+35
-0
35 additions, 0 deletions
src/mednet/libs/segmentation/config/models/hed.py
src/mednet/libs/segmentation/models/hed.py
+188
-0
188 additions, 0 deletions
src/mednet/libs/segmentation/models/hed.py
with
224 additions
and
0 deletions
pyproject.toml
+
1
−
0
View file @
f31df6a2
...
@@ -423,6 +423,7 @@ visceral = "mednet.config.data.visceral.default"
...
@@ -423,6 +423,7 @@ visceral = "mednet.config.data.visceral.default"
# models
# models
driu
=
"mednet.libs.segmentation.config.models.driu"
driu
=
"mednet.libs.segmentation.config.models.driu"
hed
=
"mednet.libs.segmentation.config.models.hed"
lwnet
=
"mednet.libs.segmentation.config.models.lwnet"
lwnet
=
"mednet.libs.segmentation.config.models.lwnet"
unet
=
"mednet.libs.segmentation.config.models.unet"
unet
=
"mednet.libs.segmentation.config.models.unet"
...
...
This diff is collapsed.
Click to expand it.
src/mednet/libs/segmentation/config/models/hed.py
0 → 100644
+
35
−
0
View file @
f31df6a2
# SPDX-FileCopyrightText: Copyright © 2024 Idiap Research Institute <contact@idiap.ch>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""HED model configuration.

Instantiates the HED segmentation model with an AdaBound optimizer and a
multi-output soft-Jaccard/BCE loss.
"""

from mednet.libs.segmentation.engine.adabound import AdaBound
from mednet.libs.segmentation.models.hed import HED
from mednet.libs.segmentation.models.losses import MultiSoftJaccardBCELogitsLoss

# Optimizer hyper-parameters (AdaBound)
lr = 0.001  # initial learning rate
alpha = 0.7  # soft-Jaccard/BCE loss balancing term
betas = (0.9, 0.999)  # Adam-style moment coefficients
# NOTE: the original file assigned ``eps`` twice (1e-08 and then 1e-8 — the
# same value); the duplicate assignment was removed.
eps = 1e-8  # numerical stability term
weight_decay = 0  # L2 regularization factor
final_lr = 0.1  # AdaBound final (SGD-like) learning rate
gamma = 1e-3  # AdaBound convergence speed of the bound functions
amsbound = False  # whether to use the AMSBound variant

# Image pre-processing: resize so the longest side equals this, then pad square
crop_size = 544

model = HED(
    loss_type=MultiSoftJaccardBCELogitsLoss,
    loss_arguments=dict(alpha=alpha),
    optimizer_type=AdaBound,
    optimizer_arguments=dict(
        lr=lr,
        betas=betas,
        final_lr=final_lr,
        gamma=gamma,
        eps=eps,
        weight_decay=weight_decay,
        amsbound=amsbound,
    ),
    augmentation_transforms=[],
    crop_size=crop_size,
)
This diff is collapsed.
Click to expand it.
src/mednet/libs/segmentation/models/hed.py
0 → 100644
+
188
−
0
View file @
f31df6a2
# SPDX-FileCopyrightText: Copyright © 2023 Idiap Research Institute <contact@idiap.ch>
#
# SPDX-License-Identifier: GPL-3.0-or-later
import
logging
import
typing
import
torch
import
torch.nn
from
mednet.libs.common.data.typing
import
TransformSequence
from
mednet.libs.common.models.model
import
Model
from
mednet.libs.common.models.transforms
import
ResizeMaxSide
,
SquareCenterPad
from
.backbones.vgg
import
vgg16_for_segmentation
from
.losses
import
MultiSoftJaccardBCELogitsLoss
from
.make_layers
import
UpsampleCropBlock
,
conv_with_kaiming_uniform
logger = logging.getLogger("mednet")


class ConcatFuseBlock(torch.nn.Module):
    """Fuse five single-channel feature maps into one.

    The five inputs are stacked along the channel dimension and reduced
    back to a single channel with a 1x1 convolution.
    """

    def __init__(self):
        super().__init__()
        # 5 input channels -> 1 output channel, 1x1 kernel, stride 1, padding 0
        self.conv = conv_with_kaiming_uniform(5, 1, 1, 1, 0)

    def forward(self, x1, x2, x3, x4, x5):
        stacked = torch.cat((x1, x2, x3, x4, x5), dim=1)
        return self.conv(stacked)
class HEDHead(torch.nn.Module):
    """HED head module.

    Builds one side-output per backbone feature map (upsampled back to the
    input resolution) plus a fused output combining all five.

    Parameters
    ----------
    in_channels_list : list
        Number of channels for each feature map that is returned from backbone.
    """

    def __init__(self, in_channels_list=None):
        super().__init__()
        # Unpack the per-stage channel counts coming from the backbone.
        (
            in_conv_1_2_16,
            in_upsample2,
            in_upsample_4,
            in_upsample_8,
            in_upsample_16,
        ) = in_channels_list

        # First side-output: plain 3x3 convolution, already at full resolution.
        self.conv1_2_16 = torch.nn.Conv2d(in_conv_1_2_16, 1, 3, 1, 1)
        # Remaining side-outputs: upsample back to input size, then crop.
        self.upsample2 = UpsampleCropBlock(in_upsample2, 1, 4, 2, 0)
        self.upsample4 = UpsampleCropBlock(in_upsample_4, 1, 8, 4, 0)
        self.upsample8 = UpsampleCropBlock(in_upsample_8, 1, 16, 8, 0)
        self.upsample16 = UpsampleCropBlock(in_upsample_16, 1, 32, 16, 0)
        # Fuse the five single-channel maps into one.
        self.concatfuse = ConcatFuseBlock()

    def forward(self, x):
        # x[0] carries the target spatial size; x[1:] are backbone features.
        target_hw = x[0]
        side1 = self.conv1_2_16(x[1])
        side2 = self.upsample2(x[2], target_hw)
        side3 = self.upsample4(x[3], target_hw)
        side4 = self.upsample8(x[4], target_hw)
        side5 = self.upsample16(x[5], target_hw)
        fused = self.concatfuse(side1, side2, side3, side4, side5)
        return (side2, side3, side4, side5, fused)
class HED(Model):
    """Implementation of the HED model.

    Parameters
    ----------
    loss_type
        The loss to be used for training and evaluation.

        .. warning::

           The loss should be set to always return batch averages (as opposed
           to the batch sum), as our logging system expects it so.
    loss_arguments
        Arguments to the loss.
    optimizer_type
        The type of optimizer to use for training.
    optimizer_arguments
        Arguments to the optimizer after ``params``.
    augmentation_transforms
        An optional sequence of torch modules containing transforms to be
        applied on the input **before** it is fed into the network.
    num_classes
        Number of outputs (classes) for this model.
    pretrained
        If True, will use VGG16 pretrained weights.
    crop_size
        The size of the image after center cropping.
    """

    def __init__(
        self,
        # NOTE: the default is the loss *class* (instantiated downstream with
        # ``loss_arguments``), hence the ``type[...]`` annotation.
        loss_type: type[torch.nn.Module] = MultiSoftJaccardBCELogitsLoss,
        # Mutable containers must not be used as defaults (shared across
        # calls); ``None`` sentinels are resolved below.
        loss_arguments: dict[str, typing.Any] | None = None,
        optimizer_type: type[torch.optim.Optimizer] = torch.optim.Adam,
        optimizer_arguments: dict[str, typing.Any] | None = None,
        augmentation_transforms: TransformSequence | None = None,
        num_classes: int = 1,
        pretrained: bool = False,
        crop_size: int = 544,
    ):
        super().__init__(
            loss_type,
            {} if loss_arguments is None else loss_arguments,
            optimizer_type,
            {} if optimizer_arguments is None else optimizer_arguments,
            [] if augmentation_transforms is None else augmentation_transforms,
            num_classes,
        )
        self.name = "hed"

        # Resize keeping aspect-ratio, then pad to a square of crop_size.
        resize_transform = ResizeMaxSide(crop_size)
        self.model_transforms = [
            resize_transform,
            SquareCenterPad(),
        ]

        self.pretrained = pretrained

        # VGG16 backbone returning intermediate feature maps at the listed
        # layer indices (one per HED side-output).
        self.backbone = vgg16_for_segmentation(
            pretrained=self.pretrained,
            return_features=[3, 8, 14, 22, 29],
        )

        # Channel counts matching the returned VGG16 feature maps.
        self.head = HEDHead([64, 128, 256, 512, 512])

    def forward(self, x):
        if self.normalizer is not None:
            x = self.normalizer(x)
        x = self.backbone(x)
        return self.head(x)

    def set_normalizer(self, dataloader: torch.utils.data.DataLoader) -> None:
        """Initialize the normalizer for the current model.

        This function is NOOP if ``pretrained = True`` (normalizer set to
        imagenet weights, during construction).

        Parameters
        ----------
        dataloader
            A torch Dataloader from which to compute the mean and std.
            Will not be used if the model is pretrained.
        """
        if self.pretrained:
            from mednet.libs.common.models.normalizer import (
                make_imagenet_normalizer,
            )

            logger.warning(
                f"ImageNet pre-trained {self.name} model - NOT "
                f"computing z-norm factors from train dataloader. "
                f"Using preset factors from torchvision.",
            )
            self.normalizer = make_imagenet_normalizer()
        else:
            self.normalizer = None

    def training_step(self, batch, batch_idx):
        images = batch[0]
        ground_truths = batch[1]["target"]
        masks = batch[1]["mask"]
        # Augmentations are applied only during training.
        outputs = self(self._augmentation_transforms(images))
        return self._train_loss(outputs, ground_truths, masks)

    def validation_step(self, batch, batch_idx):
        images = batch[0]
        ground_truths = batch[1]["target"]
        masks = batch[1]["mask"]
        outputs = self(images)
        return self._validation_loss(outputs, ground_truths, masks)
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment