diff --git a/src/mednet/config/data/tbx11k/make_splits_from_database.py b/src/mednet/config/data/tbx11k/make_splits_from_database.py
index 48b0b90bccb96e8c0c81126f47684197222ecd75..5c097d1749ceb4e0bd69376941a2034cf02c4725 100644
--- a/src/mednet/config/data/tbx11k/make_splits_from_database.py
+++ b/src/mednet/config/data/tbx11k/make_splits_from_database.py
@@ -124,6 +124,11 @@ def normalize_labels(data: list) -> list:
     4: sick (but no tb), comes from the imgs/sick subdir, does not have any
        annotated bounding box.
 
+    Parameters
+    ----------
+    data
+        A list of samples.
+
     Returns
     -------
     list
@@ -179,7 +184,13 @@ def normalize_labels(data: list) -> list:
 
 
 def print_statistics(d: dict):
-    """Print some statistics about the dataset."""
+    """Print some statistics about the dataset.
+
+    Parameters
+    ----------
+    d
+        A dictionary of database splits.
+    """
 
     label_translations = {
         -1: "Unknown",
@@ -191,7 +202,13 @@ def print_statistics(d: dict):
     }
 
     def _print_dataset(ds: list):
-        """Print stats only for the dataset."""
+        """Print stats only for the dataset.
+
+        Parameters
+        ----------
+        ds
+            The dataset to print stats for.
+        """
         class_count = collections.Counter([k[1] for k in ds])
         for k, v in class_count.items():
             print(f"  - {label_translations[k]}: {v}")
@@ -273,6 +290,16 @@ def create_v2_default_split(d: dict, seed: int, validation_size) -> dict:
        sets.  The selection of samples is stratified (respects class
        proportions in Özgür's way - see comments)
 
+    Parameters
+    ----------
+    d
+        The original dataset that will be split.
+    seed
+        The seed to use at the relevant RNG.
+    validation_size
+        The proportion of data when we split the training set to make a
+        train and validation sets.
+
     Returns
     -------
     dict
@@ -321,6 +348,11 @@ def create_folds(
         The original split to consider.
     n
         The number of folds to produce.
+    seed
+        The seed to use for the relevant RNG.
+    validation_size
+        The proportion of data to reserve for validation when splitting
+        the training set into train and validation sets.
 
     Returns
     -------
diff --git a/src/mednet/data/augmentations.py b/src/mednet/data/augmentations.py
index 10c4ed384685ca93bc771f1e880facf940b18e6e..8dbb4520c91c3338c7d3c9bb5e60c84b91f1a709 100644
--- a/src/mednet/data/augmentations.py
+++ b/src/mednet/data/augmentations.py
@@ -143,6 +143,8 @@ def _elastic_deformation_on_batch(
 
     Parameters
     ----------
+    batch
+        The batch to apply elastic deformation to.
     alpha
         A multiplier for the gaussian filter outputs.
     sigma
@@ -156,6 +158,8 @@ def _elastic_deformation_on_batch(
     p
         Probability that this transformation will be applied.  Meaningful when
         using it as a data augmentation technique.
+    pool
+        The multiprocessing pool to use.
 
     Returns
     -------
diff --git a/src/mednet/engine/saliency/completeness.py b/src/mednet/engine/saliency/completeness.py
index 46ac77fadd272bb8017e8278a8f9ec717a6af541..6c8be633336b0e8a5f5150f5d2946fe14cb16fbd 100644
--- a/src/mednet/engine/saliency/completeness.py
+++ b/src/mednet/engine/saliency/completeness.py
@@ -126,6 +126,8 @@ def _process_sample(
 
     Parameters
     ----------
+    sample
+        The Sample to process.
     model
         Neural network model (e.g. pasa).
     device
diff --git a/src/mednet/models/loss_weights.py b/src/mednet/models/loss_weights.py
index 08ac2d32d435cba41bdedc9e033092139a5bbb25..8051f63b27ad7d43b04ca0316738a48c336326a3 100644
--- a/src/mednet/models/loss_weights.py
+++ b/src/mednet/models/loss_weights.py
@@ -74,6 +74,11 @@ def make_balanced_bcewithlogitsloss(
     The loss is weighted using the ratio between positives and total examples
     available.
 
+    Parameters
+    ----------
+    dataloader
+        The DataLoader to use to compute the BCE weights.
+
     Returns
     -------
     torch.nn.BCEWithLogitsLoss
diff --git a/tests/conftest.py b/tests/conftest.py
index 01223e6b8251f5e9fb9a1f2bb2bb55e8983cf2f9..b4f92331138424dc528900644bcef76e8f4c26ce 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -17,7 +17,12 @@ from mednet.data.typing import DatabaseSplit
 
 @pytest.fixture
 def datadir(request) -> pathlib.Path:
-    """Return the directory in which the test is sitting.
+    """Return the directory in which the test is sitting. Check the pytest documentation for more information.
+
+    Parameters
+    ----------
+    request
+        Information of the requesting test function.
 
     Returns
     -------
@@ -28,7 +33,13 @@ def datadir(request) -> pathlib.Path:
 
 
 def pytest_configure(config):
-    """This function is run once for pytest setup."""
+    """This function is run once for pytest setup.
+
+    Parameters
+    ----------
+    config
+        Configuration values. Check the pytest documentation for more information.
+    """
     config.addinivalue_line(
         "markers",
         "skip_if_rc_var_not_set(name): this mark skips the test if a certain "
@@ -43,6 +54,11 @@ def pytest_runtest_setup(item):
 
     The test is run if this function returns ``None``.  To skip a test,
     call ``pytest.skip()``, specifying a reason.
+
+    Parameters
+    ----------
+    item
+        A test invocation item. Check the pytest documentation for more information.
     """
     from mednet.utils.rc import load_rc
 
@@ -82,7 +98,13 @@ def temporary_basedir(tmp_path_factory):
 
 
 def pytest_sessionstart(session: pytest.Session) -> None:
-    """Preset the session start to ensure the Montgomery dataset is always available."""
+    """Preset the session start to ensure the Montgomery dataset is always available.
+
+    Parameters
+    ----------
+    session
+        The pytest session being started.
+    """
 
     from mednet.utils.rc import load_rc