Skip to content
Snippets Groups Projects
Commit 7f0d1cb6 authored by Guillaume HEUSCH's avatar Guillaume HEUSCH
Browse files

[datasets] making docstrings numpy style

parent b15ed354
No related branches found
No related tags found
1 merge request!2Resolve "fix docstrings in datasets"
Pipeline #
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
# encoding: utf-8 # encoding: utf-8
import os import os
import numpy import numpy
from torch.utils.data import Dataset, DataLoader from torch.utils.data import Dataset, DataLoader
...@@ -14,22 +13,36 @@ from .utils import map_labels ...@@ -14,22 +13,36 @@ from .utils import map_labels
class CasiaWebFaceDataset(Dataset): class CasiaWebFaceDataset(Dataset):
"""Casia WebFace dataset (for CNN training). """Class representing the CASIA WebFace dataset
Class representing the CASIA WebFace dataset
**Parameters** Note that here the only label is identity
root-dir: path Attributes
----------
root_dir : str
The path to the data The path to the data
transform : `torchvision.transforms`
frontal_only: boolean
If you want to only use frontal faces
transform: torchvision.transforms
The transform(s) to apply to the face images The transform(s) to apply to the face images
data_files : list of str
The list of data files
id_labels : list of int
The list of identities, for each data file
""" """
def __init__(self, root_dir, transform=None, start_index=0): def __init__(self, root_dir, transform=None, start_index=0):
"""Init function
Parameters
----------
root_dir : str
The path to the data
transform : :py:class:`torchvision.transforms`
The transform(s) to apply to the face images
start_index : int
label of the first identity (useful if you use
several databases)
"""
self.root_dir = root_dir self.root_dir = root_dir
self.transform = transform self.transform = transform
self.data_files = [] self.data_files = []
...@@ -46,14 +59,25 @@ class CasiaWebFaceDataset(Dataset): ...@@ -46,14 +59,25 @@ class CasiaWebFaceDataset(Dataset):
self.id_labels = map_labels(id_labels, start_index) self.id_labels = map_labels(id_labels, start_index)
def __len__(self): def __len__(self):
""" """Returns the length of the dataset (i.e. nb of examples)
return the length of the dataset (i.e. nb of examples)
Returns
-------
int
the number of examples in the dataset
""" """
return len(self.data_files) return len(self.data_files)
def __getitem__(self, idx): def __getitem__(self, idx):
""" """Returns a sample from the dataset
return a sample from the dataset
Returns
-------
dict
an example of the dataset, containing the
transformed face image and its identity
""" """
image = bob.io.base.load(self.data_files[idx]) image = bob.io.base.load(self.data_files[idx])
identity = self.id_labels[idx] identity = self.id_labels[idx]
...@@ -66,39 +90,63 @@ class CasiaWebFaceDataset(Dataset): ...@@ -66,39 +90,63 @@ class CasiaWebFaceDataset(Dataset):
class CasiaDataset(Dataset): class CasiaDataset(Dataset):
"""Casia WebFace dataset. """Class representing the CASIA WebFace dataset
Class representing the CASIA WebFace dataset
**Parameters** Note that in this class, two labels are provided
with each image: identity and pose.
root-dir: path Pose labels have been automatically inferred using
The path to the data the ROC face recognition SDK from RankOne.
frontal_only: boolean There are 13 pose labels, corresponding to cluster
If you want to only use frontal faces of 15 degrees, ranging from -90 degress (left profile)
to 90 degrees (right profile)
transform: torchvision.transforms
Attributes
----------
root_dir: str
The path to the data
transform : `torchvision.transforms`
The transform(s) to apply to the face images The transform(s) to apply to the face images
data_files: list of str
The list of data files
id_labels : list of int
The list of identities, for each data file
pose_labels : list of int
The list containing the pose labels
""" """
def __init__(self, root_dir, frontal_only=False, transform=None, start_index=0): def __init__(self, root_dir, transform=None, start_index=0):
"""Init function
Parameters
----------
root_dir: str
The path to the data
transform: :py:class:`torchvision.transforms`
The transform(s) to apply to the face images
start_index : int
label of the first identity (useful if you use
several databases)
"""
self.root_dir = root_dir self.root_dir = root_dir
self.transform = transform self.transform = transform
dir_to_pose_label = {'l90': '0', dir_to_pose_label = {'l90': '0',
'l75': '1', 'l75': '1',
'l60': '2', 'l60': '2',
'l45': '3', 'l45': '3',
'l30': '4', 'l30': '4',
'l15': '5', 'l15': '5',
'0' : '6', '0' : '6',
'r15': '7', 'r15': '7',
'r30': '8', 'r30': '8',
'r45': '9', 'r45': '9',
'r60': '10', 'r60': '10',
'r75': '11', 'r75': '11',
'r90': '12', 'r90': '12',
} }
# get all the needed file, the pose labels, and the id labels # get all the needed file, the pose labels, and the id labels
self.data_files = [] self.data_files = []
...@@ -119,15 +167,26 @@ class CasiaDataset(Dataset): ...@@ -119,15 +167,26 @@ class CasiaDataset(Dataset):
def __len__(self): def __len__(self):
""" """Returns the length of the dataset (i.e. nb of examples)
return the length of the dataset (i.e. nb of examples)
Returns
-------
int
the number of examples in the dataset
""" """
return len(self.data_files) return len(self.data_files)
def __getitem__(self, idx): def __getitem__(self, idx):
""" """Returns a sample from the dataset
return a sample from the dataset
Returns
-------
dict
an example of the dataset, containing the
transformed face image, its identity and pose information
""" """
image = bob.io.base.load(self.data_files[idx]) image = bob.io.base.load(self.data_files[idx])
identity = self.id_labels[idx] identity = self.id_labels[idx]
......
...@@ -15,28 +15,40 @@ import bob.io.image ...@@ -15,28 +15,40 @@ import bob.io.image
from .utils import map_labels from .utils import map_labels
class MultiPIEDataset(Dataset): class MultiPIEDataset(Dataset):
"""MultiPIE dataset. """Class representing the Multi-PIE dataset
Class represeting the Multi-PIE dataset
**Parameters**
root-dir: path Attributes
----------
root_dir : str
The path to the data The path to the data
world : bool
world: boolean
If you want to only use data corresponding to the world model If you want to only use data corresponding to the world model
transform: `torchvision.transforms`
frontal_only: boolean
If you want to only use frontal faces
transform: torchvision.transforms
The transform(s) to apply to the face images The transform(s) to apply to the face images
data_files: list of str
The list of data files
id_labels : list of int
The list of identities, for each data file
pose_labels : list of int
The list containing the pose labels
""" """
# TODO: Start from original data and annotations - Guillaume HEUSCH, 06-11-2017
def __init__(self, root_dir, world=False, frontal_only=False, transform=None): def __init__(self, root_dir, world=False, frontal_only=False, transform=None):
"""Class representing the Multi-PIE dataset
Attributes
----------
root_dir : str
The path to the data
world : bool
If you want to only use data corresponding to the world model
frontal_only : bool
If you want to only use frontal faces
transform: `torchvision.transforms`
The transform(s) to apply to the face images
"""
self.root_dir = root_dir self.root_dir = root_dir
self.transform = transform self.transform = transform
self.world = world self.world = world
...@@ -110,15 +122,26 @@ class MultiPIEDataset(Dataset): ...@@ -110,15 +122,26 @@ class MultiPIEDataset(Dataset):
def __len__(self): def __len__(self):
""" """Returns the length of the dataset (i.e. nb of examples)
return the length of the dataset (i.e. nb of examples)
Returns
-------
int
the number of examples in the dataset
""" """
return len(self.data_files) return len(self.data_files)
def __getitem__(self, idx): def __getitem__(self, idx):
""" """Returns a sample from the dataset
return a sample from the dataset
Returns
-------
dict
an example of the dataset, containing the
transformed face image, its identity and pose information
""" """
image = bob.io.base.load(self.data_files[idx]) image = bob.io.base.load(self.data_files[idx])
identity = self.id_labels[idx] identity = self.id_labels[idx]
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment