from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
import numpy as np


def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
    """
    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
    This transformer is suited for Facenet based architectures
    
    .. warning::
15
       This will resize images to :math:`160 \times 160`
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
    
    """

    # This is the size of the image that this model expects
    CROPPED_IMAGE_HEIGHT = 160
    CROPPED_IMAGE_WIDTH = 160
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    color_channel = "rgb"

    # Select the face cropper according to the annotation type
    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face using the bounding-box annotations (no eye detection)
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )

    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)
        # eye positions for frontal images
        RIGHT_EYE_POS = (46, 53)
        LEFT_EYE_POS = (46, 107)

        # Crops the face using the eye-center annotations
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )

    else:
        transform_extra_arguments = None
        # No annotations available: default to a simple resize of the whole image
        face_cropper = face_crop_solver(cropped_image_size)

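    # Wrap both steps so they operate on bob.pipelines Sample objects; the
    # `transform_extra_arguments` mapping forwards each sample's annotations
    # to the face cropper's `transform` call.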
    transformer = make_pipeline(
        wrap(
            ["sample"],
            face_cropper,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], embedding),
    )

    return transformer
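
# A minimal usage sketch; `my_embedding` is a hypothetical placeholder for any
# scikit-learn transformer that maps a cropped 160x160 RGB face to a feature
# vector (e.g. a wrapped FaceNet model):
#
#   transformer = embedding_transformer_160x160(
#       my_embedding, annotation_type="eyes-center", fixed_positions=None
#   )
#   transformed_samples = transformer.transform(samples)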


def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
    """
    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
    This transformer is suited for Facenet based architectures
    
    .. warning::
76
       This will resize images to :math:`112 \times 112`
Tiago de Freitas Pereira's avatar
Tiago de Freitas Pereira committed
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
    
    """

    # This is the size of the image that this model expects
    CROPPED_IMAGE_HEIGHT = 112
    CROPPED_IMAGE_WIDTH = 112
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    color_channel = "rgb"

    # Select the face cropper according to the annotation type
    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face using the bounding-box annotations (no eye detection)
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )

    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)
        # eye positions for frontal images
        RIGHT_EYE_POS = (32, 34)
        LEFT_EYE_POS = (32, 77)

        # Crops the face using the eye-center annotations
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )

    else:
        transform_extra_arguments = None
        # No annotations available: default to a simple resize of the whole image
        face_cropper = face_crop_solver(cropped_image_size)

    transformer = make_pipeline(
        wrap(
            ["sample"],
            face_cropper,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], embedding),
    )

    return transformer
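
# The same sketch with bounding-box annotations; `my_embedding` is again a
# hypothetical placeholder for a wrapped 112x112 embedding model:
#
#   transformer = embedding_transformer_112x112(
#       my_embedding, annotation_type="bounding-box", fixed_positions=None
#   )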


def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
    """
    Crops a face to :math:`80 \times 64`


    Parameters
    ----------

       annotation_type: str
          Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None

       fixed_positions: tuple
          A tuple containing the annotations. This is used in case your input is already registered
          with fixed positions (eyes or bounding box)

       color_channel: str


    Returns
    -------

      face_cropper:
         A face cropper to be used
      
      transform_extra_arguments:
         The parameters to the transformer

    """

    # Cropping
    CROPPED_IMAGE_HEIGHT = 80
    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5

    # eye positions for frontal images
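    # Both eyes are placed at 1/5 of the crop height; in image coordinates the
    # subject's right eye lands near 1/4 of the crop width and the left eye at
    # 3/4, keeping the face horizontally centered.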
    RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
    LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)

    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    dtype = np.float64

    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face using the bounding-box annotations (no eye detection)
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
            dtype=dtype
        )

    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)

        # Crops the face using the eye positions defined above
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
            dtype=dtype
        )

    else:
        transform_extra_arguments = None
        # No annotations available: default to a simple resize of the whole image
        face_cropper = face_crop_solver(cropped_image_size)

    return face_cropper, transform_extra_arguments
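
# A sketch of how the returned pair is typically wired into a pipeline,
# mirroring the transformers built above (`my_embedding` is a hypothetical
# placeholder for an embedding extractor):
#
#   face_cropper, transform_extra_arguments = crop_80x64("eyes-center")
#   transformer = make_pipeline(
#       wrap(["sample"], face_cropper,
#            transform_extra_arguments=transform_extra_arguments),
#       wrap(["sample"], my_embedding),
#   )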