From 821d7e817941e7c6dd7c02c1b3f7b704ff1b2146 Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Tue, 21 Apr 2020 11:02:41 +0200
Subject: [PATCH] [data.transforms] Remove bob.core dependence; Test 16-bit
 auto-level transform

---
 MANIFEST.in                           |   2 +-
 bob/ip/binseg/data/transforms.py      |  36 +++++++++-------
 bob/ip/binseg/test/test_transforms.py |  58 +++++++++++++++++++-------
 bob/ip/binseg/test/testimg-16bit.png  | Bin 0 -> 1415 bytes
 conda/meta.yaml                       |  16 +++----
 requirements.txt                      |   3 +-
 6 files changed, 74 insertions(+), 41 deletions(-)
 create mode 100644 bob/ip/binseg/test/testimg-16bit.png

diff --git a/MANIFEST.in b/MANIFEST.in
index dc8f75ec..cf1d827b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
 include README.rst buildout.cfg COPYING version.txt requirements.txt
 recursive-include doc *.rst *.png *.ico *.txt
-recursive-include bob *.json
+recursive-include bob *.json *.png
diff --git a/bob/ip/binseg/data/transforms.py b/bob/ip/binseg/data/transforms.py
index a347fcbb..bea81bcd 100644
--- a/bob/ip/binseg/data/transforms.py
+++ b/bob/ip/binseg/data/transforms.py
@@ -18,8 +18,6 @@ import PIL.Image
 import torchvision.transforms
 import torchvision.transforms.functional
 
-import bob.core
-
 
 class TupleMixin:
     """Adds support to work with tuples of objects to torchvision transforms"""
@@ -104,12 +102,17 @@ class SingleAutoLevel16to8:
     To auto-level, we calculate the maximum and the minimum of the image, and
     consider such a range should be mapped to the [0,255] range of the
     destination image.
+
     """
 
     def __call__(self, img):
+        imin, imax = img.getextrema()
+        irange = (imax - imin) or 1  # guard: constant image would divide by zero
         return PIL.Image.fromarray(
-            bob.core.convert(img, "uint8", (0, 255), img.getextrema())
-        )
+            numpy.round(
+                255.0 * (numpy.array(img).astype(float) - imin) / irange
+            ).astype("uint8"),
+        ).convert("L")
 
 
 class AutoLevel16to8(TupleMixin, SingleAutoLevel16to8):
@@ -121,6 +124,7 @@ class AutoLevel16to8(TupleMixin, SingleAutoLevel16to8):
     consider such a range should be mapped to the [0,255] range of the
     destination image.
     """
+
     pass
 
 
@@ -132,6 +136,7 @@ class SingleToRGB:
     defaults.  This may be aggressive if applied to 16-bit images without
     further considerations.
     """
+
     def __call__(self, img):
         return img.convert(mode="RGB")
 
@@ -195,8 +200,8 @@ class RandomRotation(torchvision.transforms.RandomRotation):
     """
 
     def __init__(self, p=0.5, **kwargs):
-        kwargs.setdefault('degrees', 15)
-        kwargs.setdefault('resample', PIL.Image.BILINEAR)
+        kwargs.setdefault("degrees", 15)
+        kwargs.setdefault("resample", PIL.Image.BILINEAR)
         super(RandomRotation, self).__init__(**kwargs)
         self.p = p
 
@@ -205,16 +210,17 @@ class RandomRotation(torchvision.transforms.RandomRotation):
         if random.random() < self.p:
             angle = self.get_params(self.degrees)
             return [
-                torchvision.transforms.functional.rotate(img, angle,
-                    self.resample, self.expand, self.center)
+                torchvision.transforms.functional.rotate(
+                    img, angle, self.resample, self.expand, self.center
+                )
                 for img in args
-                ]
+            ]
         else:
             return args
 
     def __repr__(self):
         retval = super(RandomRotation, self).__repr__()
-        return retval.replace('(', f'(p={self.p},', 1)
+        return retval.replace("(", f"(p={self.p},", 1)
 
 
 class ColorJitter(torchvision.transforms.ColorJitter):
@@ -243,10 +249,10 @@ class ColorJitter(torchvision.transforms.ColorJitter):
     """
 
     def __init__(self, p=0.5, **kwargs):
-        kwargs.setdefault('brightness', 0.3)
-        kwargs.setdefault('contrast', 0.3)
-        kwargs.setdefault('saturation', 0.02)
-        kwargs.setdefault('hue', 0.02)
+        kwargs.setdefault("brightness", 0.3)
+        kwargs.setdefault("contrast", 0.3)
+        kwargs.setdefault("saturation", 0.02)
+        kwargs.setdefault("hue", 0.02)
         super(ColorJitter, self).__init__(**kwargs)
         self.p = p
 
@@ -259,4 +265,4 @@ class ColorJitter(torchvision.transforms.ColorJitter):
 
     def __repr__(self):
         retval = super(ColorJitter, self).__repr__()
-        return retval.replace('(', f'(p={self.p},', 1)
+        return retval.replace("(", f"(p={self.p},", 1)
diff --git a/bob/ip/binseg/test/test_transforms.py b/bob/ip/binseg/test/test_transforms.py
index 826343f6..a1967fe5 100644
--- a/bob/ip/binseg/test/test_transforms.py
+++ b/bob/ip/binseg/test/test_transforms.py
@@ -4,7 +4,10 @@
 import random
 
 import nose.tools
+import pkg_resources
+
 import numpy
+import PIL.Image
 import torch
 import torchvision.transforms.functional
 
@@ -93,7 +96,7 @@ def test_pad_default():
     # checks that the border introduced with padding is all about "fill"
     img_t = numpy.array(img_t)
     img_t[idx] = 0
-    border_size_plane = (img_t[:,:,0].size - numpy.array(img)[:,:,0].size)
+    border_size_plane = img_t[:, :, 0].size - numpy.array(img)[:, :, 0].size
     nose.tools.eq_(img_t.sum(), 0)
 
     gt_t = numpy.array(gt_t)
@@ -131,8 +134,8 @@ def test_pad_2tuple():
     # checks that the border introduced with padding is all about "fill"
     img_t = numpy.array(img_t)
     img_t[idx] = 0
-    border_size_plane = (img_t[:,:,0].size - numpy.array(img)[:,:,0].size)
-    expected_sum = sum((fill[k]*border_size_plane) for k in range(3))
+    border_size_plane = img_t[:, :, 0].size - numpy.array(img)[:, :, 0].size
+    expected_sum = sum((fill[k] * border_size_plane) for k in range(3))
     nose.tools.eq_(img_t.sum(), expected_sum)
 
     gt_t = numpy.array(gt_t)
@@ -170,8 +173,8 @@ def test_pad_4tuple():
     # checks that the border introduced with padding is all about "fill"
     img_t = numpy.array(img_t)
     img_t[idx] = 0
-    border_size_plane = (img_t[:,:,0].size - numpy.array(img)[:,:,0].size)
-    expected_sum = sum((fill[k]*border_size_plane) for k in range(3))
+    border_size_plane = img_t[:, :, 0].size - numpy.array(img)[:, :, 0].size
+    expected_sum = sum((fill[k] * border_size_plane) for k in range(3))
     nose.tools.eq_(img_t.sum(), expected_sum)
 
     gt_t = numpy.array(gt_t)
@@ -194,7 +197,7 @@ def test_resize_downscale_w():
     img, gt, mask = [_create_img(im_size) for i in range(3)]
     nose.tools.eq_(img.size, (im_size[2], im_size[1]))  # confirms the above
     img_t, gt_t, mask_t = transforms(img, gt, mask)
-    new_size = (new_size, (new_size*im_size[1])/im_size[2])
+    new_size = (new_size, (new_size * im_size[1]) / im_size[2])
     nose.tools.eq_(img_t.size, new_size)
     nose.tools.eq_(gt_t.size, new_size)
     nose.tools.eq_(mask_t.size, new_size)
@@ -224,8 +227,8 @@ def test_crop():
 
     # test
     idx = (
-        slice(crop_size[0], crop_size[0]+crop_size[2]),
-        slice(crop_size[1], crop_size[1]+crop_size[3]),
+        slice(crop_size[0], crop_size[0] + crop_size[2]),
+        slice(crop_size[1], crop_size[1] + crop_size[3]),
         slice(0, im_size[0]),
     )
     transforms = Crop(*crop_size)
@@ -297,7 +300,7 @@ def test_rotation():
     assert numpy.any(numpy.array(img1_t) != numpy.array(img))
 
     # asserts two random transforms are not the same
-    img_t2, = transforms(img)
+    (img_t2,) = transforms(img)
     assert numpy.any(numpy.array(img_t2) != numpy.array(img1_t))
 
 
@@ -327,15 +330,40 @@ def test_color_jitter():
 
 def test_compose():
 
-    transforms = Compose([
-                RandomVerticalFlip(p=1),
-                RandomHorizontalFlip(p=1),
-                RandomVerticalFlip(p=1),
-                RandomHorizontalFlip(p=1),
-                ])
+    transforms = Compose(
+        [
+            RandomVerticalFlip(p=1),
+            RandomHorizontalFlip(p=1),
+            RandomVerticalFlip(p=1),
+            RandomHorizontalFlip(p=1),
+        ]
+    )
 
     img, gt, mask = [_create_img((3, 24, 42)) for i in range(3)]
     img_t, gt_t, mask_t = transforms(img, gt, mask)
     assert numpy.all(numpy.array(img_t) == numpy.array(img))
     assert numpy.all(numpy.array(gt_t) == numpy.array(gt))
     assert numpy.all(numpy.array(mask_t) == numpy.array(mask))
+
+
+def test_16bit_autolevel():
+
+    test_image_path = pkg_resources.resource_filename(
+        __name__, "testimg-16bit.png"
+    )
+    # the way to load a 16-bit PNG image correctly, according to:
+    # https://stackoverflow.com/questions/32622658/read-16-bit-png-image-file-using-python
+    # https://github.com/python-pillow/Pillow/issues/3011
+    img = PIL.Image.fromarray(
+        numpy.array(
+            PIL.Image.open(test_image_path)
+        ).astype("uint16")
+    )
+    nose.tools.eq_(img.mode, "I;16")
+    nose.tools.eq_(img.getextrema(), (0, 65281))
+
+    timg = SingleAutoLevel16to8()(img)
+    nose.tools.eq_(timg.mode, "L")
+    nose.tools.eq_(timg.getextrema(), (0, 255))
+    #timg.show()
+    #import ipdb; ipdb.set_trace()
diff --git a/bob/ip/binseg/test/testimg-16bit.png b/bob/ip/binseg/test/testimg-16bit.png
new file mode 100644
index 0000000000000000000000000000000000000000..1bd5c6ad984e9200e0611d492149126b628fd74d
GIT binary patch
literal 1415
zcmWlXe^iZk9LL|!x98@m?yW~^HRbHNol4UWMYRffN=cczt5(!%9mc3JCxs!NTg?sG
zYO!o(u5d0lR2LbO-I;d!aj0{I`87o*%yi7cG3?`y=Y5{@J+IH}d7sbs*`!NO(AbZ)
z2cSuu6_*Nd+?z+F18Cf8<^!ta#JHIB)%6c=Bo<x^7Ud5nJ^guwW%*|XUUqES)20nv
zVD5Hr=b=B2FEyQi_|uC7sb{3*#qC$xj;4~EQtye^Hz5vP?MZ>~<gbDEBtB~7$#}4r
zYo5Ca`ekj?Y!$gFdg`cB*%Tcul*_vuf8b{rXB(QB)Vrr<Rg5^)d#_s-`X1d{8qAwl
zec=gq%p<~y>AN)Jgi`s2ZAy8JG5^;Drqvzs_%v3lYsog0YIRL@rZZaIp=+0!q-)%?
zRx9rB>uZ;Uz9o+rx%1}Gk}#2lW&P-3ueT4(aoi}sdh2t;3~6omZBDA1qJJ)m9rYcG
zC=AY79INInzTsX1iyTuwTBWbt<Y-qWpDs6@jghi%EnwOz|1Tq}#g1)VMx8L2xFj`z
zx9km{;Lajb>s>W^^#Ml*gZ$q%Q~L~Q&HV>V5~q}vj20{EJI{#1*>j5*sQHFPt&;>6
zGWq)3Dt*OwL7Fo8RORvCVx+7;3v9HazoD&Ktk~9lODCM|T(Tm7Zy0Qia%Ulh*T-q}
z!igX!gR<fLwcFbMos%J~zUv5fT1D5_6AMN8I0mw{F&I*Rngns!&jrHoZffuc9Y%q*
zs(5hSu}o2NVmR4qZibYXmItxfFBHO%n<x0A4o+YveaLn$m2A0*kmbfuNX9fTh`oNU
z5ISnfws$t!EIwrGOeNb#iJUB(k3y2t=0mLV3xja5mTVcb$yVSB=(i_9v961UoYy)D
zQgEgg;*JB<?Rt|t#UW3MMv3BpH-ySe7w|Lv9l#d2+CpEQWTRM%+c~-4{20>mTpNfH
zjVcJ9mwE7C<=cSmD&Q&dRbY?8Y@ok<l~b%MG&IoiEu`V>G>Fd*2SQkP(H(qiTO@>|
zqo5zb$uY2Assma}9=s2{evkxA|JNIeA<7xnw3h<<_buZwoH3E%nQlDA+;<ov_B(L!
zyi^7x-<JW?#qm(Kn8~#8m>u$rT|9!*a~RwYPT-)<tO1s#S5tiWHATi&ip(v5uQ?7#
zyQqcIX%=BE3KNjG+nz^r`g(@B0aG{x)KW3Ni+Xw8q1)JPafUKC(hhQSB@Zbpnn4Wm
z;80OZHkU54O}L{4_E|(IS&;(d7BvrJ);b1VP!xy#Ze&w$CYx_N+4eM$ZC;2Stj!fX
z^0KBf1P6`fu)~dPo}0-Q)J`^21KF}e1Xx=H9{t}>=MZss4)FJLXUNJ_J4kWsd5GS$
zAECyHqq?+}*o7&;FBV@Y%dZe-4Iu23m4t=*6IQ1o?0hL<rZ?CH!iMh=RvJoJ{2Crr
z-hm9WY8*LuCy_1Hhp^6X2rKF&?2I>I3kwMgeV4F0HDSqQOFf=UShf$8Cx=Eu)@KMX
z#{I)Xw|5?c-vv(&F8LdQq~cw`=HgnQtC;S?ex-rV!f7<#gGMb|X>?#FjTX_Lr8`A^
z{cc8b2py^fvWjSVbE3SUgqKj8nI-g=(KZJ1kz6|%b9#96HYYGFzA}*p9@4h2j>H2y
fUpS!fH3qM|avD)=H3I`F^hF>sJ~{4iY+mXAk>t33

literal 0
HcmV?d00001

diff --git a/conda/meta.yaml b/conda/meta.yaml
index a6e591aa..b0b78dc5 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -25,21 +25,21 @@ requirements:
   host:
     - python {{ python }}
     - setuptools {{ setuptools }}
-    - torchvision  {{ torchvision }} # [linux]
-    - pytorch {{ pytorch }} # [linux]
     - numpy {{ numpy }}
+    - h5py {{ h5py }}
+    - pytorch {{ pytorch }} # [linux]
+    - torchvision  {{ torchvision }} # [linux]
     - bob.extension
-    - bob.core
-    - bob.io.base
   run:
     - python
     - setuptools
+    - {{ pin_compatible('numpy') }}
+    - {{ pin_compatible('pillow') }}
+    - {{ pin_compatible('pandas') }}
+    - {{ pin_compatible('matplotlib') }}
     - {{ pin_compatible('pytorch') }} # [linux]
     - {{ pin_compatible('torchvision') }} # [linux]
-    - {{ pin_compatible('numpy') }}
-    - pandas
-    - pillow
-    - matplotlib
+    - {{ pin_compatible('h5py') }}
     - tqdm
     - tabulate
 
diff --git a/requirements.txt b/requirements.txt
index 8eff46db..e394a12d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,9 +1,8 @@
-bob.core
 bob.extension
-bob.io.base
 matplotlib
 numpy
 pandas
+h5py
 pillow
 setuptools
 tabulate
-- 
GitLab