Commit ebb87d9c authored by André Anjos's avatar André Anjos 💬

Remove outdated code; Improve annotator to handle lines, points and polygons

parent 006df864
Pipeline #26399 failed with stages
in 13 minutes and 11 seconds
This diff is collapsed.
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Fri 29 Jun 2012 13:42:57 CEST
# -*- coding: utf-8 -*-
"""A set of utilities and library functions to handle keypoint annotations."""
import os
import six
import numpy
def save_multiple(data, fp, backup=False):
    """Saves a given annotation data set to a file in ``.npz`` format.

    Parameters:

      data : Data array(s) to be saved.  In case of line annotations it is
        a tuple of lists of the keypoints ``(y, x)``.

      fp (file, str): File or filename to which the data is saved.  The
        extension should be ``.npz``.  If a file object is passed, it must
        have been opened in **binary** mode.

      backup (:py:obj:`bool`, Optional): If set, backs-up a possibly existing
        file path before overriding it.  Note this is not valid in case ``fp``
        above points to an opened file.


    Example with ``numpy.savez`` that is used in this function::

      data = ([(1, 2), (2, 3)], [(5, 6)])
      numpy.savez("test.npz", data)

      # read the data back:
      npzdata = numpy.load("test.npz")
      npzdata.files
      npzdata['arr_0']


    Returns:

      int: Zero, in case of success.

    """
    if isinstance(fp, str):
        if backup and os.path.exists(fp):
            bname = fp + '~'
            if os.path.exists(bname):
                os.unlink(bname)
            os.rename(fp, bname)
        # ``numpy.savez`` writes a binary zip container, so the file must be
        # opened in binary mode ('wb'), not text mode ('wt')
        fp = open(fp, 'wb')
    numpy.savez(fp, data)
    fp.close()
    return 0
def load_multiple(fp):
    """Loads a given annotation data set from a ``.npz`` file.

    Parameters:

      fp (file, str): The name of a file, with full path, to be used for
        reading the data, or an already opened file-like object that accepts
        the ``read()`` call.  If a file object is passed, it must have been
        opened in **binary** mode.


    Returns:

      tuple: Containing the lists of line points (annotations), in the
        format ``(y, x)``.  That is Bob's style.

    """
    if isinstance(fp, str):
        # ``.npz`` is a binary zip container, so it must be read in binary
        # mode ('rb'), not text mode ('rt')
        fp = open(fp, 'rb')
    data = numpy.load(fp)
    retval = data['arr_0'].tolist()
    fp.close()
    return retval
import json
def save(data, fp, backup=False):
......@@ -88,8 +14,7 @@ def save(data, fp, backup=False):
Parameters:
data (numpy.ndarray): A dictionary where the keys are frame numbers and the
values are lists of tuples indicating each of the keypoints in (y, x)
data (list): A list of lists, each containing points in the format ``(y,x)``
fp (file, str): The name of a file, with full path, to be used for recording
the data or an already opened file-like object, that accepts the "write()"
......@@ -108,9 +33,9 @@ def save(data, fp, backup=False):
if os.path.exists(bname): os.unlink(bname)
os.rename(fp, bname)
fp = open(fp, 'wb')
fp = open(fp, 'w')
return numpy.savetxt(fp, data, fmt='%d')
json.dump(data, fp)
def load(fp):
......@@ -126,13 +51,11 @@ def load(fp):
Returns:
list: Containing the matrix of loaded annotations, one per row,
in the format (y, x). That is Bob's style.
list: A list of lists, each containing points in the format ``(y,x)``
"""
if isinstance(fp, six.string_types):
fp = open(fp, 'rt')
fp = open(fp, 'r')
retval = numpy.loadtxt(fp, ndmin=2)
return list([tuple(k) for k in retval])
return json.load(fp)
This diff is collapsed.
This diff is collapsed.
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""A TkInter-based keypoint annotation tool for images (%(version)s)
Usage:
%(prog)s [-v ...] [-r] [-f] [-s] [-z <value>] [-b=<path>] [-o=<path>] <image>...
Arguments:
<image> Paths to the images to be annotated. The paths given will be made
absolute and duplicates will be removed. The common directory
between all images is saved as a base directory for the application.
The output directory will contain the annotations taking into
consideration the path structure of input images starting from that
base directory. For example, if input images are "A/B/img1.png" and
"A/C/img2.png", then the output annotations will be available in
"<output-path>/B/img1.txt" and "<output-path>/C/img2.txt"
Options:
-h, --help Shows this help message and exit
-V, --version Shows program's version number and exit
-o, --output=<path> The output directory where annotations will be saved
[default: .]
-v, --verbose If set, increases the verbosity level - may be given
twice, to enable debug output
-r, --readonly If set, then only visualizes current annotations (does
not overwrite existing ones)
-z, --zoom=<value> Provides the zoom multiplier for the input image, as a
floating number. This number has to be greater than 0.0.
Values larger than 1.0 will make the image bigger than
what it really is, while values between 0 and 1.0 will
make it smaller. [default: 1.0]
-f, --imfilter If set, images are filtered using adaptive hystogram.
-s, --subdir If set, all files in the subdirectories with extension
set after the "*" will be included.
-b, --basedir=<path> If given, use this as the basedir instead of basedir
acquired from image paths.
import click
from bob.extension.scripts.click_helper import verbosity_option, \
ResourceOption
@click.command(epilog='''
Examples:
Annotate two images, results in the directory `./tmp':
$ %(prog)s -v --output=./tmp A/B/img1.png A/C/img2.png
$ bob annotate -v --output=./tmp A/B/img1.png A/C/img2.png
Annotate all images, results in the directory `./tmp':
$ %(prog)s -v --output=./tmp A/*/*.png
$ bob annotate -v --output=./tmp A/*/*.png
Annotate the image of Lena, with a 2x zoom, get the result in `lena.txt`:
$ %(prog)s -v --zoom=2 bob/ip/annotator/data/lena.jpg
$ bob annotate -v --zoom=2 bob/ip/annotator/data/lena.jpg
Visualize annotations for Lena, with a 0.5x zoom, from the file `lena.txt`:
$ %(prog)s -v --readonly --zoom=0.5 bob/ip/annotator/data/lena.jpg
"""
import os
import sys
import pkg_resources
import logging
logger = logging.getLogger(__name__)
from docopt import docopt
from ..gui import AnnotatorApp
def main(user_input=None):
# Parse the command-line arguments
if user_input is not None:
arguments = user_input
else:
arguments = sys.argv[1:]
completions = dict(
prog = os.path.basename(sys.argv[0]),
version = pkg_resources.require('bob.ip.annotator')[0].version,
)
args = docopt(
__doc__ % completions,
argv=arguments,
options_first=True,
version=completions['version'],
)
# If the user wants more verbosity, lower the level
if args['--verbose'] == 1: logging.getLogger().setLevel(logging.INFO)
elif args['--verbose'] >= 2: logging.getLogger().setLevel(logging.DEBUG)
args['--output'] = os.path.abspath(os.path.expanduser(args['--output']))
args['<image>'] = [os.path.abspath(k) for k in args['<image>']]
if args['--subdir'] == True:
temp = []
for paths in args['<image>']:
splitted_path = paths.split("*")
if len(splitted_path) != 2:
raise IOError("When -s flag is set, file path and it's extension, seperated with '*' is expected, e.g.: 'a/b/*.png'")
for path, subdirs, files in os.walk(splitted_path[0]):
for name in files:
if name.endswith(splitted_path[1]):
temp.append(os.path.join(path, name))
args['<image>'] = temp
app = AnnotatorApp(args['<image>'], args['--output'], args['--readonly'],
float(args['--zoom']),args['--basedir'], args['--imfilter'])
$ bob annotate -v --readonly --zoom=0.5 bob/ip/annotator/data/lena.jpg
''')
@verbosity_option(cls=ResourceOption)
@click.argument('images', required=False, type=click.Path(file_okay=False,
dir_okay=True, writable=False, readable=True, exists=True))
@click.argument('annotations', required=False, type=click.Path(file_okay=False,
dir_okay=True, writable=True, readable=True, exists=False))
@click.option('-e', '--extension', default='.png',
show_default=True, help='The type of files to look for on the images' \
'directory')
@click.option('-z', '--zoom', default=1.0, type=click.FLOAT,
show_default=True, help='The zoom-level to apply for images. Choosing' \
'a value larger than 1.0 increases the app window size. If you ' \
'choose a value smaller than 1.0, it decreases it. Use this ' \
'setting to adjust window size on your screen')
@click.option('-r', '--readonly/--no-readonly', default=False,
help='Set this avoid any changes to existing annotations')
def annotate(images, annotations, extension, zoom, readonly):
    """A TkInter-based keypoint annotation tool for images"""
    # Import lazily so the CLI help remains usable without a display
    from ..gui import AnnotatorApp

    # Build the Tk application and hand over control to its event loop;
    # NOTE(review): ``extension`` appears unused here - presumably consumed
    # by AnnotatorApp elsewhere or leftover from the CLI options; confirm.
    application = AnnotatorApp(images, annotations, readonly, zoom)
    application.mainloop()
    return 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""A TkInter-based keypoint annotation tool for images (%(version)s)
Usage:
%(prog)s [-v ...] [-r] [-s] [-z <value>] [-a=<path>] [-b=<path>] [-o=<path>] [-c] <image>...
Arguments:
<image> Paths to the images to be annotated. The paths given will be made
absolute and duplicates will be removed. The common directory
between all images is saved as a base directory for the application.
The output directory will contain the annotations taking into
consideration the path structure of input images starting from that
base directory. For example, if input images are "A/B/img1.png" and
"A/C/img2.png", then the output annotations will be available in
"<output-path>/B/img1.txt" and "<output-path>/C/img2.txt"
Options:
-h, --help Shows this help message and exit
-V, --version Shows program's version number and exit
-o, --output=<path> The output directory where annotations will be saved
[default: .]
-a,--annotdir=<path> The base directory where annotations are saved for given
input files. It is assumed that annotation folder-structure
are the same as for the images.
-v, --verbose If set, increases the verbosity level - may be given
twice, to enable debug output
-r, --readonly If set, then only visualizes current annotations (does
not overwrite existing ones)
-c --cutroi If set, ROI is cut from the input image.
-z, --zoom=<value> Provides the zoom multiplier for the input image, as a
floating number. This number has to be greater than 0.0.
Values larger than 1.0 will make the image bigger than
what it really is, while values between 0 and 1.0 will
make it smaller. [default: 1.0]
-s, --subdir If set, all files in the subdirectories with extension
set after the "*" will be included.
-b, --basedir=<path> If given, use this as the basedir instead of basedir
acquired from image paths.
Examples:
Annotate two images, results in the directory `./tmp':
$ %(prog)s -v --output=./tmp A/B/img1.png A/C/img2.png
Annotate all images, results in the directory `./tmp':
$ %(prog)s -v --output=./tmp A/*/*.png
Annotate the image of Lena, with a 2x zoom, get the result in `lena.txt`:
$ %(prog)s -v --zoom=2 bob/ip/annotator/data/lena.jpg
Visualize annotations for Lena, with a 0.5x zoom, from the file `lena.txt`:
$ %(prog)s -v --readonly --zoom=0.5 bob/ip/annotator/data/lena.jpg
"""
import os
import sys
import pkg_resources
import logging
logger = logging.getLogger(__name__)
from docopt import docopt
from ..line_gui import LineAnnotatorApp
def main(user_input=None):
    """Main entry-point for the line annotation application.

    Parameters:

      user_input (list, Optional): Command-line arguments to use instead of
        ``sys.argv[1:]`` (useful for testing).


    Returns:

      int: Zero, in case of success.

    """
    # Parse the command-line arguments
    arguments = user_input if user_input is not None else sys.argv[1:]

    completions = dict(
        prog=os.path.basename(sys.argv[0]),
        version=pkg_resources.require('bob.ip.annotator')[0].version,
    )

    args = docopt(
        __doc__ % completions,
        argv=arguments,
        options_first=True,
        version=completions['version'],
    )

    # If the user wants more verbosity, lower the logging level
    if args['--verbose'] == 1:
        logging.getLogger().setLevel(logging.INFO)
    elif args['--verbose'] >= 2:
        logging.getLogger().setLevel(logging.DEBUG)

    args['--output'] = os.path.abspath(os.path.expanduser(args['--output']))
    args['<image>'] = [os.path.abspath(k) for k in args['<image>']]

    # With -s/--subdir, each argument is a "<directory>*<extension>" pattern
    # that is expanded by walking the directory tree recursively
    if args['--subdir']:
        temp = []
        for paths in args['<image>']:
            splitted_path = paths.split("*")
            if len(splitted_path) != 2:
                raise IOError(
                    "When the -s flag is set, a file path and its extension, "
                    "separated by '*', are expected, e.g.: 'a/b/*.png'")
            for path, subdirs, files in os.walk(splitted_path[0]):
                for name in files:
                    if name.endswith(splitted_path[1]):
                        temp.append(os.path.join(path, name))
        args['<image>'] = temp

    app = LineAnnotatorApp(args['<image>'], args['--annotdir'],
                           args['--output'], args['--readonly'],
                           float(args['--zoom']), args['--cutroi'],
                           args['--basedir'])
    app.mainloop()
    return 0
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""A TkInter-based keypoint annotation tool for images (%(version)s)
Usage:
%(prog)s [-v ...] [-r] [-s] [-z <value>] [-a=<path>] [-b=<path>] [-o=<path>] [-c] <image>...
Arguments:
<image> Paths to the images to be annotated. The paths given will be made
absolute and duplicates will be removed. The common directory
between all images is saved as a base directory for the application.
The output directory will contain the annotations taking into
consideration the path structure of input images starting from that
base directory. For example, if input images are "A/B/img1.png" and
"A/C/img2.png", then the output annotations will be available in
"<output-path>/B/img1.txt" and "<output-path>/C/img2.txt"
Options:
-h, --help Shows this help message and exit
-V, --version Shows program's version number and exit
-o, --output=<path> The output directory where annotations will be saved
[default: .]
-a,--annotdir=<path> The base directory where annotations are saved for given
input files. It is assumed that annotation folder-
structure are the same as for the images.
-v, --verbose If set, increases the verbosity level - may be given
twice, to enable debug output
-r, --readonly If set, then only visualizes current annotations (does
not overwrite existing ones)
-c --cutroi If set, ROI is cut from the input image.
-z, --zoom=<value> Provides the zoom multiplier for the input image, as a
floating number. This number has to be greater than 0.0.
Values larger than 1.0 will make the image bigger than
what it really is, while values between 0 and 1.0 will
make it smaller. [default: 1.0]
-s, --subdir If set, all files in the subdirectories with extension
set after the "*" will be included.
-b, --basedir=<path> If given, use this as the basedir instead of basedir
acquired from image paths.
Examples:
Annotate two images, results in the directory `./tmp':
$ %(prog)s -v --output=./tmp A/B/img1.png A/C/img2.png
Annotate all images, results in the directory `./tmp':
$ %(prog)s -v --output=./tmp A/*/*.png
Annotate the image of Lena, with a 2x zoom, get the result in `lena.txt`:
$ %(prog)s -v --zoom=2 bob/ip/annotator/data/lena.jpg
Visualize annotations for Lena, with a 0.5x zoom, from the file `lena.txt`:
$ %(prog)s -v --readonly --zoom=0.5 bob/ip/annotator/data/lena.jpg
"""
import os
import sys
import pkg_resources
import logging
logger = logging.getLogger(__name__)
from docopt import docopt
from ..point_gui import PointAnnotatorApp
def main(user_input=None):
    """Main entry-point for the point annotation application."""
    # Either use the caller-provided arguments or fall back to the shell's
    arguments = user_input if user_input is not None else sys.argv[1:]

    completions = dict(
        prog=os.path.basename(sys.argv[0]),
        version=pkg_resources.require('bob.ip.annotator')[0].version,
    )

    args = docopt(
        __doc__ % completions,
        argv=arguments,
        options_first=True,
        version=completions['version'],
    )

    # Lower the root logging level according to the requested verbosity
    verbosity = args['--verbose']
    if verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif verbosity == 1:
        logging.getLogger().setLevel(logging.INFO)

    args['--output'] = os.path.abspath(os.path.expanduser(args['--output']))
    args['<image>'] = [os.path.abspath(k) for k in args['<image>']]

    # With -s/--subdir, each argument is a "<dir>*<ext>" pattern expanded
    # by recursively walking the directory tree
    if args['--subdir'] is True:
        collected = []
        for pattern in args['<image>']:
            pieces = pattern.split("*")
            if len(pieces) != 2:
                raise IOError(
                    "When -s flag is set, file path and it's extension, "
                    "seperated with '*' is expected, e.g.: 'a/b/*.png'")
            for root, _, filenames in os.walk(pieces[0]):
                collected.extend(os.path.join(root, f)
                                 for f in filenames if f.endswith(pieces[1]))
        args['<image>'] = collected

    app = PointAnnotatorApp(args['<image>'],
                            args['--annotdir'],
                            args['--output'],
                            args['--readonly'],
                            float(args['--zoom']),
                            args['--cutroi'],
                            args['--basedir'])
    app.mainloop()
    return 0
This diff is collapsed.
......@@ -6,10 +6,6 @@ package:
version: {{ environ.get('BOB_PACKAGE_VERSION', '0.0.1') }}
build:
entry_points:
- annotate.py = bob.ip.annotator.script.annotate:main
- annotate_line.py = bob.ip.annotator.script.annotate_line:main
- annotate_point.py = bob.ip.annotator.script.annotate_point:main
number: {{ environ.get('BOB_BUILD_NUMBER', 0) }}
run_exports:
- {{ pin_subpackage(name) }}
......@@ -31,7 +27,6 @@ requirements:
- bob.extension
- bob.io.image
- six
- docopt
- pillow
- scipy
- scikit-image
......@@ -41,18 +36,16 @@ test:
imports:
- {{ name }}
commands:
- annotate.py --help
- annotate_line.py --help
- annotate_point.py --help
- nosetests --with-coverage --cover-package={{ name }} -sv {{ name }}
#- nosetests --with-coverage --cover-package={{ name }} -sv {{ name }}
- bob annotate --help
- sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx
- sphinx-build -aEb doctest {{ project_dir }}/doc sphinx
- conda inspect linkages -p $PREFIX {{ name }} # [not win]
- conda inspect objects -p $PREFIX {{ name }} # [osx]
requires:
#- nose
#- coverage
- bob-devel {{ bob_devel }}.*
- nose
- coverage
- sphinx
- sphinx_rtd_theme
......
......@@ -10,6 +10,12 @@ Annotation of Regions
.. automodule:: bob.ip.annotator.gui
Re-usable Widgets
-----------------
.. automodule:: bob.ip.annotator.widgets
I/O Routines
------------
......
......@@ -14,10 +14,8 @@ Documentation
.. toctree::
:maxdepth: 2
annotator-guide
line-annotator-guide
point-annotator-guide
py_api
guide
api
Indices and tables
......
.. -*- coding: utf-8 -*-
==============================
GUIDE: Wrist VEIN annotation
==============================
This guide summarizes everything one needs to know to annotate the
ROI in wrist vein images from Idiap's BIOWAVE project.
To do that you will be using the tool ``annotate_line.py`` from the
``bob.ip.annotator`` package.
All work can be divided into 3 stages:
1. Selecting the image *batches*;
2. Annotating the images;
3. Finalizing the annotation.
Selection of image *batches*
----------------------------
Go to the Google docs spreadsheet_ . There is listed *111 batches* with wrist
vein images. Each batch contains 6 images (it should take you 10-12 minutes to
complete one batch's vein annotations). Choose batches whose images you will
annotate by writing your name in the corresponding cell. **Careful -- to be
able to annotate lines first ROI annotation must be done -- choose only batches
which ROI annotation is finished.** If you get an error ``IOError: No
annotation for file...`` it can mean that:
1. No annotation has been done for the current path;
2. the one responsible for the ROI annotation hasn't executed the ``chmod``
command (please see the GUIDE about ROI annotation)
Image annotation
----------------
First, open the ``terminal`` in your Idiap machine and switch ``CONDA``
environment by typing::
source /idiap/group/torch5spro/conda/bin/activate bob-2.3.4-py27_0
Now you can annotate one batch (6 images) or multiple batches. To annotate one,
type::
/idiap/project/biowave/biowave/bob.ip.annotator/bin/annotate_line.py -c -s -vv --zoom=2 --annotdir=/idiap/project/biowave/biowave/ROI_annotations/ --output=/idiap/project/biowave/biowave/VEIN_annotations --basedir=/idiap/project/biowave/biowave/annotation_images/ /idiap/project/biowave/biowave/annotation_images/BATCH1/*.png
Where ``BATCH1``, your chosen batch name, e.g. ``001/Right``
You can also run several batches, by replacing::
BATCH1
with::
{BATCH1,BATCH2,BATCHn}
**Be careful -- no spaces between batch names!** E.g., if I want to annotate
batches ``001/Right``, ``001/Left``, ``002/Right``, the complete command is::
/idiap/project/biowave/biowave/bob.ip.annotator/bin/annotate_line.py -c -s -vv --zoom=2 --annotdir=/idiap/project/biowave/biowave/ROI_annotations/ --output=/idiap/project/biowave/biowave/VEIN_annotations --basedir=/idiap/project/biowave/biowave/annotation_images/ /idiap/project/biowave/biowave/annotation_images/{001/Right,001/Left,002/Right}/*.png
**Again remember -- no spaces!**
After running the command a GUI will open. Now you can start to mark veins in
the images.
In this GUI image filtering is introduced. Now, when GUI is opened and an image
is displayed, you can circle through image filters by pressing key ``f``. You
are able to see unfiltered image and image filtered using Contrast Limited
Adaptive Histogram Equalization (CLAHE) (more information_) with 3 different
parameters. With each press of key ``f`` (stands for *filtering*) GUI circles
through filters -- *unfiltered image* -> *filter 1* -> *filter 2* -> *filter 3*
-> *unfiltered image*.
**Attention** -- although *CLAHE* can highlight the vein pattern, this
non-linear operation can also introduce artefacts to the images and it *may*
seem that there is some vein pattern in the image, when in reality it is an
artefact caused by noise or a smooth gradient in the input image. That is why I
recommend that you circle through all the filtered images and the original one
before you mark some region as *vein*.
This GUI is similar to the previous one, but the task now is to mark the
lines. That is why now you are able to annotate multiple lines in one image.
For the first line (vein) - start clicking using **left** mouse button. The
*active* line is in blue colour, the active (last) point -- in red. As before,
you can move the active point, using arrow keys on your keyboard. You can also
delete the active point by pressing ``d`` or the whole active (blue) line by
pressing ``D``.
When you are finished with one vein (line), you can start to annotate next one
by pressing the **right** keyboard button. When you press the right button, no
annotation is set but you change the active line (you can notice that the
previously active line changes its colour to green), so it doesn't matter where
you press the right key. After you can start to annotate the second line - when
pressing the **left** mouse button a new blue line (active line) will appear
etc.
It is also possible to *circle* through the lines by pressing the key ``c``
(stands for "circle"). By pressing the key active line is switched -- now a
different line is in blue colour, if you have annotated multiple ones and you
can add points to it, delete points or delete the entire line by pressing
``D``.
As before, use the mouse scrollwheel (down), to move to the next image in the
batch or *up* to the previous image. Once you're done with the batch (scrolling
down does not change the image anymore), hit ``q`` to quit the program. All the
above is explained in the help dialogue of the annotator, hit ``?`` to get this
window.
The number of points per vein is arbitrary - we need to get shape of the
complete vein pattern.
Example:
.. image:: img/VEINS.png
To see more examples, in ``terminal`` run command (this command opens
annotations for first 3 batches in the read-only mode)::
/idiap/project/biowave/biowave/bob.ip.annotator/bin/annotate_line.py -c -s -v --readonly --zoom=2 --annotdir=/idiap/project/biowave/biowave/ROI_annotations/ --output=/idiap/project/biowave/biowave/VEIN_annotations --basedir=/idiap/project/biowave/biowave/annotation_images/ /idiap/project/biowave/biowave/annotation_images/{001/Right,001/Left,002/Right}/*.png
Finalization of annotation
--------------------------
When you are finished all annotations:
1. Go back to the Google spreadsheet_ and mark that you are done with the VEINS
annotations;
2. Run this command in the console (this command gives write access to all
people in the group, so I can move/edit the files if needed)::
find /idiap/project/biowave/biowave/VEIN_annotations/ -user $USER -exec chmod g+rw {} \;
That's it!
.. _spreadsheet: https://docs.google.com/spreadsheets/d/1-YcOitDkGDL4T0eccdkAQ0RdfqplzPvVrX-WcL8dUS8/edit?usp=sharing
.. _information: https://en.wikipedia.org/wiki/Adaptive_histogram_equalization#CLAHE