Commit 38b0325f authored by Amir MOHAMMADI

Add pre-commit

parent 45127abf
1 merge request: !124 Remove deprecated code
Pipeline #60712 failed
Showing changed files with 198 additions and 76 deletions
[flake8]
max-line-length = 80
ignore = E501,W503,E302,E402,E203
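
Note: this ignore list is the usual flake8 companion to black. E203 (whitespace before ':') and W503 (line break before binary operator) conflict with black's formatting, E501 defers line-length enforcement to black, and E302/E402 relax blank-line and import-position checks.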
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/timothycrosley/isort
    rev: 5.9.3
    hooks:
      - id: isort
        args: [--settings-path, "pyproject.toml"]
  - repo: https://github.com/psf/black
    rev: 21.7b0
    hooks:
      - id: black
  - repo: https://gitlab.com/pycqa/flake8
    rev: 3.9.2
    hooks:
      - id: flake8
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v4.0.1
    hooks:
      - id: check-ast
      - id: check-case-conflict
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: debug-statements
      - id: check-added-large-files
      - id: check-yaml
        exclude: .*/meta.yaml
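
With this configuration in place, the hooks are typically enabled and exercised with the standard pre-commit CLI (a sketch of the usual workflow, not part of this commit): pip install pre-commit, then pre-commit install to register the git hook, and pre-commit run --all-files to check the whole tree once.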
@@ -4,4 +4,3 @@ recursive-include bob/pad/face/config/preprocessor/dictionaries *.hdf5
recursive-include doc *.py *.rst *.ico *.png
recursive-include bob/pad/face/test/data *.hdf5 *.png
recursive-include bob/pad/face/config *.xml
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)

# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
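
The extend_path idiom above declares bob.pad (and bob.pad.face below) as pkgutil-style namespace packages, so separately installed bob distributions can all contribute modules under the same package prefix; the same boilerplate is repeated in each package-level __init__.py, as seen twice here.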
from . import database, extractor, preprocessor  # noqa: F401


def get_config():
    """Returns a string containing the configuration information."""

    import bob.extension

    return bob.extension.get_config(__name__)


# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith("_")]
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

import bob.pipelines as mario
from bob.pad.face.transformer import VideoToFrames

preprocessor = globals().get("preprocessor")
extractor = globals().get("extractor")
@@ -21,7 +23,9 @@ param_grid = [
classifier = GridSearchCV(SVC(), param_grid=param_grid, cv=3)
classifier = mario.wrap(
    ["sample"],
    classifier,
    fit_extra_arguments=[("y", "is_bonafide")],
)
...
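
For context, fit_extra_arguments=[("y", "is_bonafide")] instructs the bob.pipelines sample wrapper to collect each sample's is_bonafide attribute and forward it to fit as the label vector y. Stripped of that wrapping, the underlying grid search behaves like this minimal sketch (toy data and a hypothetical grid; the real param_grid is elided above):

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

# Toy features; the labels stand in for each sample's is_bonafide flag.
X = np.random.rand(20, 4)
y = np.array([1] * 10 + [0] * 10)

# Hypothetical grid, for illustration only.
param_grid = [{"C": [1, 10], "kernel": ["rbf", "linear"]}]
classifier = GridSearchCV(SVC(), param_grid=param_grid, cv=3)
classifier.fit(X, y)
print(classifier.best_params_)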
from .casiafasd import CasiaFasdPadDatabase
from .casiasurf import CasiaSurfPadDatabase
from .database import VideoPadSample  # noqa: F401
from .maskattack import MaskAttackPadDatabase
from .oulunpu import OulunpuPadDatabase
from .replay_attack import ReplayAttackPadDatabase
from .replay_mobile import ReplayMobilePadDatabase
from .swan import SwanPadDatabase
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.
    Fixing sphinx warnings of not being able to find classes, when path is
    shortened. Parameters:

    *args: An iterable of objects to modify

    Resolves `Sphinx referencing issues
    <https://github.com/sphinx-doc/sphinx/issues/3048>`
    """

    for obj in args:
        obj.__module__ = __name__

@@ -34,4 +34,4 @@ __appropriate__(
    OulunpuPadDatabase,
)
__all__ = [_ for _ in dir() if not _.startswith("_")]
import logging
import os

from functools import partial

from sklearn.preprocessing import FunctionTransformer

import bob.io.base

from bob.bio.video import VideoLikeContainer
from bob.extension import rc
from bob.extension.download import get_file
from bob.pad.base.database import FileListPadDatabase
from bob.pipelines import DelayedSample

logger = logging.getLogger(__name__)
@@ -45,7 +48,9 @@ def casia_surf_multistream_load(samples, original_directory, stream_type):
        paths = []
        for mod in mods:
            paths.append(
                os.path.join(
                    original_directory or "", getattr(sample, mod_to_attr[mod])
                )
            )
        data = partial(load_multi_stream, mods, paths)
        return DelayedSample(data, parent=sample, annotations=None)
@@ -56,7 +61,9 @@ def casia_surf_multistream_load(samples, original_directory, stream_type):
def CasiaSurfMultiStreamSample(original_directory, stream_type):
    return FunctionTransformer(
        casia_surf_multistream_load,
        kw_args=dict(
            original_directory=original_directory, stream_type=stream_type
        ),
    )
...
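
The kw_args mechanism used above is plain scikit-learn: FunctionTransformer forwards the kw_args dict as keyword arguments to the wrapped function on every transform call. A self-contained sketch:

import numpy as np
from sklearn.preprocessing import FunctionTransformer

def add_offset(values, offset=0):
    # keyword arguments supplied as kw_args at construction time arrive here
    return values + offset

transformer = FunctionTransformer(add_offset, kw_args=dict(offset=10))
print(transformer.transform(np.array([1, 2, 3])))  # [11 12 13]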
import os

from functools import partial

from sklearn.preprocessing import FunctionTransformer

import bob.bio.video

from bob.bio.base.utils.annotations import read_annotation_file
from bob.bio.video import VideoAsArray
from bob.pipelines import DelayedSample

from ..utils import frames, number_of_frames


def get_no_transform(x):
    return None


def delayed_video_load(
    samples,
    original_directory,
...
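
delayed_video_load builds DelayedSample objects whose payload is a deferred callable, so a video is only decoded when its data attribute is first accessed. A minimal sketch of that pattern in plain Python (DelayedSampleSketch is a stand-in, not bob.pipelines' actual class):

from functools import partial

class DelayedSampleSketch:
    """Stand-in illustrating delayed loading: data is computed on access."""

    def __init__(self, load, **attributes):
        self._load = load
        self.__dict__.update(attributes)

    @property
    def data(self):
        return self._load()

def read_video(path):
    return "frames of " + path  # placeholder for real video decoding

sample = DelayedSampleSketch(partial(read_video, "video.avi"), key="client1")
print(sample.data)  # the "video" is only read at this point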
import logging

from sklearn.pipeline import make_pipeline

from bob.extension import rc
from bob.extension.download import get_file
from bob.pad.base.database import FileListPadDatabase
from bob.pad.face.database import VideoPadSample
from bob.pipelines.transformers import Str_To_Types, str_to_bool

logger = logging.getLogger(__name__)

...
def __appropriate__(*args):
    """Says object was actually declared here, and not in the import module.
    Fixing sphinx warnings of not being able to find classes, when path is
@@ -19,4 +17,4 @@ def __appropriate__(*args):
__appropriate__()
__all__ = [_ for _ in dir() if not _.startswith("_")]
from collections import OrderedDict

from sklearn.base import BaseEstimator, TransformerMixin

from bob.bio.base.annotator.FailSafe import translate_kwargs
from bob.bio.video import VideoLikeContainer

from ..utils import extract_patches


class ImagePatches(TransformerMixin, BaseEstimator):
@@ -55,7 +58,10 @@ class VideoPatches(TransformerMixin, BaseEstimator):
    def transform(self, videos, annotations=None):
        kwargs = translate_kwargs(dict(annotations=annotations), len(videos))
        return [
            self.transform_one_video(vid, **kw)
            for vid, kw in zip(videos, kwargs)
        ]

    def transform_one_video(self, frames, annotations=None):
        annotations = annotations or {}
@@ -76,7 +82,10 @@ class VideoPatches(TransformerMixin, BaseEstimator):
            # extract patches
            patches = extract_patches(
                preprocessed,
                self.block_size,
                self.block_overlap,
                self.n_random_patches,
            )
            all_patches.extend(patches)

...
@@ -23,4 +23,4 @@ __appropriate__(
    ImagePatches,
    VideoPatches,
)
__all__ = [_ for _ in dir() if not _.startswith("_")]
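
extract_patches above slices each preprocessed frame into blocks of block_size with a given overlap, optionally keeping only n_random_patches of them. A rough, self-contained sketch of dense patch extraction (not bob's actual implementation, and ignoring overlap and random sampling):

import numpy as np

def extract_patches_sketch(image, block_size, step):
    # slide a block_size window over a 2D image with the given step
    height, width = image.shape
    bh, bw = block_size
    patches = []
    for row in range(0, height - bh + 1, step):
        for col in range(0, width - bw + 1, step):
            patches.append(image[row : row + bh, col : col + bw])
    return np.stack(patches)

image = np.arange(16).reshape(4, 4)
print(extract_patches_sketch(image, (2, 2), 2).shape)  # (4, 2, 2)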
"""Gets statistics on the average face size in a video database. """Gets statistics on the average face size in a video database.
""" """
import logging import logging
from os.path import expanduser
import click import click
import numpy as np import numpy as np
from os.path import expanduser
from bob.extension.scripts.click_helper import (
verbosity_option,
ConfigCommand,
ResourceOption,
)
from bob.bio.face.annotator import ( from bob.bio.face.annotator import (
BoundingBox,
bounding_box_from_annotation, bounding_box_from_annotation,
expected_eye_positions, expected_eye_positions,
BoundingBox, )
from bob.extension.scripts.click_helper import (
ConfigCommand,
ResourceOption,
verbosity_option,
) )
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -108,14 +111,20 @@ def statistics(database, output, database_directories_file, **kwargs):
    ):
        click.echo(
            "min: {}, mean: {}, max: {}, std: {:.1f} for {}".format(
                array.min(),
                int(array.mean()),
                array.max(),
                array.std(),
                name,
            )
        )
    # print the average eye distance assuming bounding boxes are from
    # bob.ip.facedetect or the annotations had eye locations in them
    bbx = BoundingBox((0, 0), face_sizes.mean(axis=0))
    annot = expected_eye_positions(bbx)
    eye_distance = np.linalg.norm(
        np.array(annot["reye"]) - np.array(annot["leye"])
    )
    click.echo("Average eye locations: {}".format(annot))
    click.echo("Average eye distance: {}".format(int(eye_distance)))
@@ -141,7 +150,11 @@ def statistics(database, output, database_directories_file, **kwargs):
        # ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.viridis)
        plt.hist(
            face_sizes[:, 1],
            density=True,
            bins="auto",
            label=attack_type,
            alpha=0.5,
        )

    if output:
        plt.xlabel("Width of faces")

...
import os

import bob.io.base

from bob.bio.base.database import AtntBioDatabase
from bob.bio.base.database.legacy import (
    check_parameters_for_validity,
    convert_names_to_lowlevel,
)
from bob.bio.video import VideoLikeContainer
from bob.pad.base.pipelines.vanilla_pad.abstract_classes import Database
from bob.pipelines import DelayedSample
def DummyPadSample(
    path,
    original_directory,
    client_id,
    key,
    attack_type,
    none_annotations=False,
):
    def load():
        file_name = os.path.join(original_directory, path + ".pgm")

...
@@ -2,11 +2,19 @@ import numpy
A_org = numpy.array(range(1, 17), "float64").reshape((4, 4))
A_ans_0_3D = numpy.array(
    [
        [[1, 2], [5, 6]],
        [[3, 4], [7, 8]],
        [[9, 10], [13, 14]],
        [[11, 12], [15, 16]],
    ],
    "float64",
)
A_ans_0_4D = numpy.array(
    [
        [[[1, 2], [5, 6]], [[3, 4], [7, 8]]],
        [[[9, 10], [13, 14]], [[11, 12], [15, 16]]],
    ],
    "float64",
)

...
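
The expected arrays above are the four 2x2 blocks of the 4x4 input, either stacked (3D) or kept as a 2x2 block grid (4D). The same decomposition can be reproduced with a reshape/transpose round-trip, shown here as a sanity check:

import numpy

A_org = numpy.array(range(1, 17), "float64").reshape((4, 4))

# split the 4x4 array into a 2x2 grid of 2x2 blocks ...
blocks_4d = A_org.reshape(2, 2, 2, 2).transpose(0, 2, 1, 3)
# ... then flatten the grid into a stack of four blocks
blocks_3d = blocks_4d.reshape(4, 2, 2)

print(blocks_3d[0])  # [[1. 2.] [5. 6.]] -- the top-left block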
@@ -2,10 +2,11 @@
# vim: set fileencoding=utf-8 :
# Thu May 24 10:41:42 CEST 2012
import numpy as np

from nose.plugins.skip import SkipTest

import bob.bio.base


def test_replayattack():
@@ -31,10 +32,13 @@ def test_replayattack():
    assert len(database.samples(groups=["train", "dev"])) == 720
    assert len(database.samples(groups=["train"])) == 360
    assert (
        len(database.samples(groups=["train", "dev", "eval"], purposes="real"))
        == 200
    )
    assert (
        len(
            database.samples(groups=["train", "dev", "eval"], purposes="attack")
        )
        == 1000
    )
@@ -71,10 +75,14 @@ def test_replaymobile():
    assert len(database.samples(groups=["train", "dev"])) == 728
    assert len(database.samples(groups=["train"])) == 312
    assert (
        len(database.samples(groups=["train", "dev", "eval"], purposes="real"))
        == 390
    )
    assert (
        len(
            database.samples(groups=["train", "dev", "eval"], purposes="attack")
        )
        == 640
    )

    all_samples = database.sort(database.samples())
@@ -133,11 +141,18 @@ def test_maskattack():
    )
    # all real sequences: 2 sessions, 5 recordings for 17 individuals
    assert (
        len(
            maskattack.samples(groups=["train", "dev", "eval"], purposes="real")
        )
        == 170
    )
    # all attacks: 1 session, 5 recordings for 17 individuals
    assert (
        len(
            maskattack.samples(
                groups=["train", "dev", "eval"], purposes="attack"
            )
        )
        == 85
    )
@@ -189,11 +204,17 @@ def test_casiasurf_color_protocol():
    assert len(casiasurf.samples(groups=["train"], purposes="attack")) == 20324
    assert len(casiasurf.samples(groups=("dev",), purposes=("real",))) == 2994
    assert len(casiasurf.samples(groups=("dev",), purposes=("attack",))) == 6614
    assert (
        len(casiasurf.samples(groups=("dev",), purposes=("real", "attack")))
        == 9608
    )
    assert len(casiasurf.samples(groups=("eval",), purposes=("real",))) == 17458
    assert (
        len(casiasurf.samples(groups=("eval",), purposes=("attack",))) == 40252
    )
    assert (
        len(casiasurf.samples(groups=("eval",), purposes=("real", "attack")))
        == 57710
    )
@@ -234,10 +255,13 @@ def test_swan():
    assert len(database.samples(groups=["train", "dev"])) == 2803
    assert len(database.samples(groups=["train"])) == 2001
    assert (
        len(database.samples(groups=["train", "dev", "eval"], purposes="real"))
        == 3300
    )
    assert (
        len(
            database.samples(groups=["train", "dev", "eval"], purposes="attack")
        )
        == 2502
    )
@@ -287,7 +311,10 @@ def test_oulunpu():
        "Protocol_4_6",
    ]
    assert database.groups() == ["dev", "eval", "train"]
    assert (
        len(database.samples(groups=["train", "dev", "eval"]))
        == 1200 + 900 + 600
    )
    assert len(database.samples(groups=["train", "dev"])) == 1200 + 900
    assert len(database.samples(groups=["train"])) == 1200
    assert (
@@ -295,7 +322,9 @@ def test_oulunpu():
        == 240 + 180 + 120
    )
    assert (
        len(
            database.samples(groups=["train", "dev", "eval"], purposes="attack")
        )
        == 960 + 720 + 480
    )

...
import bob.pipelines as mario

from bob.bio.video import VideoLikeContainer
from bob.pad.face.transformer import VideoToFrames

...
import imageio
import numpy

from nose.tools import raises

from bob.pad.face.test.dummy.database import DummyDatabase as Database
from bob.pad.face.utils import (
    blocks,
    frames,
    number_of_frames,
    scale_face,
    yield_faces,
)


def get_pad_sample(none_annotations=False):
    sample = Database(none_annotations=none_annotations).samples(
        ("train", "dev")
    )[0]

    return sample
@@ -15,7 +25,9 @@ image = get_pad_sample().data[0]
def test_video_frames():
    # get the path to cockatoo.mp4 from imageio-ffmpeg
    path = imageio.core.Request(
        "imageio:cockatoo.mp4", "r"
    ).get_local_filename()
    # read 2 frames
    for i, frame in enumerate(frames(path)):
        assert frame.shape == (3, 720, 1280), frame.shape
@@ -32,7 +44,6 @@ def test_video_frames():
    assert n_frames == 280, n_frames


def dummy_cropper(frame, annotations=None):
    return frame

...
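
The frames helper exercised above yields video frames in bob's channels-first layout, hence the (3, 720, 1280) assertion. A sketch of an equivalent reader built directly on imageio (not the actual bob.pad.face.utils implementation):

import imageio

def iter_frames(path):
    # imageio yields (height, width, 3) arrays; transpose to channels-first
    for frame in imageio.get_reader(path):
        yield frame.transpose(2, 0, 1)

path = imageio.core.Request("imageio:cockatoo.mp4", "r").get_local_filename()
print(next(iter_frames(path)).shape)  # (3, 720, 1280)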