...
 
Commits (32)
......@@ -18,3 +18,6 @@ build
record.txt
miniconda.sh
miniconda/
temp
results
*.tfrecords
......@@ -4,21 +4,9 @@
[buildout]
parts = scripts
develop = src/bob.db.fargo
src/bob.bio.face
src/bob.learn.pytorch
src/bob.fusion.base
.
develop = .
eggs = bob.paper.fargo_tbiom_2019
bob.db.fargo
bob.bio.face
bob.learn.pytorch
bob.bio.gmm
bob.learn.tensorflow
bob.bio.face_ongoing
bob.bio.htface
extensions = bob.buildout
mr.developer
......@@ -28,11 +16,6 @@ auto-checkout = *
newest = false
verbose = true
[sources]
bob.db.fargo = git git@gitlab.idiap.ch:bob/bob.db.fargo.git
bob.bio.face = git git@gitlab.idiap.ch:bob/bob.bio.face.git
bob.learn.pytorch = git git@gitlab.idiap.ch:bob/bob.learn.pytorch.git rev=f55be9188a51893daa7ef9539d8da127078caa07
bob.fusion.base = git git@gitlab.idiap.ch:bob/bob.fusion.base.git
[scripts]
recipe = bob.buildout:scripts
......
......@@ -32,8 +32,8 @@ requirements:
- bob.learn.tensorflow
- bob.bio.caffe_face
- bob.ip.tensorflow_extractor
- bob.bio.face_ongoing
- bob.bio.htface
- bob.bio.face_ongoing # [linux]
- bob.bio.htface # [linux]
run:
- python
- setuptools
......
#!/usr/bin/env python

import bob.bio.face
from bob.bio.htface.database import FargoBioDatabase
from bob.extension import rc

# FARGO database, matched-condition RGB-to-NIR protocol.  Directories come
# from the user's bob configuration (~/.bobrc).
database = FargoBioDatabase(
    original_directory=rc["bob.db.fargo.directory"],
    annotation_directory=rc["bob.db.fargo.annotation_directory"],
    original_extension=".png",
    protocol='mc-rgb2nir',
    models_depend_on_protocol=True,
)

# Rough size of the training set, used to size one epoch.
samples_per_epoch = 2000 * 5
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

groups = ['dev', 'eval']
allow_missing_files = True

##### PREPROCESSOR ######
import bob.bio.face

# Input size expected by the Inception-ResNet v2 model loaded below.
CROPPED_IMAGE_HEIGHT = 160
CROPPED_IMAGE_WIDTH = 160

# Eye positions (row, column) for frontal images in the cropped frame.
RIGHT_EYE_POS = (48, 53)
LEFT_EYE_POS = (48, 107)

# Crop the face around the annotated eye positions; grayscale output.
preprocessor = bob.bio.face.preprocessor.FaceCrop(
    cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
    cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
    color_channel='gray',
)

#### EXTRACTOR #####
import bob.ip.tensorflow_extractor
from bob.learn.tensorflow.network import inception_resnet_v2_batch_norm
import tensorflow as tf
from bob.extension import rc
from bob.bio.face_ongoing.extractor import TensorflowEmbedding

model_filename = rc['bob.bio.face_ongoing.msceleb-inception-v2_batchnorm_gray']

#########
# Extraction
#########
# Graph: one grayscale 160x160 image in, L2-normalized embedding out.
inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
prelogits = inception_resnet_v2_batch_norm(
    tf.stack([tf.image.per_image_standardization(i) for i in tf.unstack(inputs)]),
    mode=tf.estimator.ModeKeys.PREDICT)[0]
# `axis` replaces the deprecated `dim` argument of tf.nn.l2_normalize.
embedding = tf.nn.l2_normalize(prelogits, axis=1, name="embedding")
extractor = TensorflowEmbedding(
    bob.ip.tensorflow_extractor.Extractor(model_filename, inputs, embedding))

## ALGORITHM
algorithm = "distance-cosine"
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

groups = ['dev', 'eval']
allow_missing_files = True

##### PREPROCESSOR ######
import bob.bio.face

# Input size expected by the Inception-ResNet v2 model loaded below.
CROPPED_IMAGE_HEIGHT = 160
CROPPED_IMAGE_WIDTH = 160

# Eye positions (row, column) for frontal images in the cropped frame.
RIGHT_EYE_POS = (48, 53)
LEFT_EYE_POS = (48, 107)

# Crop the face around the annotated eye positions; grayscale output.
preprocessor = bob.bio.face.preprocessor.FaceCrop(
    cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
    cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
    color_channel='gray',
)

#### EXTRACTOR #####
import bob.ip.tensorflow_extractor
from bob.learn.tensorflow.network import inception_resnet_v2_batch_norm
import tensorflow as tf
from bob.extension import rc
from bob.bio.face_ongoing.extractor import TensorflowEmbedding

model_filename = rc['bob.bio.face_ongoing.msceleb-inception-v2_batchnorm_gray']

#########
# Extraction
#########
# Graph: one grayscale 160x160 image in, L2-normalized embedding out.
inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1))
prelogits = inception_resnet_v2_batch_norm(
    tf.stack([tf.image.per_image_standardization(i) for i in tf.unstack(inputs)]),
    mode=tf.estimator.ModeKeys.PREDICT)[0]
# `axis` replaces the deprecated `dim` argument of tf.nn.l2_normalize.
embedding = tf.nn.l2_normalize(prelogits, axis=1, name="embedding")
extractor = TensorflowEmbedding(
    bob.ip.tensorflow_extractor.Extractor(model_filename, inputs, embedding))

## ALGORITHM
algorithm = "distance-cosine"
from bob.extension import rc as _rc
from bob.bio.face.database import FargoBioDatabase

# Preprocessed FARGO images, matched-condition depth protocol.
database = FargoBioDatabase(
    original_directory='./temp/vgg/preprocessed/',
    annotation_directory=None,
    original_extension=".png",
    protocol="mc-depth",
)

from bob.bio.base.utils import read_original_data

groups = 'world'
samples = database.all_files(groups=groups)

# Map every client id of the world set onto a contiguous integer label.
_client_ids = {str(f.client_id) for f in database.objects(groups=groups)}
CLIENT_IDS = {cid: index for index, cid in enumerate(_client_ids)}


def file_to_label(f):
    """Return the integer label assigned to the client of file ``f``."""
    return CLIENT_IDS[str(f.client_id)]


import bob.io.base
import bob.io.image
import os
import numpy


def reader(biofile):
    """Load ``biofile`` and return a ``(data, label, key)`` triple.

    The stored image is single-channel; it is replicated over three
    channels.  NOTE(review): the label is ``client_id - 1`` rather than
    ``file_to_label(biofile)`` -- presumably client ids are contiguous
    and 1-based; confirm against the database.
    """
    path = os.path.join(database.original_directory,
                        biofile.path + database.original_extension)
    single = bob.io.base.load(path)
    data = numpy.empty((single.shape[0], single.shape[1], 3), dtype=single.dtype)
    data[..., 0] = data[..., 1] = data[..., 2] = single
    label = biofile.client_id - 1
    key = str(biofile.path).encode('utf-8')
    return (data, label, key)
from bob.extension import rc as _rc
from bob.bio.face.database import FargoBioDatabase

# Preprocessed FARGO images, matched-condition NIR protocol.
database = FargoBioDatabase(
    original_directory='./temp/vgg/preprocessed/',
    annotation_directory=None,
    original_extension=".png",
    protocol="mc-nir",
)

from bob.bio.base.utils import read_original_data

groups = 'world'
samples = database.all_files(groups=groups)

# Map every client id of the world set onto a contiguous integer label.
_client_ids = {str(f.client_id) for f in database.objects(groups=groups)}
CLIENT_IDS = {cid: index for index, cid in enumerate(_client_ids)}


def file_to_label(f):
    """Return the integer label assigned to the client of file ``f``."""
    return CLIENT_IDS[str(f.client_id)]


import bob.io.base
import bob.io.image
import os
import numpy


def reader(biofile):
    """Load ``biofile`` and return a ``(data, label, key)`` triple.

    The stored image is single-channel; it is replicated over three
    channels.  NOTE(review): the label is ``client_id - 1`` rather than
    ``file_to_label(biofile)`` -- presumably client ids are contiguous
    and 1-based; confirm against the database.
    """
    path = os.path.join(database.original_directory,
                        biofile.path + database.original_extension)
    single = bob.io.base.load(path)
    data = numpy.empty((single.shape[0], single.shape[1], 3), dtype=single.dtype)
    data[..., 0] = data[..., 1] = data[..., 2] = single
    label = biofile.client_id - 1
    key = str(biofile.path).encode('utf-8')
    return (data, label, key)
from bob.extension import rc as _rc
from bob.bio.face.database import FargoBioDatabase

# Preprocessed FARGO images, matched-condition RGB protocol.
database = FargoBioDatabase(
    original_directory='./temp/vgg/preprocessed/',
    annotation_directory=None,
    original_extension=".png",
    protocol="mc-rgb",
)

from bob.bio.base.utils import read_original_data

groups = 'world'
samples = database.all_files(groups=groups)

# Map every client id of the world set onto a contiguous integer label.
_client_ids = {str(f.client_id) for f in database.objects(groups=groups)}
CLIENT_IDS = {cid: index for index, cid in enumerate(_client_ids)}


def file_to_label(f):
    """Return the integer label assigned to the client of file ``f``."""
    return CLIENT_IDS[str(f.client_id)]


import bob.io.base
import bob.io.image
import os


def reader(biofile):
    """Load ``biofile`` (RGB) and return a ``(data, label, key)`` triple.

    NOTE(review): the label is ``client_id - 1`` rather than
    ``file_to_label(biofile)`` -- presumably client ids are contiguous
    and 1-based; confirm against the database.
    """
    path = os.path.join(database.original_directory,
                        biofile.path + database.original_extension)
    data = bob.io.image.to_matplotlib(bob.io.base.load(path))
    label = biofile.client_id - 1
    key = str(biofile.path).encode('utf-8')
    return (data, label, key)
......@@ -15,8 +15,6 @@ from bob.learn.pytorch.datasets.utils import map_labels
class FargoDataset(Dataset):
"""Class representing the FARGO dataset
Only retrieves the depth training set
Attributes
----------
original_directory : str
......@@ -33,7 +31,7 @@ class FargoDataset(Dataset):
"""
def __init__(self, original_directory=rc['bob.db.fargo.directory'],
annotation_directory=rc['bob.db.fargo.annotation_directory'],
transform=None, start_index=0, modality='rgb'):
transform=None, start_index=0, modality='depth'):
"""Init function
Parameters
......
......@@ -15,8 +15,6 @@ from bob.learn.pytorch.datasets.utils import map_labels
class FargoDataset(Dataset):
"""Class representing the FARGO dataset
Only retrieves the NIR training set
Attributes
----------
original_directory : str
......@@ -33,7 +31,7 @@ class FargoDataset(Dataset):
"""
def __init__(self, original_directory=rc['bob.db.fargo.directory'],
annotation_directory=rc['bob.db.fargo.annotation_directory'],
transform=None, start_index=0, modality='rgb'):
transform=None, start_index=0, modality='nir'):
"""Init function
Parameters
......
from bob.learn.tensorflow.estimators import Logits
from bob.learn.tensorflow.network import vgg_16
from bob.learn.tensorflow.dataset import DEFAULT_FEATURE
from functools import partial
import tensorflow as tf
import os

# PLACE YOUR MODEL DIR HERE
model_dir = 'VGG-ft-depth'

# Checkpoint converted from the original Caffe VGG-Face model; it ships
# with bob.ip.tensorflow_extractor.
import bob.ip.tensorflow_extractor

vgg_path = os.path.join(
    bob.ip.tensorflow_extractor.VGGFace.get_vggpath(), "vgg_face_tf")
extra_checkpoint = {
    "checkpoint_path": vgg_path,
    "scopes": {"vgg_16/": "vgg_16/"},
}

estimator = Logits(
    vgg_16,
    optimizer=tf.train.AdagradOptimizer(1e-4),
    loss_op=tf.losses.sparse_softmax_cross_entropy,
    n_classes=25,  # identities in the world set of the FARGO database
    embedding_validation=True,
    model_dir=model_dir,
    apply_moving_averages=False,
    extra_checkpoint=extra_checkpoint,
)

# PLACE YOUR TF RECORD PATH HERE
tfrecord_filename = 'fargo_mc_depth.tfrecords'

data_shape = (224, 224, 3)
output_shape = (224, 224)
data_type = tf.uint8
batch_size = 16
epochs = 20


def vgg_16_face_normalization(serialized_example):
    """Parse one serialized example and apply VGG-Face mean subtraction."""
    # `data`, `key`, `label` layout as written by the bob tfrecord tools.
    features = tf.parse_single_example(serialized_example, features=DEFAULT_FEATURE)
    # Decode the raw bytes and restore the original image shape.
    image = tf.decode_raw(features["data"], data_type)
    image = tf.reshape(image, data_shape)
    image = tf.cast(image, tf.float32)
    # Per-channel means used by VGG-Face preprocessing.
    image -= [129.1863, 104.7624, 93.5940]
    label = tf.cast(features["label"], tf.int64)
    key = tf.cast(features["key"], tf.string)
    return image, label, key


def train_input_fn():
    """Build the training tf.data pipeline with VGG-Face normalization."""
    dataset = tf.data.TFRecordDataset(tfrecord_filename)
    dataset = dataset.map(partial(vgg_16_face_normalization))
    dataset = dataset.shuffle(1000)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.repeat(epochs)
    return dataset.map(lambda d, l, k: ({"data": d, "key": k}, l))
from bob.learn.tensorflow.estimators import Logits
from bob.learn.tensorflow.network import vgg_16
from bob.learn.tensorflow.dataset import DEFAULT_FEATURE
from functools import partial
import tensorflow as tf
import os

# PLACE YOUR MODEL DIR HERE
model_dir = 'VGG-ft-nir'

# Checkpoint converted from the original Caffe VGG-Face model; it ships
# with bob.ip.tensorflow_extractor.
import bob.ip.tensorflow_extractor

vgg_path = os.path.join(
    bob.ip.tensorflow_extractor.VGGFace.get_vggpath(), "vgg_face_tf")
extra_checkpoint = {
    "checkpoint_path": vgg_path,
    "scopes": {"vgg_16/": "vgg_16/"},
}

estimator = Logits(
    vgg_16,
    optimizer=tf.train.AdagradOptimizer(1e-4),
    loss_op=tf.losses.sparse_softmax_cross_entropy,
    n_classes=25,  # identities in the world set of the FARGO database
    embedding_validation=True,
    model_dir=model_dir,
    apply_moving_averages=False,
    extra_checkpoint=extra_checkpoint,
)

# PLACE YOUR TF RECORD PATH HERE
tfrecord_filename = 'fargo_mc_nir.tfrecords'

data_shape = (224, 224, 3)
output_shape = (224, 224)
data_type = tf.uint8
batch_size = 16
epochs = 20


def vgg_16_face_normalization(serialized_example):
    """Parse one serialized example and apply VGG-Face mean subtraction."""
    # `data`, `key`, `label` layout as written by the bob tfrecord tools.
    features = tf.parse_single_example(serialized_example, features=DEFAULT_FEATURE)
    # Decode the raw bytes and restore the original image shape.
    image = tf.decode_raw(features["data"], data_type)
    image = tf.reshape(image, data_shape)
    image = tf.cast(image, tf.float32)
    # Per-channel means used by VGG-Face preprocessing.
    image -= [129.1863, 104.7624, 93.5940]
    label = tf.cast(features["label"], tf.int64)
    key = tf.cast(features["key"], tf.string)
    return image, label, key


def train_input_fn():
    """Build the training tf.data pipeline with VGG-Face normalization."""
    dataset = tf.data.TFRecordDataset(tfrecord_filename)
    dataset = dataset.map(partial(vgg_16_face_normalization))
    dataset = dataset.shuffle(1000)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.repeat(epochs)
    return dataset.map(lambda d, l, k: ({"data": d, "key": k}, l))
from bob.learn.tensorflow.estimators import Logits
from bob.learn.tensorflow.network import vgg_16
from bob.learn.tensorflow.dataset import DEFAULT_FEATURE
from functools import partial
import tensorflow as tf
import os

# PLACE YOUR MODEL DIR HERE
model_dir = 'VGG-ft-rgb'

# Checkpoint converted from the original Caffe VGG-Face model; it ships
# with bob.ip.tensorflow_extractor.
import bob.ip.tensorflow_extractor

vgg_path = os.path.join(
    bob.ip.tensorflow_extractor.VGGFace.get_vggpath(), "vgg_face_tf")
extra_checkpoint = {
    "checkpoint_path": vgg_path,
    "scopes": {"vgg_16/": "vgg_16/"},
}

estimator = Logits(
    vgg_16,
    optimizer=tf.train.AdagradOptimizer(1e-4),
    loss_op=tf.losses.sparse_softmax_cross_entropy,
    n_classes=25,  # identities in the world set of the FARGO database
    embedding_validation=True,
    model_dir=model_dir,
    apply_moving_averages=False,
    extra_checkpoint=extra_checkpoint,
)

# PLACE YOUR TF RECORD PATH HERE
tfrecord_filename = 'fargo_mc_rgb.tfrecords'

data_shape = (224, 224, 3)
output_shape = (224, 224)
data_type = tf.uint8
batch_size = 16
epochs = 20


def vgg_16_face_normalization(serialized_example):
    """Parse one serialized example and apply VGG-Face mean subtraction."""
    # `data`, `key`, `label` layout as written by the bob tfrecord tools.
    features = tf.parse_single_example(serialized_example, features=DEFAULT_FEATURE)
    # Decode the raw bytes and restore the original image shape.
    image = tf.decode_raw(features["data"], data_type)
    image = tf.reshape(image, data_shape)
    image = tf.cast(image, tf.float32)
    # Per-channel means used by VGG-Face preprocessing.
    image -= [129.1863, 104.7624, 93.5940]
    label = tf.cast(features["label"], tf.int64)
    key = tf.cast(features["key"], tf.string)
    return image, label, key


def train_input_fn():
    """Build the training tf.data pipeline with VGG-Face normalization."""
    dataset = tf.data.TFRecordDataset(tfrecord_filename)
    dataset = dataset.map(partial(vgg_16_face_normalization))
    dataset = dataset.shuffle(1000)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.repeat(epochs)
    return dataset.map(lambda d, l, k: ({"data": d, "key": k}, l))
groups = ('dev', 'eval')
allow_missing_files = True

import bob.bio.face
import bob.bio.caffe_face
import os
import numpy

preprocessed_directory = os.path.abspath('./temp/vgg/preprocessed/')
skip_preprocessing = True

# define the preprocessor to correctly load the data
from bob.bio.base.preprocessor import Preprocessor


class VGGOneChannel(Preprocessor):
    """Preprocessor for feeding 1-channel images to VGG.

    The VGG architecture expects input of size 224x224x3; this just loads
    the single-channel image and replicates it over three channels.
    """

    def __init__(self, **kwargs):
        Preprocessor.__init__(self, **kwargs)

    def read_data(self, data_file):
        """Read the original image and copy it into 3 channels."""
        # Call the base-class reader directly instead of instantiating a
        # throw-away Preprocessor on every call (the original also relied
        # on `bob.bio.base` being reachable through the attribute chain).
        temp = super().read_data(data_file)
        # NOTE(review): numpy.zeros defaults to float64 even for integer
        # input images -- confirm the extractor expects float data.
        data = numpy.zeros((3, temp.shape[0], temp.shape[1]))
        data[0] = data[1] = data[2] = temp
        return data


preprocessor = VGGOneChannel()

# extract features from fine-tuned model
_modelfile = 'VGG-ft-depth/'
import bob.ip.tensorflow_extractor

extractor = bob.bio.base.extractor.CallableExtractor(
    bob.ip.tensorflow_extractor.VGGFace(checkpoint_filename=_modelfile))

algorithm = 'distance-cosine'
groups = ('dev', 'eval')
allow_missing_files = True

import numpy
import bob.bio.face
import bob.bio.caffe_face
import os

preprocessed_directory = os.path.abspath('./temp/vgg/preprocessed/')
skip_preprocessing = True

# define the preprocessor to correctly load the data
from bob.bio.base.preprocessor import Preprocessor


class VGGOneChannel(Preprocessor):
    """Preprocessor for feeding 1-channel images to VGG.

    The VGG architecture expects input of size 224x224x3; this just loads
    the single-channel image and replicates it over three channels.
    """

    def __init__(self, **kwargs):
        Preprocessor.__init__(self, **kwargs)

    def read_data(self, data_file):
        """Read the original image and copy it into 3 channels."""
        # Call the base-class reader directly instead of instantiating a
        # throw-away Preprocessor on every call (the original also relied
        # on `bob.bio.base` being reachable through the attribute chain).
        temp = super().read_data(data_file)
        # NOTE(review): numpy.zeros defaults to float64 even for integer
        # input images -- confirm the extractor expects float data.
        data = numpy.zeros((3, temp.shape[0], temp.shape[1]))
        data[0] = data[1] = data[2] = temp
        return data


preprocessor = VGGOneChannel()

# extract features from fine-tuned model
_modelfile = 'VGG-ft-nir/'
import bob.ip.tensorflow_extractor

extractor = bob.bio.base.extractor.CallableExtractor(
    bob.ip.tensorflow_extractor.VGGFace(checkpoint_filename=_modelfile))

algorithm = 'distance-cosine'
groups = ('dev', 'eval')
allow_missing_files = True

import bob.bio.face
import bob.bio.caffe_face
import os

preprocessed_directory = os.path.abspath('./temp/vgg/preprocessed/')
skip_preprocessing = True

# extract features from fine-tuned model
_modelfile = 'VGG-ft-rgb/'
import bob.ip.tensorflow_extractor

extractor = bob.bio.base.extractor.CallableExtractor(
    bob.ip.tensorflow_extractor.VGGFace(checkpoint_filename=_modelfile))

algorithm = 'distance-cosine'

# DUMMY preprocessor: preprocessing is skipped, but the framework still
# needs an object that can read the stored data back.
from bob.bio.base.preprocessor import Preprocessor

preprocessor = Preprocessor()
groups = ('dev', 'eval')
allow_missing_files = True

import numpy
import bob.bio.face
import bob.bio.caffe_face
import os

preprocessed_directory = os.path.abspath('./temp/vgg/preprocessed/')
skip_preprocessing = True

# define the preprocessor to correctly load the data
from bob.bio.base.preprocessor import Preprocessor


class VGGOneChannel(Preprocessor):
    """Preprocessor for feeding 1-channel images to VGG.

    The VGG architecture expects input of size 224x224x3; this just loads
    the single-channel image and replicates it over three channels.
    """

    def __init__(self, **kwargs):
        Preprocessor.__init__(self, **kwargs)

    def read_data(self, data_file):
        """Read the original image and copy it into 3 channels."""
        # Call the base-class reader directly instead of instantiating a
        # throw-away Preprocessor on every call (the original also relied
        # on `bob.bio.base` being reachable through the attribute chain).
        temp = super().read_data(data_file)
        # NOTE(review): numpy.zeros defaults to float64 even for integer
        # input images -- confirm the extractor expects float data.
        data = numpy.zeros((3, temp.shape[0], temp.shape[1]))
        data[0] = data[1] = data[2] = temp
        return data


preprocessor = VGGOneChannel()

# extract features from fc7 (default)
import bob.bio.caffe_face

extractor = bob.bio.caffe_face.extractor.VGGFeatures("fc7")

algorithm = 'distance-cosine'
This diff is collapsed.
name: fargo_tbiom_2019
channels:
- https://www.idiap.ch/software/bob/conda
- defaults
dependencies:
- _libgcc_mutex=0.1
- _tflow_select=2.3.0
- absl-py=0.7.1
- alabaster=0.7.12
- asn1crypto=0.24.0
- astor=0.7.1
- babel=2.7.0
- backcall=0.1.0
- blas=1.0
- bob.ap=2.1.8
- bob.bio.base=4.0.1
- bob.bio.caffe_face=1.1.2
- bob.bio.face=4.0.1
- bob.bio.face_ongoing=1.0.4
- bob.bio.gmm=3.2.3
- bob.bio.htface=1.0.3
- bob.blitz=2.0.17
- bob.buildout=2.2.0
- bob.core=2.2.2
- bob.db.arface=2.1.7
- bob.db.atnt=2.0.13
- bob.db.base=3.0.2
- bob.db.cbsr_nir_vis_2=2.0.4
- bob.db.cuhk_cufs=2.2.4
- bob.db.cuhk_cufsf=1.0.3
- bob.db.fargo=1.0.0
- bob.db.ldhf=1.0.1
- bob.db.mnist=2.2.3
- bob.db.nivl=1.0.2
- bob.db.pola_thermal=1.0.2
- bob.db.xm2vts=2.1.7
- bob.extension=3.1.1
- bob.fusion.base=0.0.2
- bob.io.base=3.0.5
- bob.io.image=2.4.3
- bob.io.matlab=2.0.13
- bob.io.video=3.0.0
- bob.ip.base=2.2.3
- bob.ip.caffe_extractor=2.0.2
- bob.ip.color=2.0.13
- bob.ip.draw=2.0.12
- bob.ip.facedetect=2.1.8
- bob.ip.flandmark=2.1.9
- bob.ip.gabor=2.0.14
- bob.ip.mtcnn=1.0.2
- bob.ip.tensorflow_extractor=0.0.4
- bob.learn.activation=2.0.13
- bob.learn.boosting=2.0.16
- bob.learn.em=2.1.3
- bob.learn.linear=2.1.5
- bob.learn.mlp=2.1.9
- bob.learn.pytorch=0.0.5
- bob.learn.tensorflow=1.1.0
- bob.math=3.1.3
- bob.measure=4.1.0
- bob.sp=2.0.13
- boost=1.67.0
- bzip2=1.0.8
- c-ares=1.15.0
- ca-certificates=2019.5.15
- caffe=1.0
- cairo=1.14.12
- certifi=2019.6.16
- cffi=1.12.3
- chardet=3.0.4
- click=7.0
- click-plugins=1.1.1
- cloudpickle=1.2.1
- coverage=4.5.3
- cryptography=2.7
- cudatoolkit=10.0.130
- cudnn=7.6.0
- cycler=0.10.0
- cytoolz=0.10.0
- dask-core=2.1.0
- dbus=1.13.6
- decorator=4.4.0
- docopt=0.6.2
- docutils=0.14
- expat=2.2.6
- ffmpeg=4.0
- fontconfig=2.13.0
- freeglut=3.0.0
- freetype=2.9.1
- gast=0.2.2
- gflags=2.2.2
- giflib=5.1.4
- glib=2.56.2
- glog=0.3.5
- google-pasta=0.1.7
- graphite2=1.3.13
- gridtk=1.6.3
- grpcio=1.16.1
- gst-plugins-base=1.14.0
- gstreamer=1.14.0
- h5py=2.8.0
- harfbuzz=1.8.8
- hdf5=1.10.2
- icu=58.2
- idna=2.8
- imageio=2.5.0
- imagesize=1.1.0
- intel-openmp=2019.4
- ipdb=0.11
- ipython=7.6.1
- ipython_genutils=0.2.0
- jasper=2.0.14
- jedi=0.13.3
- jinja2=2.10.1
- joblib=0.13.2
- jpeg=9b
- keras-applications=1.0.8
- keras-preprocessing=1.1.0
- kiwisolver=1.1.0
- leveldb=1.20
- libblitz=1.0.1
- libboost=1.67.0
- libedit=3.1.20181209
- libffi=3.2.1
- libgcc-ng=9.1.0
- libgfortran-ng=7.3.0
- libglu=9.0.0
- libmatio=1.5.15
- libopencv=3.4.2
- libopus=1.3
- libpng=1.6.37
- libprotobuf=3.8.0
- libstdcxx-ng=9.1.0
- libtiff=4.0.10
- libuuid=1.0.3
- libvpx=1.7.0
- libxcb=1.13
- libxml2=2.9.9
- lmdb=0.9.23
- markdown=3.1.1
- markupsafe=1.1.1
- matplotlib=3.1.0
- mkl=2019.4
- mkl-service=2.0.2
- mkl_fft=1.0.12
- mkl_random=1.0.2
- ncurses=6.1
- networkx=2.3
- ninja=1.9.0
- nose=1.3.7
- numpy=1.16.4
- numpy-base=1.16.4
- olefile=0.46
- openh264=1.7.0
- openssl=1.1.1c
- packaging=19.0
- pandas=0.24.2
- parso=0.5.0
- pcre=8.43
- pexpect=4.7.0
- pickleshare=0.7.5
- pillow=6.1.0
- pip=19.1.1
- pixman=0.38.0
- prompt_toolkit=2.0.9
- protobuf=3.8.0
- ptyprocess=0.6.0
- py-boost=1.67.0
- py-opencv=3.4.2
- pycparser=2.19
- pygments=2.4.2
- pyopenssl=19.0.0
- pyparsing=2.4.0
- pyqt=5.9.2
- pysocks=1.7.0
- python=3.6.8
- python-dateutil=2.8.0
- python-gflags=3.1.2
- python-leveldb=0.20
- pytorch=1.0.1
- pytz=2019.1
- pywavelets=1.0.3
- pyyaml=5.1.1
- qt=5.9.7
- readline=7.0
- requests=2.22.0
- scikit-image=0.15.0
- scikit-learn=0.21.2
- scipy=1.2.1
- setuptools=41.0.1
- sip=4.19.8
- six=1.12.0
- snappy=1.1.7
- snowballstemmer=1.9.0
- sphinx=2.1.2
- sphinx_rtd_theme=0.4.3
- sphinxcontrib-applehelp=1.0.1
- sphinxcontrib-devhelp=1.0.1
- sphinxcontrib-htmlhelp=1.0.2
- sphinxcontrib-jsmath=1.0.1
- sphinxcontrib-qthelp=1.0.2
- sphinxcontrib-serializinghtml=1.1.3
- sqlalchemy=1.3.5
- sqlite=3.29.0
- tabulate=0.8.3
- tensorboard=1.14.0
- tensorflow=1.14.0
- tensorflow-base=1.14.0
- tensorflow-estimator=1.14.0
- termcolor=1.1.0
- tk=8.6.8
- toolz=0.10.0
- torchvision=0.2.1
- tornado=6.0.3
- traitlets=4.3.2
- urllib3=1.24.2
- vlfeat=0.9.21
- wcwidth=0.1.7
- werkzeug=0.15.4
- wheel=0.33.4
- wrapt=1.11.2
- xz=5.2.4
- yaml=0.1.7
- zc.buildout=2.12.2
- zc.recipe.egg=2.0.7
- zlib=1.2.11
- zstd=1.3.7
prefix: /idiap/user/heusch/miniconda3/envs/fargo_tbiom_2019