Commit 50f8c2f0 authored by Pavel KORSHUNOV's avatar Pavel KORSHUNOV

updated evaluation scripts, environment, and docs to bob5

parent 310b9288
Pipeline #24401 passed in 12 minutes and 42 seconds
@@ -22,6 +22,7 @@ from __future__ import print_function
 """This script evaluates the given score files and computes EER and Spoofing FAR with regards to 10 types of voice attacks"""
 import bob.measure
+import bob.bio.base.score
 import argparse
 import numpy, math
@@ -40,7 +41,7 @@ def load_attacks_file(filename, support="all", adevice="all", recdevice="all"):
   negatives = []
   # read four column list line by line
-  for (client_id, probe_id, filename, score) in bob.measure.load.four_column(filename):
+  for (client_id, probe_id, filename, score) in bob.bio.base.score.load.four_column(filename):
     if client_id == probe_id:
       if (support in filename or support == "all") and \
          (adevice in filename or adevice == "all") and \
@@ -155,7 +156,7 @@ def main(command_line_parameters=None):
   ## Read scores ###
   print("Loading real score files")
   # take only positive values
-  scores_real = [bob.measure.load.split_four_column(real_file)[1] for real_file in args.real_files]
+  scores_real = [bob.bio.base.score.load.split_four_column(real_file)[1] for real_file in args.real_files]
   print("Loading attack score files")
   scores_attack = [load_attacks_file(attack_file, support, adevice)[1] for attack_file in args.attack_files]
......
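For context, the renamed loader used above is a generator over the four columns of each score line; the following is a minimal sketch (not part of the commit) of the filtering that ``load_attacks_file()`` performs, with a placeholder score file name and a hypothetical attack support value::

  import bob.bio.base.score

  support = "replay_phone"  # hypothetical attack support to filter on
  negatives = []
  # each line of a four-column score file: claimed id, real id, test label, score
  for client_id, probe_id, filename, score in bob.bio.base.score.load.four_column("scores-dev-attack"):
    # in the attack score files, matching ids mark an attack against the claimed client
    if client_id == probe_id and (support in filename or support == "all"):
      negatives.append(score)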
@@ -22,6 +22,7 @@ from __future__ import print_function
 """This script evaluates the given score files and computes EER and Spoofing FAR with regards to 10 types of voice attacks"""
 import bob.measure
+import bob.bio.base.score
 import argparse
 import numpy, math
@@ -41,7 +42,7 @@ def load_attack_scores(scores_filename, support="all", adevice="all", recdevice=
   negatives = []
   # read four column list line by line
-  for (client_id, probe_id, filename, score) in bob.measure.load.four_column(scores_filename):
+  for (client_id, probe_id, filename, score) in bob.bio.base.score.load.four_column(scores_filename):
     if client_id == probe_id:
       if (support in filename or support == "all") and \
          (adevice in filename or adevice == "all") and \
@@ -157,9 +158,9 @@ def main(command_line_parameters=None):
   ## Read scores ###
   ####################
   print("Loading %s real score file of the development set" % (args.real_dev_file))
-  scores_dev_zimp, scores_dev = bob.measure.load.split_four_column(args.real_dev_file)
+  scores_dev_zimp, scores_dev = bob.bio.base.score.load.split_four_column(args.real_dev_file)
   print("Loading %s real score file of the evaluation set" % (args.real_eval_file))
-  scores_eval_zimp, scores_eval = bob.measure.load.split_four_column(args.real_eval_file)
+  scores_eval_zimp, scores_eval = bob.bio.base.score.load.split_four_column(args.real_eval_file)
   support=args.support
......
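Both scripts ultimately feed these score arrays into ``bob.measure``; the following is a minimal sketch (not part of the commit, with placeholder score file names) of how the renamed ``bob.bio.base.score`` loaders combine with ``bob.measure`` to obtain an EER and a Spoofing FAR::

  import bob.measure
  import bob.bio.base.score

  # licit protocol: negatives are zero-effort impostors, positives are genuine accesses
  zimp_dev, real_dev = bob.bio.base.score.load.split_four_column("scores-dev-real")
  zimp_eval, real_eval = bob.bio.base.score.load.split_four_column("scores-eval-real")

  # fix the decision threshold at the EER of the development set
  threshold = bob.measure.eer_threshold(zimp_dev, real_dev)
  far, frr = bob.measure.farfrr(zimp_eval, real_eval, threshold)
  print("Eval FAR/FRR at the dev EER threshold: %.2f%% / %.2f%%" % (100 * far, 100 * frr))

  # Spoofing FAR: rate at which attack scores pass the same threshold
  _, attacks_eval = bob.bio.base.score.load.split_four_column("scores-eval-attack")
  spoof_far, _ = bob.measure.farfrr(attacks_eval, real_eval, threshold)
  print("Spoofing FAR: %.2f%%" % (100 * spoof_far))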
@@ -49,7 +49,7 @@ packages can find the data needed for the experiments.
 GMM models for ``real`` and ``spoofed`` data are trained as per this generic command::

-  $ ./bin/train_gmm.py -d DB_NAME -p Preprocessor -e Feature_Extractor -a Classifier -s Folder_Name --groups world --skip-enroller-training -vv --parallel 6
+  $ ./bin/train_gmm.py -d DB_NAME --protocol PROTOCOL_NAME -p Preprocessor -e Feature_Extractor -a Classifier -s Folder_Name --groups world --skip-enroller-training -vv --parallel 6

 This training may take a long time and, as a result, it will generate a GMM model and write it into the ``Projector.hdf5``
 file. You can check all possible options by running ``$ ./bin/train_gmm.py --help``.

@@ -61,19 +61,43 @@ Here is the generic command for tuning the trained system on developing set and
 For example, to train and evaluate a GMM-based PAD system using MFCC-based features computed for
 ``licit`` and ``spoof`` protocols of the ASVspoof database, the following commands need to be run::

-  $ ./bin/train_gmm.py -d asvspoof-licit -p mod-4hz -e mfcc20 -a gmm-tomi -s temp --groups world --projector-file Projector_licit.hdf5 --skip-enroller-training -vv --parallel 6
-  $ ./bin/train_gmm.py -d asvspoof-spoof -p mod-4hz -e mfcc20 -a gmm-tomi -s temp --groups world --projector-file Projector_spoof.hdf5 --skip-enroller-training -vv --parallel 6
+  $ ./bin/train_gmm.py -d asvspoof-licit --protocol CM-licit -p mod-4hz -e mfcc20 -a gmm-tomi -s temp --groups world --projector-file Projector_licit.hdf5 --skip-enroller-training -vv --parallel 6
+  $ ./bin/train_gmm.py -d asvspoof-spoof --protocol CM-spoof -p mod-4hz -e mfcc20 -a gmm-tomi -s temp --groups world --projector-file Projector_spoof.hdf5 --skip-enroller-training -vv --parallel 6
   $ ./bin/spoof.py -d asvspoof -p mod-4hz -e mfcc20 -a gmm --projector-file Projector_spoof.hdf5 -s temp --groups dev eval --skip-projector-training -vv

 Here, ``./bin/train_gmm.py`` produces two GMM models, one for the ``licit`` protocol (real data only) and one for the ``spoof``
 protocol (spoofed data only). Then, ``./bin/spoof.py`` is used to project all data from the ``dev`` and ``eval`` sets onto
-these two GMM models and compute corresponding scores.
+these two GMM models and compute corresponding scores. By default, the scores will be saved inside the ``./results`` folder.

-Once the scores are obtained, error rates and DET curves can be computed using the following script::
+Scores analysis and plotting
+----------------------------

-  $ ./bin/plot_pad_results.py -t scores_path/dev-attack -d scores_path/dev-real -f scores_path/eval-attack -e scores_path/eval-real -o plots
+Once the scores are obtained, error rates can be computed using the following command (you must have Bob v5.0 or higher installed)::

-Also, it is possible to reproduce the experiments presented in the paper using the following bash scripts that run for all
+  $ bob pad metrics -e scores_path/scores-dev scores_path/scores-eval
+
+Histograms and DET curves can also be plotted; for details, run ``$ bob pad --help``.
+
+If you want to avoid training all PAD systems and computing scores, we provide the score files obtained for all the PAD systems presented in the paper.
+You can download all the scores as follows::
+
+  $ #You should be inside the package directory bob.paper.interspeech_2016
+  $ wget http://www.idiap.ch/resource/biometric/data/interspeech_2016.tar.gz #Download the scores
+  $ tar -xzvf interspeech_2016.tar.gz
+
+With the downloaded scores, to compute error rates and DET curves you need to run the following script::
+
+  $ ./bin/pad_process_scores.py -t scores_path/scores-dev-attack -d scores_path/scores-dev-real -f scores_path/scores-eval-attack -e scores_path/scores-eval-real -o plots
+
+For example, to evaluate the MFCC-based PAD system for the ASVspoof_ database, run the following::
+
+  $ ./bin/pad_process_scores.py -t scores/asvspoof_pad/gmm_mfcc20_onlydeltas_20/scores-dev-attack -d scores/asvspoof_pad/gmm_mfcc20_onlydeltas_20/scores-dev-real -f scores/asvspoof_pad/gmm_mfcc20_onlydeltas_20/scores-eval-attack -e scores/asvspoof_pad/gmm_mfcc20_onlydeltas_20/scores-eval-real -o plots
+
+Re-running all experiments on SGE grid
+--------------------------------------
+
+It is possible to reproduce the experiments presented in the paper using the following bash scripts that run for all
 PAD systems used in the paper (note that these scripts assume an SGE grid to be available and will take a few days on 50
 parallel machines)::

@@ -83,14 +107,13 @@ parallel machines)::
   $ ./project_on_gmms.sh asvspoof 20 # evaluate for ASVspoof database

-Generate results from pre-computed scores
------------------------------------------
+Generate paper's results from pre-computed scores
+-------------------------------------------------

-If you want to avoid training all PAD systems and computing scores, we are providing the score files obtained for all the PAD systems presented in the paper. Hence, the error rates can be computed, as per Tables 1, 2, and 3 of the paper, and additional DET curves can be plotted by simply performing the following::
+If you prefer to avoid training all PAD systems yourself, the pre-computed score files for all PAD systems presented in the paper can be downloaded as described above.
+The error rates can then be computed, as per Tables 1, 2, and 3 of the paper, and additional DET curves can be plotted by simply performing the following::

-  $ #You should be inside the package directory bob.paper.interspeech_2016
-  $ wget http://www.idiap.ch/resource/biometric/data/interspeech_2016.tar.gz #Download the scores
-  $ tar -xzvf interspeech_2016.tar.gz
   $ ./evaluate_scores.sh # compute error rates and plot the DET curves for each PAD system

 The script will create folders for each different PAD system (each containing the computed error rates and DET curves)
......
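The updated documentation only points to ``$ bob pad --help`` for plotting. As an illustration, DET curves and score histograms for the same pair of score files could be produced along the following lines; the sub-command names and options are an assumption about the Bob v5.0 command line and should be verified against ``$ bob pad --help`` on your installation::

  $ # assumed sub-commands and options; check `bob pad --help` first
  $ bob pad det -e -o det_curves.pdf scores_path/scores-dev scores_path/scores-eval
  $ bob pad hist -e -o histograms.pdf scores_path/scores-dev scores_path/scores-eval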
The environment file is replaced by a conda environment specification pinned to Bob 5.0 and Python 3.6 (the previous file was a ``conda list`` export for osx-64 / Python 2.7):

name: bob.paper.interspeech_2016
channels:
- https://www.idiap.ch/software/bob/conda
- defaults
dependencies:
- alabaster=0.7.11=py36_0
- asn1crypto=0.24.0=py36_0
- babel=2.6.0=py36_0
- blas=1.0=mkl
- boost=1.65.1=py36_4
- bzip2=1.0.6=h14c3975_5
- ca-certificates=2018.03.07=0
- certifi=2018.4.16=py36_0
- cffi=1.11.5=py36h9745a5d_0
- chardet=3.0.4=py36h0f667ec_1
- click=6.7=py36h5253387_0
- click-plugins=1.0.3=py36_0
- coverage=4.5.1=py36h14c3975_0
- cryptography=2.2.2=py36h14c3975_0
- cycler=0.10.0=py36h93f1223_0
- dbus=1.13.2=h714fa37_1
- docopt=0.6.2=py36_0
- docutils=0.14=py36hb0f60f5_0
- expat=2.2.5=he0dffb1_0
- fontconfig=2.12.6=h49f89f6_0
- freetype=2.8=hab7d2ae_1
- glib=2.56.1=h000015b_0
- gst-plugins-base=1.14.0=hbbd80ab_1
- gstreamer=1.14.0=hb453b48_1
- hdf5=1.10.1=h9caa474_1
- icu=58.2=h9c2bf20_1
- idna=2.7=py36_0
- imagesize=1.0.0=py36_0
- intel-openmp=2018.0.3=0
- jinja2=2.10=py36ha16c418_0
- jpeg=9b=h024ee3a_2
- kiwisolver=1.0.1=py36h764f252_0
- libboost=1.65.1=habcd387_4
- libedit=3.1.20170329=h6b74fdf_2
- libffi=3.2.1=hd88cf55_4
- libgcc-ng=7.2.0=hdf63c60_3
- libgfortran-ng=7.2.0=hdf63c60_3
- libopus=1.2.1=hb9ed12e_0
- libpng=1.6.34=hb9fc6fc_0
- libstdcxx-ng=7.2.0=hdf63c60_3
- libtiff=4.0.9=he85c1e1_1
- libvpx=1.6.1=h888fd40_0
- libxcb=1.13=h1bed415_1
- libxml2=2.9.8=h26e45fe_1
- markupsafe=1.0=py36hd9260cd_1
- matplotlib=2.2.2=py36h0e671d2_1
- mkl=2018.0.3=1
- mkl_fft=1.0.1=py36h3010b51_0
- mkl_random=1.0.1=py36h629b387_0
- ncurses=6.1=hf484d3e_0
- nose=1.3.7=py36hcdf7029_2
- numpy=1.14.5=py36hcd700cb_3
- numpy-base=1.14.5=py36hdbf6ddf_3
- olefile=0.45.1=py36_0
- openssl=1.0.2o=h20670df_0
- pcre=8.42=h439df22_0
- pillow=4.3.0=py36h6f462bf_1
- pip=10.0.1=py36_0
- py-boost=1.65.1=py36hf484d3e_4
- pycparser=2.18=py36hf9f622e_1
- pygments=2.2.0=py36h0d3125c_0
- pyopenssl=18.0.0=py36_0
- pyparsing=2.2.0=py36hee85983_1
- pyqt=5.9.2=py36h751905a_0
- pysocks=1.6.8=py36_0
- python=3.6.6=hc3d631a_0
- python-dateutil=2.7.3=py36_0
- pytz=2018.5=py36_0
- qt=5.9.5=h7e424d6_0
- readline=7.0=ha6073c6_4
- requests=2.19.1=py36_0
- scikit-learn=0.19.1=py36h7aa7ec6_0
- scipy=1.1.0=py36hfc37229_0
- setuptools=39.2.0=py36_0
- sip=4.19.8=py36hf484d3e_0
- six=1.11.0=py36h372c433_1
- snowballstemmer=1.2.1=py36h6febd40_0
- sphinx=1.6.3=py36he5f0bdb_0
- sphinxcontrib=1.0=py36h6d0f590_1
- sphinxcontrib-websupport=1.1.0=py36_1
- sqlalchemy=1.2.8=py36h14c3975_0
- sqlite=3.24.0=h84994c4_0
- tabulate=0.8.2=py36_0
- tk=8.6.7=hc745277_3
- tornado=5.0.2=py36_0
- typing=3.6.4=py36_0
- urllib3=1.23=py36_0
- wheel=0.32.1=py36_0
- xz=5.2.4=h14c3975_4
- zlib=1.2.11=ha838bed_2
- bob=5.0.0=py36hc3b47e9_0
- bob.ap=2.1.7=py36h0191800_0
- bob.bio.base=4.0.0=py36h9e540f5_0
- bob.bio.face=4.0.0=py36h605e89b_0
- bob.bio.gmm=3.2.2=py36hfa564a4_0
- bob.bio.spear=3.2.2=py36hfa564a4_0
- bob.bio.video=3.4.2=py36hf2dec60_0
- bob.blitz=2.0.16=py36hac2ea6f_0
- bob.buildout=2.1.7=py36hbe6127a_0
- bob.core=2.2.1=py36hac2ea6f_0
- bob.db.asvspoof=1.2.1=py36h28a5d26_0
- bob.db.atnt=2.0.12=py36hf2dec60_0
- bob.db.avspoof=2.3.1=py36h28a5d26_0
- bob.db.base=3.0.1=py36h28a5d26_0
- bob.extension=3.1.0=py36h6c8ecfd_0
- bob.io.base=3.0.4=py36h12a88d4_0
- bob.io.image=2.4.2=py36hd33571a_0
- bob.io.matlab=2.0.12=py36h5bf78fe_0
- bob.io.video=2.1.1=py36hd1ff1a4_1
- bob.ip.base=2.2.2=py36h2b31b38_0
- bob.ip.color=2.0.12=py36hac2ea6f_0
- bob.ip.draw=2.0.11=py36hbc316f1_0
- bob.ip.facedetect=2.1.6=py36hbf20c1f_0
- bob.ip.flandmark=2.1.8=py36hbf20c1f_0
- bob.ip.gabor=2.0.13=py36h5d07071_0
- bob.learn.activation=2.0.12=py36hac2ea6f_0
- bob.learn.boosting=2.0.15=py36hf9a62a4_0
- bob.learn.em=2.1.2=py36h5d07071_0
- bob.learn.libsvm=2.1.1=py36h7de4692_0
- bob.learn.linear=2.1.4=py36hf9a62a4_0
- bob.learn.mlp=2.1.8=py36hac2ea6f_0
- bob.math=3.1.2=py36he85555e_0
- bob.measure=4.0.0=py36h74400ff_0
- bob.pad.base=2.1.0=py36hf2dec60_0
- bob.pad.voice=1.0.6=py36hf2dec60_0
- bob.sp=2.0.12=py36h3b40046_0
- ffmpeg=3.4=hdec9c9a_1
- giflib=5.1.4=h63890e4_4
- libblitz=1.0.1=h5c30f38_0
- libmatio=1.5.11=ha500df2_0
- libsvm=3.22=h4f306d9_0
- openh264=1.7.0=hc521636_1
- vlfeat=0.9.21=h18fa195_0
- zc.buildout=2.11.2=py36_0
- zc.recipe.egg=2.0.5=py36_0
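The new file follows the standard conda environment format; assuming it is saved as ``environment.yml`` in the package directory (the exact file name is not shown in this commit), the environment can be recreated and activated with::

  $ conda env create -f environment.yml
  $ conda activate bob.paper.interspeech_2016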