bob / bob.learn.tensorflow · Commits · b9c51932

Commit b9c51932 authored Oct 30, 2017 by Tiago de Freitas Pereira

Merge branch 'predict' into 'master'

Take estimators from config in generic scripts. Closes #46.

See merge request !28

Parents: fdac5f7c, db610539
Pipeline #13555 failed with stages in 3 minutes and 28 seconds
Changes: 6 files · Pipelines: 1
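The gist of this merge: instead of naming model_fn, model_dir, model_params and run_config in the configuration file and letting the generic scripts assemble the Estimator, the configuration now hands over a ready-made estimator object. A minimal sketch of such a config entry (the model_dir path is illustrative, and SimpleCNN.model_fn merely stands in for any tf.estimator-compatible model function):

import tensorflow as tf
from bob.learn.tensorflow.network.SimpleCNN import model_fn
from bob.learn.tensorflow.utils.reproducible import run_config

# the generic scripts now read this single object from the config file
estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir='/path/to/model_dir',  # illustrative path
    config=run_config)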
bob/learn/tensorflow/dataset/bio.py
 import os
 import six
 import tensorflow as tf
-from bob.bio.base.tools.grid import indices
-from bob.bio.base import read_original_data as _read_original_data
+from bob.bio.base import read_original_data
-def make_output_path(output_dir, key):
-    """Returns an output path used for saving keys. You need to make sure the
-    directories leading to this output path exist.
-
-    Parameters
-    ----------
-    output_dir : str
-        The root directory to save the results
-    key : str
-        The key of the sample. Usually biofile.make_path("", "")
-
-    Returns
-    -------
-    str
-        The path for the provided key.
-    """
-    return os.path.join(output_dir, key + '.hdf5')
-
-
-def bio_generator(database, groups, number_of_parallel_jobs, output_dir,
-                  read_original_data=None, biofile_to_label=None,
-                  multiple_samples=False, force=False):
+def bio_generator(database, biofiles, load_data=None, biofile_to_label=None,
+                  multiple_samples=False, repeat=False):
     """Returns a generator and its output types and shapes based on
     bob.bio.base databases.
...
...
@@ -34,16 +12,11 @@ def bio_generator(database, groups, number_of_parallel_jobs, output_dir,
     ----------
     database : :any:`bob.bio.base.database.BioDatabase`
         The database that you want to use.
-    groups : [str]
-        List of groups. Can be any permutation of ``('world', 'dev', 'eval')``
-    number_of_parallel_jobs : int
-        The number of parallel jobs that the script has run with. This is used
-        to split the number of files into array jobs.
-    output_dir : str
-        The root directory where the data will be saved.
-    read_original_data : :obj:`object`, optional
+    biofiles : [:any:`bob.bio.base.database.BioFile`]
+        The list of the bio files.
+    load_data : :obj:`object`, optional
         A callable with the signature of
-        ``data = read_original_data(biofile, directory, extension)``.
+        ``data = load_data(database, biofile)``.
         :any:`bob.bio.base.read_original_data` is used by default.
     biofile_to_label : :obj:`object`, optional
         A callable with the signature of ``label = biofile_to_label(biofile)``.
...
@@ -52,8 +25,8 @@ def bio_generator(database, groups, number_of_parallel_jobs, output_dir,
         If true, it assumes that the bio database's samples actually contain
         multiple samples. This is useful for when you want to treat video
         databases as image databases.
-    force : bool, optional
-        If true, all files will be overwritten.
+    repeat : bool, optional
+        If True, the samples are repeated forever.

     Returns
     -------
...
...
@@ -65,35 +38,34 @@ def bio_generator(database, groups, number_of_parallel_jobs, output_dir,
     output_shapes : (tf.TensorShape, tf.TensorShape, tf.TensorShape)
         The shapes of the returned samples.
     """
-    if read_original_data is None:
-        read_original_data = _read_original_data
+    if load_data is None:
+        def load_data(database, biofile):
+            data = read_original_data(biofile, database.original_directory,
+                                      database.original_extension)
+            return data
     if biofile_to_label is None:
         def biofile_to_label(biofile):
             return -1
-    biofiles = list(database.all_files(groups))
-    if number_of_parallel_jobs > 1:
-        start, end = indices(biofiles, number_of_parallel_jobs)
-        biofiles = biofiles[start:end]
     labels = (biofile_to_label(f) for f in biofiles)
     keys = (str(f.make_path("", "")) for f in biofiles)
     def generator():
-        for f, label, key in six.moves.zip(biofiles, labels, keys):
-            outpath = make_output_path(output_dir, key)
-            if not force and os.path.isfile(outpath):
-                continue
-            data = read_original_data(f, database.original_directory,
-                                      database.original_extension)
-            # labels
-            if multiple_samples:
-                for d in data:
-                    yield (d, label, key)
-            else:
-                yield (data, label, key)
+        while True:
+            for f, label, key in six.moves.zip(biofiles, labels, keys):
+                data = load_data(database, f)
+                # labels
+                if multiple_samples:
+                    for d in data:
+                        yield (d, label, key)
+                else:
+                    yield (data, label, key)
+            if not repeat:
+                break
     # load one data to get its type and shape
-    data = read_original_data(biofiles[0], database.original_directory,
-                              database.original_extension)
+    data = load_data(database, biofiles[0])
     if multiple_samples:
         try:
             data = data[0]
...
...
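For orientation, a minimal sketch of how the reworked bio_generator is meant to be driven (the database argument stands for any configured bob.bio.base BioDatabase; the load_data shown just spells out the default behaviour in terms of the new ``data = load_data(database, biofile)`` signature):

import tensorflow as tf
from bob.bio.base import read_original_data
from bob.learn.tensorflow.dataset.bio import bio_generator

def make_bio_dataset(database, groups=('dev',)):
    # sketch: turn a bob.bio.base database into a tf.data.Dataset
    biofiles = list(database.all_files(groups))

    def load_data(database, biofile):
        # equivalent to the built-in default, written out to show the signature
        return read_original_data(biofile, database.original_directory,
                                  database.original_extension)

    generator, output_types, output_shapes = bio_generator(
        database, biofiles, load_data=load_data, repeat=False)
    return tf.data.Dataset.from_generator(generator, output_types, output_shapes)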
bob/learn/tensorflow/network/SimpleCNN.py
 import tensorflow as tf
 from ..utils import get_available_gpus, to_channels_first
 def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN,
-                 kernerl_size=(3, 3), n_classes=2):
-    data_format = 'channels_last'
-    if len(get_available_gpus()) != 0:
-        # When running on GPU, transpose the data from channels_last (NHWC) to
-        # channels_first (NCHW) to improve performance. See
-        # https://www.tensorflow.org/performance/performance_guide#data_formats
-        input_layer = to_channels_first('input_layer')
-        data_format = 'channels_first'
+                 kernerl_size=(3, 3), n_classes=2,
+                 data_format='channels_last'):
+    # Keep track of all the endpoints
+    endpoints = {}

     # Convolutional Layer #1
     # Computes 32 features using a kernerl_size filter with ReLU activation.
...
...
@@ -22,11 +18,13 @@ def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN,
         padding="same",
         activation=tf.nn.relu,
         data_format=data_format)
+    endpoints['conv1'] = conv1

     # Pooling Layer #1
     # First max pooling layer with a 2x2 filter and stride of 2
     pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2,
                                     data_format=data_format)
+    endpoints['pool1'] = pool1

     # Convolutional Layer #2
     # Computes 64 features using a kernerl_size filter.
...
...
@@ -38,44 +36,55 @@ def architecture(input_layer, mode=tf.estimator.ModeKeys.TRAIN,
         padding="same",
         activation=tf.nn.relu,
         data_format=data_format)
+    endpoints['conv2'] = conv2

     # Pooling Layer #2
     # Second max pooling layer with a 2x2 filter and stride of 2
     pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2,
                                     data_format=data_format)
+    endpoints['pool2'] = pool2

     # Flatten tensor into a batch of vectors
     # TODO: use tf.layers.flatten in tensorflow 1.4 and above
     pool2_flat = tf.contrib.layers.flatten(pool2)
+    endpoints['pool2_flat'] = pool2_flat

     # Dense Layer
     # Densely connected layer with 1024 neurons
     dense = tf.layers.dense(inputs=pool2_flat, units=1024,
                             activation=tf.nn.relu)
+    endpoints['dense'] = dense

     # Add dropout operation; 0.6 probability that element will be kept
     dropout = tf.layers.dropout(
         inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
+    endpoints['dropout'] = dropout

     # Logits layer
     # Input Tensor Shape: [batch_size, 1024]
     # Output Tensor Shape: [batch_size, 2]
     logits = tf.layers.dense(inputs=dropout, units=n_classes)
+    endpoints['logits'] = logits

-    return logits
+    return logits, endpoints
 def model_fn(features, labels, mode, params=None, config=None):
     """Model function for CNN."""
-    data = features['data']
-    keys = features['key']
     params = params or {}
     learning_rate = params.get('learning_rate', 1e-5)
-    kernerl_size = params.get('kernerl_size', (3, 3))
-    n_classes = params.get('n_classes', 2)
+    data = features['data']
+    keys = features['keys']

-    logits = architecture(data, mode, kernerl_size=kernerl_size,
-                          n_classes=n_classes)
+    arch_kwargs = {
+        'kernerl_size': params.get('kernerl_size', None),
+        'n_classes': params.get('n_classes', None),
+        'data_format': params.get('data_format', None),
+    }
+    arch_kwargs = {k: v for k, v in arch_kwargs.items() if v is not None}
+    logits, _ = architecture(data, mode, **arch_kwargs)

     predictions = {
         # Generate predictions (for PREDICT and EVAL mode)
...
...
@@ -91,24 +100,27 @@ def model_fn(features, labels, mode, params=None, config=None):
     # Calculate Loss (for both TRAIN and EVAL modes)
     loss = tf.losses.sparse_softmax_cross_entropy(logits=logits, labels=labels)
+    accuracy = tf.metrics.accuracy(
+        labels=labels, predictions=predictions["classes"])
+    metrics = {'accuracy': accuracy}

-    with tf.name_scope('train_metrics'):
-        # Create a tensor named train_loss for logging purposes
-        tf.summary.scalar('train_loss', loss)
-
-    # Configure the Training Op (for TRAIN mode)
+    # Configure the training op
     if mode == tf.estimator.ModeKeys.TRAIN:
-        optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
-        train_op = optimizer.minimize(loss=loss,
-                                      global_step=tf.train.get_global_step())
-        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
-                                          train_op=train_op)
-
-    # Add evaluation metrics (for EVAL mode)
-    eval_metric_ops = {
-        "accuracy": tf.metrics.accuracy(
-            labels=labels, predictions=predictions["classes"])}
+        optimizer = tf.train.GradientDescentOptimizer(
+            learning_rate=learning_rate)
+        train_op = optimizer.minimize(
+            loss=loss, global_step=tf.train.get_or_create_global_step())
+        # Log accuracy and loss
+        with tf.name_scope('train_metrics'):
+            tf.summary.scalar('accuracy', accuracy[1])
+            tf.summary.scalar('loss', loss)
+    else:
+        train_op = None
     return tf.estimator.EstimatorSpec(
-        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
+        mode=mode,
+        predictions=predictions,
+        loss=loss,
+        train_op=train_op,
+        eval_metric_ops=metrics)
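Since architecture now takes data_format explicitly and returns its endpoints, here is a quick sketch of calling it directly (the input shape and mode are illustrative):

import tensorflow as tf
from bob.learn.tensorflow.network.SimpleCNN import architecture

# NHWC input; channels_first is no longer auto-selected from the visible GPUs
images = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
logits, endpoints = architecture(images, mode=tf.estimator.ModeKeys.PREDICT,
                                 kernerl_size=(3, 3), n_classes=2,
                                 data_format='channels_last')

# intermediate layers are reachable through the endpoints dictionary,
# e.g. endpoints['conv1'], endpoints['pool2_flat'], endpoints['logits']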
bob/learn/tensorflow/script/eval_generic.py
...
...
@@ -20,16 +20,13 @@ The configuration files should have the following objects totally:
 ## Required objects:

-model_dir
-model_fn
+estimator
 eval_input_fn

 ## Optional objects:
 eval_interval_secs
 run_once
-run_config
-model_params
 steps
 hooks
 name
...
...
@@ -59,33 +56,27 @@ def main(argv=None):
     config_files = args['<config_files>']
     config = read_config_file(config_files)

-    model_dir = config.model_dir
-    model_fn = config.model_fn
+    estimator = config.estimator
     eval_input_fn = config.eval_input_fn

     eval_interval_secs = getattr(config, 'eval_interval_secs', 60)
     run_once = getattr(config, 'run_once', False)
-    run_config = getattr(config, 'run_config', None)
-    model_params = getattr(config, 'model_params', None)
     steps = getattr(config, 'steps', None)
     hooks = getattr(config, 'hooks', None)
     name = getattr(config, 'eval_name', None)

-    # Instantiate Estimator
-    nn = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
-                                params=model_params, config=run_config)
-
     if name:
         real_name = 'eval_' + name
     else:
         real_name = 'eval'
-    evaluated_file = os.path.join(nn.model_dir, real_name, 'evaluated')
+    evaluated_file = os.path.join(estimator.model_dir, real_name, 'evaluated')

     while True:
         evaluated_steps = []
         if os.path.exists(evaluated_file):
             with open(evaluated_file) as f:
-                evaluated_steps = f.read().split()
+                evaluated_steps = [line.split()[0] for line in f]

-        ckpt = tf.train.get_checkpoint_state(nn.model_dir)
+        ckpt = tf.train.get_checkpoint_state(estimator.model_dir)
         if (not ckpt) or (not ckpt.model_checkpoint_path):
             time.sleep(eval_interval_secs)
             continue
...
...
@@ -101,7 +92,7 @@ def main(argv=None):
             continue

         # Evaluate
-        evaluations = nn.evaluate(
+        evaluations = estimator.evaluate(
             input_fn=eval_input_fn,
             steps=steps,
             hooks=hooks,
...
...
@@ -109,11 +100,14 @@ def main(argv=None):
             name=name,
         )
-        print(', '.join('%s = %s' % (k, v)
-                        for k, v in sorted(six.iteritems(evaluations))))
+        str_evaluations = ', '.join(
+            '%s = %s' % (k, v)
+            for k, v in sorted(six.iteritems(evaluations)))
+        print(str_evaluations)
         sys.stdout.flush()
         with open(evaluated_file, 'a') as f:
-            f.write('{}\n'.format(evaluations['global_step']))
+            f.write('{} {}\n'.format(evaluations['global_step'],
+                                     str_evaluations))
         if run_once:
             break
         time.sleep(eval_interval_secs)
...
...
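Note the change to the bookkeeping file: each evaluation now appends the global step followed by the formatted metrics on a single line, and only the first token of each line is read back as an already-evaluated step. A small sketch of the new format (the metric values here are made up):

# contents of <model_dir>/eval/evaluated after two evaluations, e.g.:
#   100 accuracy = 0.5, global_step = 100, loss = 0.7
#   200 accuracy = 0.8, global_step = 200, loss = 0.4

with open(evaluated_file) as f:
    evaluated_steps = [line.split()[0] for line in f]   # -> ['100', '200']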
bob/learn/tensorflow/script/predict_bio.py
...
...
@@ -53,9 +53,8 @@ The configuration files should have the following objects totally:
     An estimator instance that represents the neural network.
 database : :any:`bob.bio.base.database.BioDatabase`
     A bio database. Its original_directory must point to the correct path.
-groups : [str]
-    A list of groups to evaluate. Can be any permutation of
-    ``('world', 'dev', 'eval')``.
+biofiles : [:any:`bob.bio.base.database.BioFile`]
+    The list of the bio files.
 bio_predict_input_fn : callable
     A callable with the signature of
     ``input_fn = bio_predict_input_fn(generator, output_types, output_shapes)``
...
...
@@ -65,9 +64,9 @@ The configuration files should have the following objects totally:
 # Optional objects:

-read_original_data : callable
+load_data : :obj:`object`, optional
     A callable with the signature of
-    ``data = read_original_data(biofile, directory, extension)``.
+    ``data = load_data(database, biofile)``.
     :any:`bob.bio.base.read_original_data` is used by default.
 hooks : [:any:`tf.train.SessionRunHook`]
     Optional hooks that you may want to attach to the predictions.
...
...
@@ -83,6 +82,7 @@ An example configuration for a trained model and its evaluation could be::
estimator = tf.estimator.Estimator(model_fn, model_dir)
groups = ['dev']
biofiles = database.all_files(groups)
# the ``dataset = tf.data.Dataset.from_generator(generator, output_types,
...
...
@@ -113,13 +113,33 @@ from collections import defaultdict
 import numpy as np
 from bob.io.base import create_directories_safe
 from bob.bio.base.utils import read_config_file, save
+from bob.bio.base.tools.grid import indices
 from bob.learn.tensorflow.utils.commandline import \
     get_from_config_or_commandline
-from bob.learn.tensorflow.dataset.bio import make_output_path, bio_generator
+from bob.learn.tensorflow.dataset.bio import bio_generator
 from bob.core.log import setup, set_verbosity_level
 logger = setup(__name__)


+def make_output_path(output_dir, key):
+    """Returns an output path used for saving keys. You need to make sure the
+    directories leading to this output path exist.
+
+    Parameters
+    ----------
+    output_dir : str
+        The root directory to save the results
+    key : str
+        The key of the sample. Usually biofile.make_path("", "")
+
+    Returns
+    -------
+    str
+        The path for the provided key.
+    """
+    return os.path.join(output_dir, key + '.hdf5')
+
+
 def save_predictions(pool, output_dir, key, pred_buffer):
     outpath = make_output_path(output_dir, key)
     create_directories_safe(os.path.dirname(outpath))
...
...
@@ -150,7 +170,7 @@ def main(argv=None):
     force = get_from_config_or_commandline(config, 'force', args, defaults)
     hooks = getattr(config, 'hooks', None)
-    read_original_data = getattr(config, 'read_original_data', None)
+    load_data = getattr(config, 'load_data', None)

     # Sets-up logging
     set_verbosity_level(logger, verbosity)
...
...
@@ -158,15 +178,18 @@ def main(argv=None):
     # required arguments
     estimator = config.estimator
     database = config.database
-    groups = config.groups
+    biofiles = config.biofiles
     bio_predict_input_fn = config.bio_predict_input_fn
     output_dir = get_from_config_or_commandline(
         config, 'output_dir', args, defaults, False)

+    if number_of_parallel_jobs > 1:
+        start, end = indices(biofiles, number_of_parallel_jobs)
+        biofiles = biofiles[start:end]
+
     generator, output_types, output_shapes = bio_generator(
-        database, groups, number_of_parallel_jobs, output_dir,
-        read_original_data=read_original_data, biofile_to_label=None,
-        multiple_samples=multiple_samples, force=force)
+        database, biofiles, load_data=load_data, biofile_to_label=None,
+        multiple_samples=multiple_samples, force=force)

     predict_input_fn = bio_predict_input_fn(generator, output_types,
                                             output_shapes)
...
...
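For completeness, a sketch of what the bio_predict_input_fn object in such a configuration could look like; the batch size and the 'data'/'key' feature names follow the documented example only loosely and are illustrative:

import tensorflow as tf

def bio_predict_input_fn(generator, output_types, output_shapes):
    def input_fn():
        dataset = tf.data.Dataset.from_generator(generator, output_types,
                                                 output_shapes)
        dataset = dataset.batch(1)
        data, label, key = dataset.make_one_shot_iterator().get_next()
        return {'data': data, 'key': key}, label
    return input_fn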
bob/learn/tensorflow/script/train_generic.py
 #!/usr/bin/env python
-"""Trains networks using tf.train.MonitoredTrainingSession
+"""Trains networks using Tensorflow estimators.

 Usage:
   %(prog)s [options] <config_files>...
...
...
@@ -20,14 +20,11 @@ The configuration files should have the following objects totally:
 ## Required objects:

-model_fn
+estimator
 train_input_fn

 ## Optional objects:

-model_dir
-run_config
-model_params
 hooks
 steps
 max_steps
...
...
@@ -40,7 +37,6 @@ from __future__ import division
 from __future__ import print_function
 # import pkg_resources so that bob imports work properly:
 import pkg_resources
-import tensorflow as tf
 from bob.bio.base.utils import read_config_file
...
...
@@ -54,27 +50,16 @@ def main(argv=None):
     config_files = args['<config_files>']
     config = read_config_file(config_files)

-    model_fn = config.model_fn
+    estimator = config.estimator
     train_input_fn = config.train_input_fn

-    model_dir = getattr(config, 'model_dir', None)
-    run_config = getattr(config, 'run_config', None)
-    model_params = getattr(config, 'model_params', None)
     hooks = getattr(config, 'hooks', None)
     steps = getattr(config, 'steps', None)
     max_steps = getattr(config, 'max_steps', None)

-    if run_config is None:
-        # by default create reproducible nets:
-        from bob.learn.tensorflow.utils.reproducible import run_config
-
-    # Instantiate Estimator
-    nn = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
-                                params=model_params, config=run_config)
-
     # Train
-    nn.train(input_fn=train_input_fn, hooks=hooks, steps=steps,
-             max_steps=max_steps)
+    estimator.train(input_fn=train_input_fn, hooks=hooks, steps=steps,
+                    max_steps=max_steps)


 if __name__ == '__main__':
...
...
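A minimal sketch of a training configuration under the new scheme (paths, shapes and batch size are illustrative; model_fn is assumed to be defined or imported earlier in the same file, and the tfrecord helper is used along the lines of the test configuration below):

import tensorflow as tf
from bob.learn.tensorflow.dataset.tfrecords import shuffle_data_and_labels
from bob.learn.tensorflow.utils.reproducible import run_config

tfrecord_filenames = ['/path/to/train.tfrecords']  # illustrative
data_shape = (1, 112, 92)
data_type = tf.uint8
batch_size = 32
epochs = 2

def train_input_fn():
    return shuffle_data_and_labels(tfrecord_filenames, data_shape, data_type,
                                   batch_size, epochs=epochs)

# model_fn: any tf.estimator-compatible model function defined in this config
estimator = tf.estimator.Estimator(model_fn=model_fn,
                                   model_dir='/path/to/model_dir',
                                   config=run_config)

# optional objects the script still understands
max_steps = 1000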
bob/learn/tensorflow/test/test_estimator_scripts.py
...
...
@@ -13,6 +13,7 @@ from bob.learn.tensorflow.script.eval_generic import main as eval_generic
 dummy_tfrecord_config = datafile('dummy_verify_config.py', __name__)
 CONFIG = '''
 import tensorflow as tf
+from bob.learn.tensorflow.utils.reproducible import run_config
 from bob.learn.tensorflow.dataset.tfrecords import shuffle_data_and_labels, \
     batch_data_and_labels
...
...
@@ -21,7 +22,7 @@ tfrecord_filenames = ['%(tfrecord_filenames)s']
 data_shape = (1, 112, 92)  # size of atnt images
 data_type = tf.uint8
 batch_size = 2
-epochs = 1
+epochs = 2
 learning_rate = 0.00001
 run_once = True
...
...
@@ -32,7 +33,7 @@ def train_input_fn():
 def eval_input_fn():
     return batch_data_and_labels(tfrecord_filenames, data_shape, data_type,
-                                 batch_size, epochs=epochs)
+                                 batch_size, epochs=1)


 def architecture(images):
     images = tf.cast(images, tf.float32)
...
...
@@ -60,24 +61,35 @@ def model_fn(features, labels, mode, params, config):
         return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

     # Calculate Loss (for both TRAIN and EVAL modes)
-    predictor = tf.nn.sparse_softmax_cross_entropy_with_logits(
+    loss = tf.losses.sparse_softmax_cross_entropy(
         logits=logits, labels=labels)
-    loss = tf.reduce_mean(predictor)
+    accuracy = tf.metrics.accuracy(
+        labels=labels, predictions=predictions["classes"])
+    metrics = {'accuracy': accuracy}

-    # Configure the Training Op (for TRAIN mode)
+    # Configure the training op
     if mode == tf.estimator.ModeKeys.TRAIN:
-        global_step = tf.contrib.framework.get_or_create_global_step()
-        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
-        train_op = optimizer.minimize(loss, global_step=global_step)
-        return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
-                                          train_op=train_op)
-
-    # Add evaluation metrics (for EVAL mode)
-    eval_metric_ops = {
-        "accuracy": tf.metrics.accuracy(
-            labels=labels, predictions=predictions["classes"])}
+        optimizer = tf.train.GradientDescentOptimizer(
+            learning_rate=learning_rate)
+        train_op = optimizer.minimize(
+            loss=loss, global_step=tf.train.get_or_create_global_step())
+        # Log accuracy and loss
+        with tf.name_scope('train_metrics'):
+            tf.summary.scalar('accuracy', accuracy[1])
+            tf.summary.scalar('loss', loss)
+    else:
+        train_op = None
     return tf.estimator.EstimatorSpec(
-        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
+        mode=mode,
+        predictions=predictions,
+        loss=loss,
+        train_op=train_op,
+        eval_metric_ops=metrics)

+estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir,
+                                   config=run_config)
 '''
...
...
@@ -128,7 +140,7 @@ def test_eval_once():
             doc = f.read()
             assert '1' in doc, doc
-            assert '100' in doc, doc
+            assert '200' in doc, doc
     finally:
         try:
             shutil.rmtree(tmpdir)
...
...