Organizing the trainer

parent 2efd0e68
@@ -31,24 +31,70 @@ class SoftmaxAnalizer(object):
         """
         self.data_shuffler = None
-        self.machine = None
+        self.trainer = None
         self.session = None

-    def __call__(self, data_shuffler, machine, session):
+    def __call__(self, data_shuffler, trainer, session):
         if self.data_shuffler is None:
             self.data_shuffler = data_shuffler
-            self.machine = machine
+            self.trainer = trainer
             self.session = session

         # Creating the graph
         feature_batch, label_batch = self.data_shuffler.get_placeholders(name="validation_accuracy")
         data, labels = self.data_shuffler.get_batch()
-        graph = self.machine.compute_graph(feature_batch)
+        graph = self.trainer.architecture.compute_graph(feature_batch)

         predictions = numpy.argmax(self.session.run(graph, feed_dict={feature_batch: data[:]}), 1)
         accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]

         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="accuracy_validation", simple_value=float(accuracy)))
         return summary_pb2.Summary(value=summaries)
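Side note: once `session.run` has produced the logits, the accuracy above is plain numpy. A minimal, self-contained sketch with made-up logits for four samples and three classes:

    import numpy

    # Stand-in for session.run(graph, feed_dict=...): 4 samples, 3 classes
    logits = numpy.array([[0.1, 2.0, 0.3],
                          [1.5, 0.2, 0.1],
                          [0.0, 0.1, 3.0],
                          [0.9, 0.8, 0.7]])
    labels = numpy.array([1, 0, 2, 1])

    predictions = numpy.argmax(logits, 1)  # winning class per sample
    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
    print(accuracy)  # 75.0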
+
+
+class SoftmaxSiameseAnalizer(object):
+    """
+    Analizer.
+    """
+
+    def __init__(self):
+        """
+        Softmax analizer
+
+        ** Parameters **
+
+        data_shuffler:
+        graph:
+        session:
+        convergence_threshold:
+        convergence_reference: References to analyze the convergence. Possible values are `eer`, `far10`, `far100`
+        """
+        self.data_shuffler = None
+        self.trainer = None
+        self.session = None
+
+    def __call__(self, data_shuffler, trainer, session):
+        if self.data_shuffler is None:
+            self.data_shuffler = data_shuffler
+            self.trainer = trainer
+            self.session = session
+
+        # Creating the graph
+        #feature_batch, label_batch = self.data_shuffler.get_placeholders(name="validation_accuracy")
+        feature_left_batch, feature_right_batch, label_batch = self.data_shuffler.get_placeholders_pair(name="validation_accuracy")
+
+        batch_left, batch_right, labels = self.data_shuffler.get_batch()
+        left = self.trainer.architecture.compute_graph(feature_left_batch)
+
+        predictions = numpy.argmax(self.session.run(graph, feed_dict={feature_batch: data[:]}), 1)
+        accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
+
+        summaries = []
+        summaries.append(summary_pb2.Summary.Value(tag="accuracy_validation", simple_value=float(accuracy)))
+        return summary_pb2.Summary(value=summaries)
\ No newline at end of file
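The new `SoftmaxSiameseAnalizer.__call__` is still work in progress (only the left branch of the pair is computed, and the tail is copy-pasted from the softmax analyzer). For what such an analyzer typically ends up measuring, here is a hedged numpy sketch, not the committed implementation: score a pair as genuine when the distance between the two embeddings falls below a threshold.

    import numpy

    def pair_accuracy(left_features, right_features, labels, threshold):
        # labels: 1 for genuine pairs, 0 for impostor pairs (assumed convention)
        distances = numpy.linalg.norm(left_features - right_features, axis=1)
        predictions = (distances < threshold).astype(int)
        return 100. * numpy.mean(predictions == labels)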
@@ -47,7 +47,10 @@ class BaseDataShuffler(object):
         self.indexes = numpy.array(range(self.n_samples))
         numpy.random.shuffle(self.indexes)

+        # TODO: Reorganize the data shufflers for siamese and triplets
         self.data_placeholder = None
+        self.data2_placeholder = None
+        self.data3_placeholder = None
         self.label_placeholder = None

     def get_placeholders_forprefetch(self, name=""):
@@ -59,6 +62,21 @@ class BaseDataShuffler(object):
             self.label_placeholder = tf.placeholder(tf.int64, shape=[None, ])
         return self.data_placeholder, self.label_placeholder
+    def get_placeholders_pair_forprefetch(self, name=""):
+        """
+        Returns a placeholder with the size of your batch
+        """
+        if self.data_placeholder is None:
+            self.data_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        if self.data2_placeholder is None:
+            self.data2_placeholder = tf.placeholder(tf.float32, shape=tuple([None] + list(self.shape[1:])), name=name)
+
+        if self.label_placeholder is None:
+            self.label_placeholder = tf.placeholder(tf.int64, shape=[None, ])
+
+        return self.data_placeholder, self.data2_placeholder, self.label_placeholder
+
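Note the shape difference between the two variants: the `*_forprefetch` placeholders leave the batch dimension open (`[None] + shape[1:]`) because the queue, not the placeholder, fixes the batch size, while `get_placeholders_pair` below pins the full batch shape. A short illustration (shape values are made up):

    import tensorflow as tf

    shape = (32, 28, 28, 1)  # hypothetical (batch, height, width, channels)

    # Prefetch variant: batch dimension left open
    data_open = tf.placeholder(tf.float32, shape=tuple([None] + list(shape[1:])))
    # Plain variant: full batch shape fixed
    data_fixed = tf.placeholder(tf.float32, shape=shape)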
     def get_placeholders(self, name=""):
         """
         Returns a placeholder with the size of your batch
@@ -66,9 +84,28 @@ class BaseDataShuffler(object):
         if self.data_placeholder is None:
             self.data_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name)

+        if self.label_placeholder is None:
             self.label_placeholder = tf.placeholder(tf.int64, shape=self.shape[0])

         return self.data_placeholder, self.label_placeholder

+    def get_placeholders_pair(self, name=""):
+        """
+        Returns a placeholder with the size of your batch
+        """
+        if self.data_placeholder is None:
+            self.data_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name+"_right")
+
+        if self.data2_placeholder is None:
+            self.data2_placeholder = tf.placeholder(tf.float32, shape=self.shape, name=name+"_left")
+
+        if self.label_placeholder is None:
+            self.label_placeholder = tf.placeholder(tf.int64, shape=self.shape[0], name="label")
+
+        return self.data_placeholder, self.data2_placeholder, self.label_placeholder
+
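Hedged usage sketch of the pair placeholders (names illustrative; `shuffler` is any shuffler implementing `get_pair`, as the siamese trainer below assumes):

    left_ph, right_ph, label_ph = shuffler.get_placeholders_pair(name="train")
    batch_left, batch_right, labels = shuffler.get_pair()

    feed_dict = {left_ph: batch_left,
                 right_ph: batch_right,
                 label_ph: labels}
    # session.run(loss, feed_dict=feed_dict)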
     def get_genuine_or_not(self, input_data, input_labels, genuine=True):
         if genuine:
...
@@ -87,13 +87,13 @@ def main():
                                        batch_size=VALIDATION_BATCH_SIZE)

     # Preparing the architecture
-    cnn = False
+    cnn = True
     if cnn:
         architecture = Chopra(seed=SEED)
         #architecture = Lenet(seed=SEED)
         #architecture = Dummy(seed=SEED)
         loss = BaseLoss(tf.nn.sparse_softmax_cross_entropy_with_logits, tf.reduce_mean)
-        trainer = Trainer(architecture=architecture, loss=loss, iterations=ITERATIONS, prefetch=False, temp_dir="cnn")
+        trainer = Trainer(architecture=architecture, loss=loss, iterations=ITERATIONS, prefetch=False, temp_dir="./LOGS/cnn")
         trainer.train(train_data_shuffler, validation_data_shuffler)
         #trainer.train(train_data_shuffler)
     else:
...
@@ -112,28 +112,29 @@ def main():
     #                                  batch_size=VALIDATION_BATCH_SIZE)

     # Preparing the architecture
-    #n_classes = len(train_data_shuffler.possible_labels)
-    n_classes = 50
+    n_classes = len(train_data_shuffler.possible_labels)
+    #n_classes = 50
     cnn = True
     if cnn:
         # LENET PAPER CHOPRA
-        architecture = Chopra(seed=SEED)
+        architecture = Chopra(seed=SEED, fc1_output=n_classes)
         #architecture = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=8, conv2_output=16, use_gpu=USE_GPU)
         #architecture = VGG(n_classes=n_classes, use_gpu=USE_GPU)
         #architecture = Dummy(seed=SEED)
         #architecture = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)

-        loss = ContrastiveLoss(contrastive_margin=3.)
-        optimizer = tf.train.GradientDescentOptimizer(0.00001)
+        loss = ContrastiveLoss(contrastive_margin=4.)
+        optimizer = tf.train.GradientDescentOptimizer(0.000001)
         trainer = SiameseTrainer(architecture=architecture,
                                  loss=loss,
                                  iterations=ITERATIONS,
                                  snapshot=VALIDATION_TEST,
-                                 optimizer=optimizer)
+                                 optimizer=optimizer,
+                                 temp_dir="./LOGS/siamese-cnn")
+        #import ipdb; ipdb.set_trace();
         trainer.train(train_data_shuffler, validation_data_shuffler)
         #trainer.train(train_data_shuffler)
     else:
...
@@ -143,5 +144,6 @@ def main():
         trainer = SiameseTrainer(architecture=mlp,
                                  loss=loss,
                                  iterations=ITERATIONS,
-                                 snapshot=VALIDATION_TEST)
+                                 snapshot=VALIDATION_TEST,
+                                 temp_dir="./LOGS/siamese-dnn")
         trainer.train(train_data_shuffler, validation_data_shuffler)
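The margin bump (3. to 4.) only changes how far impostor pairs must be pushed apart. For reference, a numpy sketch of the standard contrastive loss (Hadsell et al.); the library's `ContrastiveLoss` presumably follows this shape, though its exact label convention and its split into between/within terms may differ:

    import numpy

    def contrastive_loss(left, right, labels, margin=4.0):
        # labels: 1 for genuine pairs, 0 for impostor pairs (assumed convention)
        d = numpy.linalg.norm(left - right, axis=1)
        within = labels * numpy.square(d)  # pull genuine pairs together
        between = (1 - labels) * numpy.square(numpy.maximum(margin - d, 0))  # push impostors past the margin
        return numpy.mean(within + between)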
@@ -6,16 +6,40 @@
 import logging
 logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
+from tensorflow.core.framework import summary_pb2
 import threading
-from ..analyzers import ExperimentAnalizer
+from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
 from ..network import SequenceNetwork
 import bob.io.base
 from .Trainer import Trainer
 import os
 import sys

 class SiameseTrainer(Trainer):
"""
Trainer for siamese networks.
**Parameters**
architecture: The architecture that you want to run. Should be a :py:class`bob.learn.tensorflow.network.SequenceNetwork`
optimizer: One of the tensorflow optimizers https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html
use_gpu: Use GPUs in the training
loss: Loss
temp_dir: The output directory
base_learning_rate: Initial learning rate
weight_decay:
convergence_threshold:
iterations: Maximum number of iterations
snapshot: Will take a snapshot of the network at every `n` iterations
prefetch: Use extra Threads to deal with the I/O
analizer: Neural network analizer :py:mod:`bob.learn.tensorflow.analyzers`
verbosity_level:
"""
     def __init__(self,
                  architecture,
                  optimizer=tf.train.AdamOptimizer(),
@@ -30,7 +54,13 @@ class SiameseTrainer(Trainer):
                  ###### training options ##########
                  convergence_threshold=0.01,
                  iterations=5000,
-                 snapshot=100):
+                 snapshot=100,
+                 prefetch=False,
+
+                 ## Analizer
+                 analizer=SoftmaxAnalizer(),
+
+                 verbosity_level=2):

         super(SiameseTrainer, self).__init__(
             architecture=architecture,
@@ -38,146 +68,171 @@ class SiameseTrainer(Trainer):
             use_gpu=use_gpu,
             loss=loss,
             temp_dir=temp_dir,

+            # Learning rate
             base_learning_rate=base_learning_rate,
             weight_decay=weight_decay,

+            ###### training options ##########
             convergence_threshold=convergence_threshold,
             iterations=iterations,
-            snapshot=snapshot
+            snapshot=snapshot,
+            prefetch=prefetch,
+
+            ## Analizer
+            analizer=analizer,
+
+            verbosity_level=verbosity_level
         )
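With the constructor now mirroring the base `Trainer`, a call site looks like this (a hedged sketch; values are illustrative, not taken from this commit):

    trainer = SiameseTrainer(architecture=architecture,
                             loss=ContrastiveLoss(contrastive_margin=4.),
                             iterations=5000,
                             snapshot=100,
                             prefetch=False,
                             analizer=SoftmaxAnalizer(),
                             verbosity_level=2,
                             temp_dir="./LOGS/siamese-cnn")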
+        self.between_class_graph = None
+        self.within_class_graph = None

-    def train(self, train_data_shuffler, validation_data_shuffler=None):
-        """
-        Do the loop forward --> backward --|
-                               ^-----------|
-        """
-
-        def start_thread():
-            threads = []
-            for n in range(1):
-                t = threading.Thread(target=load_and_enqueue)
-                t.daemon = True  # thread will close when parent quits
-                t.start()
-                threads.append(t)
-            return threads
-
-        def load_and_enqueue():
-            """
-            Injecting data in the place holder queue
-            """
-            # for i in range(self.iterations+5):
-            while not thread_pool.should_stop():
-                batch_left, batch_right, labels = train_data_shuffler.get_pair()
-
-                feed_dict = {train_placeholder_left_data: batch_left,
-                             train_placeholder_right_data: batch_right,
-                             train_placeholder_labels: labels}
-
-                session.run(enqueue_op, feed_dict=feed_dict)
-
-        # TODO: find an elegant way to provide this as a parameter of the trainer
-        learning_rate = tf.train.exponential_decay(
-            self.base_learning_rate,  # Learning rate
-            train_data_shuffler.batch_size,
-            train_data_shuffler.n_samples,
-            self.weight_decay  # Decay step
-        )
-
-        # Creating directory
-        bob.io.base.create_directories_safe(self.temp_dir)
-
-        # Creating two graphs
-        train_placeholder_left_data, train_placeholder_labels = train_data_shuffler.\
-            get_placeholders_forprefetch(name="train_left")
-        train_placeholder_right_data, _ = train_data_shuffler.get_placeholders(name="train_right")
-
-        # Defining a placeholder queue for prefetching
-        queue = tf.FIFOQueue(capacity=100,
-                             dtypes=[tf.float32, tf.float32, tf.int64],
-                             shapes=[train_placeholder_left_data.get_shape().as_list()[1:],
-                                     train_placeholder_right_data.get_shape().as_list()[1:],
-                                     []])
-        # Fetching the place holders from the queue
-        enqueue_op = queue.enqueue_many([train_placeholder_left_data,
-                                         train_placeholder_right_data,
-                                         train_placeholder_labels])
-        train_left_feature_batch, train_right_label_batch, train_labels_batch = \
-            queue.dequeue_many(train_data_shuffler.batch_size)
-
-        # Creating the architecture for train and validation
-        if not isinstance(self.architecture, SequenceNetwork):
-            raise ValueError("The variable `architecture` must be an instance of "
-                             "`bob.learn.tensorflow.network.SequenceNetwork`")
-
-        # Creating the siamese graph
-        train_left_graph = self.architecture.compute_graph(train_left_feature_batch)
-        train_right_graph = self.architecture.compute_graph(train_right_label_batch)
-
-        loss_train, between_class, within_class = self.loss(train_labels_batch,
-                                                            train_left_graph,
-                                                            train_right_graph)
-
-        # Preparing the optimizer
-        step = tf.Variable(0)
-        self.optimizer._learning_rate = learning_rate
-        optimizer = self.optimizer.minimize(loss_train, global_step=step)
-        #optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.99, use_locking=False,
-        #                                       name='Momentum').minimize(loss_train, global_step=step)
-
-        print("Initializing !!")
-        # Training
-        hdf5 = bob.io.base.HDF5File(os.path.join(self.temp_dir, 'model.hdf5'), 'w')
-
-        with tf.Session() as session:
-            if validation_data_shuffler is not None:
-                analizer = ExperimentAnalizer(validation_data_shuffler, self.architecture, session)
-
-            tf.initialize_all_variables().run()
-
-            # Start a thread to enqueue data asynchronously, and hide I/O latency.
-            thread_pool = tf.train.Coordinator()
-            tf.train.start_queue_runners(coord=thread_pool)
-            threads = start_thread()
-
-            # TENSOR BOARD SUMMARY
-            train_writer = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'LOGS'), session.graph)
-
-            # Siamese specific summary
-            tf.scalar_summary('loss', loss_train)
-            tf.scalar_summary('between_class', between_class)
-            tf.scalar_summary('within_class', within_class)
-            tf.scalar_summary('lr', learning_rate)
-            merged = tf.merge_all_summaries()
-
-            # Architecture summary
-            self.architecture.generate_summaries()
-            merged_validation = tf.merge_all_summaries()
-
-            for step in range(self.iterations):
-                _, l, lr, summary = session.run(
-                    [optimizer, loss_train, learning_rate, merged])
-                #_, l, lr, b, w, summary = session.run([optimizer, loss_train, learning_rate, between_class, within_class, merged])
-                #_, l, lr = session.run([optimizer, loss_train, learning_rate])
-                train_writer.add_summary(summary, step)
-                #print str(step) + " loss: {0}, bc: {1}, wc: {2}".format(l, b, w)
-                #print str(step) + " loss: {0}".format(l)
-                sys.stdout.flush()
-                #import ipdb; ipdb.set_trace();
-
-                if validation_data_shuffler is not None and step % self.snapshot == 0:
-                    print str(step)
-                    sys.stdout.flush()
-
-                    summary = session.run(merged_validation)
-                    train_writer.add_summary(summary, step)
-
-                    summary = analizer()
-                    train_writer.add_summary(summary, step)
-
-            print("#######DONE##########")
-            self.architecture.save(hdf5)
-            del hdf5
-            train_writer.close()
-
-            thread_pool.request_stop()
-            thread_pool.join(threads)
+    def compute_graph(self, data_shuffler, prefetch=False, name=""):
+        """
+        Computes the graph for the trainer.
+
+        ** Parameters **
+
+        data_shuffler: Data shuffler
+        prefetch:
+        name: Name of the graph
+        """
+
+        # Defining placeholders
+        if prefetch:
+            placeholder_left_data, placeholder_right_data, placeholder_labels = \
+                data_shuffler.get_placeholders_pair_forprefetch(name="train")
+
+            # Creating two graphs
+            #placeholder_left_data, placeholder_labels = data_shuffler.\
+            #    get_placeholders_forprefetch(name="train_left")
+            #placeholder_right_data, _ = data_shuffler.get_placeholders(name="train_right")
+
+            # Defining a placeholder queue for prefetching
+            queue = tf.FIFOQueue(capacity=100,
+                                 dtypes=[tf.float32, tf.float32, tf.int64],
+                                 shapes=[placeholder_left_data.get_shape().as_list()[1:],
+                                         placeholder_right_data.get_shape().as_list()[1:],
+                                         []])
+
+            # Fetching the placeholders from the queue
+            self.enqueue_op = queue.enqueue_many([placeholder_left_data, placeholder_right_data, placeholder_labels])
+            feature_left_batch, feature_right_batch, label_batch = queue.dequeue_many(data_shuffler.batch_size)
+
+            # Creating the architecture for train and validation
+            if not isinstance(self.architecture, SequenceNetwork):
+                raise ValueError("The variable `architecture` must be an instance of "
+                                 "`bob.learn.tensorflow.network.SequenceNetwork`")
+        else:
+            feature_left_batch, feature_right_batch, label_batch = data_shuffler.get_placeholders_pair(name="train_")
+            #feature_left_batch, label_batch = data_shuffler.get_placeholders(name="train_left")
+            #feature_right_batch, _ = data_shuffler.get_placeholders(name="train_right")

+        # Creating the siamese graph
+        train_left_graph = self.architecture.compute_graph(feature_left_batch)
+        train_right_graph = self.architecture.compute_graph(feature_right_batch)

+        graph, between_class_graph, within_class_graph = self.loss(label_batch,
+                                                                   train_left_graph,
+                                                                   train_right_graph)

+        self.between_class_graph = between_class_graph
+        self.within_class_graph = within_class_graph

+        return graph
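The prefetch branch is the old queue logic, now scoped to `compute_graph`. Stripped to its essentials, the pattern is (TF 0.x API as used in this repo; shapes and batch size are made up):

    import tensorflow as tf

    left_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    right_ph = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    label_ph = tf.placeholder(tf.int64, shape=[None])

    queue = tf.FIFOQueue(capacity=100,
                         dtypes=[tf.float32, tf.float32, tf.int64],
                         shapes=[[28, 28, 1], [28, 28, 1], []])
    enqueue_op = queue.enqueue_many([left_ph, right_ph, label_ph])

    # A producer thread keeps running enqueue_op while training consumes:
    left_batch, right_batch, label_batch = queue.dequeue_many(32)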
+    def get_feed_dict(self, data_shuffler):
+        """
+        Given a data shuffler, prepare the dictionary to be injected in the graph
+
+        ** Parameters **
+
+        data_shuffler:
+        """
+        batch_left, batch_right, labels = data_shuffler.get_pair()
+        placeholder_left_data, placeholder_right_data, placeholder_label = data_shuffler.get_placeholders_pair(name="train")
+
+        feed_dict = {placeholder_left_data: batch_left,
+                     placeholder_right_data: batch_right,
+                     placeholder_label: labels}
+
+        return feed_dict
+    def fit(self, session, step):
+        """
+        Run one iteration (`forward` and `backward`)
+
+        ** Parameters **
+
+        session: Tensorflow session
+        step: Iteration number
+        """
+        if self.prefetch:
+            _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
+                                                                 self.training_graph, self.between_class_graph, self.within_class_graph,
+                                                                 self.learning_rate, self.summaries_train])
+        else:
+            feed_dict = self.get_feed_dict(self.train_data_shuffler)
+            _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
+                                                                 self.training_graph, self.between_class_graph, self.within_class_graph,
+                                                                 self.learning_rate, self.summaries_train], feed_dict=feed_dict)

+        logger.info("Loss training set step={0} = {1}".format(step, l))
+        self.train_summary_writter.add_summary(summary, step)
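`fit` runs only a single step; the driving loop lives in the base `Trainer.train`. A hedged sketch of the loop this refactoring implies (assuming `train` has already built `training_graph`, the optimizer and the summary writers):

    with tf.Session() as session:
        tf.initialize_all_variables().run()
        for step in range(trainer.iterations):
            trainer.fit(session, step)
            if step % trainer.snapshot == 0:
                trainer.compute_validation(session, validation_data_shuffler, step)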
+    def compute_validation(self, session, data_shuffler, step):
+        """
+        Computes the loss in the validation set
+
+        ** Parameters **
+
+        session: Tensorflow session
+        data_shuffler: The data shuffler to be used
+        step: Iteration number
+        """
+        if self.validation_summary_writter is None:
+            self.validation_summary_writter = tf.train.SummaryWriter(os.path.join(self.temp_dir, 'validation'), session.graph)

+        self.validation_graph = self.compute_graph(data_shuffler, name="validation")
+        feed_dict = self.get_feed_dict(data_shuffler)
+        l = session.run(self.validation_graph, feed_dict=feed_dict)

+        summaries = []
+        summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))
+        self.validation_summary_writter.add_summary(summary_pb2.Summary(value=summaries), step)
+        logger.info("Loss VALIDATION set step={0} = {1}".format(step, l))
+    def create_general_summary(self):
+        """
+        Creates a simple tensorboard summary with the value of the loss and learning rate
+        """
+        # Train summary
+        tf.scalar_summary('loss', self.training_graph, name="train")
+        tf.scalar_summary('between_class_loss', self.between_class_graph, name="train")
+        tf.scalar_summary('within_class_loss', self.within_class_graph, name="train")
+        tf.scalar_summary('lr', self.learning_rate, name="train")
+        return tf.merge_all_summaries()
+    def load_and_enqueue(self, session):
+        """
+        Injecting data in the placeholder queue
+
+        **Parameters**
+
+        session: Tensorflow session
+        """
+        while not self.thread_pool.should_stop():
+            batch_left, batch_right, labels = self.train_data_shuffler.get_pair()
+            placeholder_left_data, placeholder_right_data, placeholder_label = self.train_data_shuffler.get_placeholders_pair()

+            feed_dict = {placeholder_left_data: batch_left,
+                         placeholder_right_data: batch_right,
+                         placeholder_label: labels}

+            session.run(self.enqueue_op, feed_dict=feed_dict)
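`load_and_enqueue` is the consumer-facing half of the old nested helpers; something still has to spawn it. A sketch mirroring the removed `start_thread` helper, adapted to the new method signature (assumed wiring, not part of this commit):

    import threading

    def start_thread(trainer, session, n_threads=1):
        threads = []
        for n in range(n_threads):
            t = threading.Thread(target=trainer.load_and_enqueue, args=(session,))
            t.daemon = True  # thread will close when parent quits
            t.start()
            threads.append(t)
        return threads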
@@ -4,21 +4,42 @@
 # @date: Tue 09 Aug 2016 15:25:22 CEST

 import logging
-logger = logging.getLogger("bob.learn.tensorflow")
 import tensorflow as tf
 from ..network import SequenceNetwork
 import threading
+import numpy
 import os
 import bob.io.base
 import bob.core
 from ..analyzers import SoftmaxAnalizer
 from tensorflow.core.framework import summary_pb2
+import time

 logger = bob.core.log.setup("bob.learn.tensorflow")
 class Trainer(object):
+    """
+    One graph trainer.
+    Use this trainer when your CNN is composed of one graph.
+
+    **Parameters**
+
+    architecture: The architecture that you want to run. Should be a :py:class:`bob.learn.tensorflow.network.SequenceNetwork`
+    optimizer: One of the tensorflow optimizers https://www.tensorflow.org/versions/r0.10/api_docs/python/train.html
+    use_gpu: Use GPUs in the training
+    loss: Loss
+    temp_dir: The output directory
+    base_learning_rate: Initial learning rate
+    weight_decay:
+    convergence_threshold:
+    iterations: Maximum number of iterations
+    snapshot: Will take a snapshot of the network at every `n` iterations
+    prefetch: Use extra threads to deal with the I/O
+    analizer: Neural network analizer :py:mod:`bob.learn.tensorflow.analyzers`
+    verbosity_level:
+    """
     def __init__(self,
                  architecture,
                  optimizer=tf.train.AdamOptimizer(),
@@ -37,22 +58,10 @@ class Trainer(object):
                  prefetch=False,

                  ## Analizer