Commit 88eea1d2 authored by Amir MOHAMMADI

Turn nitpicky back on!

parent 7e2d37d7
Pipeline #29816 failed in 156 minutes and 6 seconds
@@ -44,7 +44,7 @@ def dataset_to_tfrecord(dataset, output):
     Parameters
     ----------
-    dataset : tf.data.Dataset
+    dataset : ``tf.data.Dataset``
         The tf.data.Dataset that you want to write into a TFRecord file.
     output : str
         Path to the TFRecord file. Besides this file, a .json file is also created.
@@ -53,7 +53,7 @@ def dataset_to_tfrecord(dataset, output):
     Returns
     -------
-    tf.Operation
+    ``tf.Operation``
         A tf.Operation that, when run, writes contents of dataset to a file. When
         running in eager mode, calling this function will write the file. Otherwise, you
         have to call session.run() on the returned operation.
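
As a usage sketch of the documented behavior: the import path (bob.learn.tensorflow.dataset.tfrecords) and the output location below are assumptions for illustration, not taken from this commit.

import tensorflow as tf

# Assumed import path; adjust to wherever dataset_to_tfrecord is exposed.
from bob.learn.tensorflow.dataset.tfrecords import dataset_to_tfrecord

dataset = tf.data.Dataset.from_tensor_slices(tf.range(10))
write_op = dataset_to_tfrecord(dataset, "/tmp/data.tfrecord")

# In eager mode the file is already written at this point; in graph mode
# the returned tf.Operation has to be run explicitly, per the docstring.
if not tf.executing_eagerly():
    with tf.compat.v1.Session() as sess:
        sess.run(write_op)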
@@ -100,7 +100,7 @@ def dataset_from_tfrecord(tfrecord):
     Returns
     -------
-    tf.data.Dataset
+    ``tf.data.Dataset``
         A dataset that contains the data from the TFRecord file.
     """
     # these imports are needed so that eval can work
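
Reading the data back mirrors the write sketch above (same assumed import path); the companion .json file created next to the TFRecord must still be present.

from bob.learn.tensorflow.dataset.tfrecords import dataset_from_tfrecord

# Recreates the original dataset from the TFRecord and its .json side file.
dataset = dataset_from_tfrecord("/tmp/data.tfrecord")

# Direct iteration works in eager mode; in graph mode build an iterator instead.
for sample in dataset:
    print(sample)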
@@ -468,7 +468,8 @@ def batch_data_and_labels(
     """
     Dump in order batches from a list of tf-record files
-    **Parameters**
+    Parameters
+    ----------
     tfrecord_filenames:
         List containing the tf-record paths
@@ -15,17 +15,17 @@ def balanced_softmax_cross_entropy_loss_weights(labels, dtype="float32"):
     Parameters
     ----------
-    labels : tf.Tensor
+    labels : ``tf.Tensor``
         Labels of your current input. The shape must be [batch_size, n_classes]. If your
         labels are not one-hot encoded, you can use ``tf.one_hot`` to convert them first
         before giving them to this function.
-    dtype : dtype
+    dtype : ``tf.dtype``
         The dtype that weights will have. It should be float. Best is to provide
         logits.dtype as input.
 
     Returns
     -------
-    tf.Tensor
+    ``tf.Tensor``
         Computed weights that will cancel your dataset imbalance per batch.
 
     Examples
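
The Examples section itself is truncated in this diff. As a hedged sketch of typical usage (the import path and the pairing with tf.compat.v1.losses.softmax_cross_entropy are assumptions, not taken from this commit):

import tensorflow as tf

# Assumed import path, for illustration only.
from bob.learn.tensorflow.loss import balanced_softmax_cross_entropy_loss_weights

labels = tf.one_hot([0, 0, 0, 1], depth=2)  # imbalanced one-hot batch
logits = tf.random.normal([4, 2])

# Per-sample weights that counteract the 3:1 class imbalance of this batch,
# computed in the logits dtype as the docstring recommends.
weights = balanced_softmax_cross_entropy_loss_weights(labels, dtype=logits.dtype)
loss = tf.compat.v1.losses.softmax_cross_entropy(labels, logits, weights=weights)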
@@ -100,16 +100,16 @@ def balanced_sigmoid_cross_entropy_loss_weights(labels, dtype="float32"):
     Parameters
     ----------
-    labels : tf.Tensor
+    labels : ``tf.Tensor``
         Labels of your current input. The shape must be [batch_size] and values must be
         either 0 or 1.
-    dtype : dtype
+    dtype : ``tf.dtype``
         The dtype that weights will have. It should be float. Best is to provide
         logits.dtype as input.
 
     Returns
     -------
-    tf.Tensor
+    ``tf.Tensor``
         Computed weights that will cancel your dataset imbalance per batch.
 
     Examples
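
Analogously for the sigmoid variant, whose labels are a flat 0/1 vector (again, the import path and the loss pairing are assumptions):

import tensorflow as tf

# Assumed import path, for illustration only.
from bob.learn.tensorflow.loss import balanced_sigmoid_cross_entropy_loss_weights

labels = tf.constant([1, 0, 0, 0, 1, 0])  # binary labels, shape [batch_size]
logits = tf.random.normal([6])

weights = balanced_sigmoid_cross_entropy_loss_weights(labels, dtype=logits.dtype)
loss = tf.compat.v1.losses.sigmoid_cross_entropy(
    tf.cast(labels, logits.dtype), logits, weights=weights
)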
@@ -30,7 +30,7 @@ extensions = [
 ]
 
 # Be picky about warnings
-nitpicky = False
+nitpicky = True
 
 # Ignores stuff we can't easily resolve on other project's sphinx manuals
 nitpick_ignore = []
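
With nitpicky = True, Sphinx warns about every cross-reference it cannot resolve, which is why the docstring types above were wrapped in double backticks (rendered as literals rather than reference targets). Unresolvable external references can also be silenced through nitpick_ignore, which takes ("domain:role", "target") pairs; a hypothetical sketch, not entries from this commit:

# conf.py sketch -- the targets below are illustrative examples only,
# not entries added by this commit.
nitpicky = True
nitpick_ignore = [
    ("py:class", "tf.Tensor"),
    ("py:class", "tf.data.Dataset"),
]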
@@ -86,6 +86,7 @@ Detailed Information
 .. automodule:: bob.learn.tensorflow
 .. automodule:: bob.learn.tensorflow.estimators
 .. automodule:: bob.learn.tensorflow.dataset
+.. automodule:: bob.learn.tensorflow.dataset.generator
 .. automodule:: bob.learn.tensorflow.dataset.bio
 .. automodule:: bob.learn.tensorflow.dataset.image
 .. automodule:: bob.learn.tensorflow.dataset.siamese_image