Commit f05932dc authored by Anjith GEORGE

Getting rid of tf logger

parent 60d7070f
1 merge request: !42 Tensorboard
Pipeline #42109 passed
@@ -7,7 +7,7 @@ import torch.nn as nn
 import torch.optim as optim
 from torch.autograd import Variable
 from bob.learn.pytorch.utils import comp_bce_loss_weights
-from .tflog import Logger
+from torch.utils.tensorboard import SummaryWriter
 import bob.core
 logger = bob.core.log.setup("bob.learn.pytorch")
@@ -74,7 +74,7 @@ class FASNetTrainer(object):
         bob.core.log.set_verbosity_level(logger, verbosity_level)
-        self.tf_logger = Logger(tf_logdir)
+        self.tf_logger = SummaryWriter(log_dir=tf_logdir)

         # Setting the gradients to true for the layers which needs to be adapted
@@ -282,15 +282,17 @@ class FASNetTrainer(object):
             # scalar logs
             for tag, value in info.items():
-                self.tf_logger.scalar_summary(tag, value, epoch+1)
+                self.tf_logger.add_scalar(tag=tag, scalar_value=value, global_step=epoch+1)

             # Log values and gradients of the parameters (histogram summary)
             for tag, value in self.network.named_parameters():
                 tag = tag.replace('.', '/')
                 try:
-                    self.tf_logger.histo_summary(tag, value.data.cpu().numpy(), epoch+1)
-                    self.tf_logger.histo_summary(tag+'/grad', value.grad.data.cpu().numpy(), epoch+1)
+                    self.tf_logger.add_histogram(
+                        tag=tag, values=value.data.cpu().numpy(), global_step=epoch+1)
+                    self.tf_logger.add_histogram(
+                        tag=tag+'/grad', values=value.grad.data.cpu().numpy(), global_step=epoch+1)
                 except:
                     pass
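
For context, here is a minimal, self-contained sketch of the SummaryWriter logging pattern the hunk above switches to. The network, the 'tf_logs' directory and the info dict are placeholders for this sketch only; they are not part of the commit.

# Sketch of the new logging pattern (placeholder model and log dir, not from the commit).
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter

network = nn.Linear(10, 2)                # stand-in for the trainer's network
writer = SummaryWriter(log_dir='tf_logs')  # replaces the old tflog.Logger

for epoch in range(2):
    # dummy forward/backward pass so that gradients exist
    loss = network(torch.randn(4, 10)).sum()
    loss.backward()

    # scalar logs, as done in the trainer
    info = {'loss': loss.item()}
    for tag, value in info.items():
        writer.add_scalar(tag=tag, scalar_value=value, global_step=epoch + 1)

    # histograms of parameters and gradients, as done in the trainer
    for tag, value in network.named_parameters():
        tag = tag.replace('.', '/')
        writer.add_histogram(tag=tag, values=value.data.cpu().numpy(),
                             global_step=epoch + 1)
        if value.grad is not None:
            writer.add_histogram(tag=tag + '/grad',
                                 values=value.grad.data.cpu().numpy(),
                                 global_step=epoch + 1)

writer.close()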
......
@@ -8,7 +8,6 @@ import numpy as np
 import torch
 import torch.nn as nn
 from torch.autograd import Variable
-from .tflog import Logger
 from torch.utils.tensorboard import SummaryWriter
 import bob.core
......
@@ -7,7 +7,7 @@ import torch.nn as nn
 import torch.optim as optim
 from torch.autograd import Variable
 from bob.learn.pytorch.utils import comp_bce_loss_weights
-from .tflog import Logger
+from torch.utils.tensorboard import SummaryWriter
 import bob.core
 logger = bob.core.log.setup("bob.learn.pytorch")
@@ -75,7 +75,7 @@ class MCCNNTrainer(object):
         bob.core.log.set_verbosity_level(logger, verbosity_level)
-        self.tf_logger = Logger(tf_logdir)
+        self.tf_logger = SummaryWriter(log_dir=tf_logdir)

         layers_present = self.network.lcnn_layers.copy()
@@ -309,25 +309,27 @@ class MCCNNTrainer(object):
             # scalar logs
             for tag, value in info.items():
-                self.tf_logger.scalar_summary(tag, value, epoch+1)
+                self.tf_logger.add_scalar(tag=tag, scalar_value=value, global_step=epoch+1)

             # Log values and gradients of the parameters (histogram summary)
             for tag, value in self.network.named_parameters():
                 tag = tag.replace('.', '/')
                 try:
-                    self.tf_logger.histo_summary(tag, value.data.cpu().numpy(), epoch+1)
-                    self.tf_logger.histo_summary(tag+'/grad', value.grad.data.cpu().numpy(), epoch+1)
+                    self.tf_logger.add_histogram(
+                        tag=tag, values=value.data.cpu().numpy(), global_step=epoch+1)
+                    self.tf_logger.add_histogram(
+                        tag=tag+'/grad', values=value.grad.data.cpu().numpy(), global_step=epoch+1)
                 except:
                     pass

-            # Log images
-            logimg=img.view(-1,img.size()[1]*128, 128)[:10].cpu().numpy()
+            # # Log images
+            # logimg=img.view(-1,img.size()[1]*128, 128)[:10].cpu().numpy()

-            info = { 'images': logimg}
+            # info = { 'images': logimg}

-            for tag, images in info.items():
-                self.tf_logger.image_summary(tag, images, epoch+1)
+            # for tag, images in info.items():
+            #     self.tf_logger.image_summary(tag, images, epoch+1)

             ######################################## </Logging> ###################################
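
Note that the image logging is not ported by this commit, only commented out. If it were re-enabled on top of SummaryWriter, a call along the following lines could stand in for the old image_summary. The tensor shapes mirror the disabled code, but the add_images call and the sample values here are a hypothetical sketch, not something the commit adds.

# Hypothetical port of the disabled image logging; NOT part of the commit.
import torch
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter(log_dir='tf_logs')

img = torch.rand(8, 4, 128, 128)  # stand-in for the multi-channel input batch
logimg = img.view(-1, img.size()[1] * 128, 128)[:10]  # same reshaping as the disabled code
writer.add_images('images',
                  logimg.unsqueeze(1),  # (N, 1, H, W); values assumed to be in [0, 1]
                  global_step=1,
                  dataformats='NCHW')
writer.close()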
......
-# Code referenced from https://gist.github.com/gyglim/1f8dfb1b5c82627ae3efcfbbadb9f514
-import tensorflow as tf
-import numpy as np
-import matplotlib.pyplot as plt
-
-try:
-    from StringIO import StringIO  # Python 2.7
-except ImportError:
-    from io import BytesIO  # Python 3.x
-
-
-class Logger(object):
-
-    def __init__(self, log_dir):
-        """Create a summary writer logging to log_dir."""
-        self.writer = tf.summary.FileWriter(log_dir)
-
-    def scalar_summary(self, tag, value, step):
-        """Log a scalar variable."""
-        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
-        self.writer.add_summary(summary, step)
-
-    def image_summary(self, tag, images, step):
-        """Log a list of images."""
-        img_summaries = []
-        for i, img in enumerate(images):
-            # Write the image to a string
-            try:
-                s = StringIO()
-            except:
-                s = BytesIO()
-            plt.imsave(s, img, format='png')
-
-            # Create an Image object
-            img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
-                                       height=img.shape[0],
-                                       width=img.shape[1])
-            # Create a Summary value
-            img_summaries.append(tf.Summary.Value(tag='%s/%d' % (tag, i), image=img_sum))
-
-        # Create and write Summary
-        summary = tf.Summary(value=img_summaries)
-        self.writer.add_summary(summary, step)
-
-    def histo_summary(self, tag, values, step, bins=1000):
-        """Log a histogram of the tensor of values."""
-        # Create a histogram using numpy
-        counts, bin_edges = np.histogram(values, bins=bins)
-
-        # Fill the fields of the histogram proto
-        hist = tf.HistogramProto()
-        hist.min = float(np.min(values))
-        hist.max = float(np.max(values))
-        hist.num = int(np.prod(values.shape))
-        hist.sum = float(np.sum(values))
-        hist.sum_squares = float(np.sum(values**2))
-
-        # Drop the start of the first bin
-        bin_edges = bin_edges[1:]
-
-        # Add bin edges and counts
-        for edge in bin_edges:
-            hist.bucket_limit.append(edge)
-        for c in counts:
-            hist.bucket.append(c)
-
-        # Create and write Summary
-        summary = tf.Summary(value=[tf.Summary.Value(tag=tag, histo=hist)])
-        self.writer.add_summary(summary, step)
-        self.writer.flush()
\ No newline at end of file
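
For reference, the interface of the removed Logger maps onto SummaryWriter roughly as follows. This shim is a hypothetical sketch to show the correspondence between the old and new APIs; it is not added by the commit.

# Hypothetical compatibility shim mirroring the deleted tflog.Logger; not part of the commit.
from torch.utils.tensorboard import SummaryWriter


class Logger(object):
    """Hypothetical drop-in stand-in for the deleted tflog.Logger."""

    def __init__(self, log_dir):
        self.writer = SummaryWriter(log_dir=log_dir)

    def scalar_summary(self, tag, value, step):
        self.writer.add_scalar(tag, value, global_step=step)

    def image_summary(self, tag, images, step):
        # images: iterable of HxW (or HxWxC) numpy arrays, as in the old implementation
        for i, img in enumerate(images):
            self.writer.add_image('%s/%d' % (tag, i), img, global_step=step,
                                  dataformats='HW' if img.ndim == 2 else 'HWC')

    def histo_summary(self, tag, values, step, bins=1000):
        # SummaryWriter computes its own bins; the numeric `bins` argument is ignored here
        self.writer.add_histogram(tag, values, global_step=step, bins='auto')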