Commit 5959096d authored by André Anjos

Merge branch 'multiple-issues' into 'master'

Fixes to multiple issues

Closes #6, #8, and #9

See merge request !10
parents 6c48e835 b37f3f93
Pipeline #17586 passed with stages in 23 minutes and 5 seconds
/**
* @file io/cxx/video::Reader.cc
* @date Wed Jun 22 17:50:08 2011 +0200
* @author Andre Anjos <andre.anjos@idiap.ch>
*
* @brief A class to help you read videos. This code originates from
* http://ffmpeg.org/doxygen/1.0/, "decoding & encoding example".
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#include "reader.h"
#include <stdexcept>
......@@ -18,12 +7,6 @@
#include <bob.io.base/blitz_array.h>
#if LIBAVUTIL_VERSION_INT < 0x371167 //55.17.103 @ ffmpeg-3.0
#ifndef AV_PIX_FMT_RGB24
#define AV_PIX_FMT_RGB24 PIX_FMT_RGB24
#endif
#endif
namespace bob { namespace io { namespace video {
Reader::Reader(const std::string& filename, bool check) {
......@@ -72,8 +55,8 @@ namespace bob { namespace io { namespace video {
}
boost::shared_ptr<AVCodecContext> codec_ctxt =
make_codec_context(m_filepath,
format_ctxt->streams[stream_index], codec);
make_decoder_context(m_filepath, format_ctxt->streams[stream_index],
codec);
/**
* Copies some information from the context just opened
......@@ -216,7 +199,7 @@ namespace bob { namespace io { namespace video {
m_format_context = make_input_format_context(filename);
m_stream_index = find_video_stream(filename, m_format_context);
m_codec = find_decoder(filename, m_format_context, m_stream_index);
m_codec_context = make_codec_context(filename,
m_codec_context = make_decoder_context(filename,
m_format_context->streams[m_stream_index], m_codec);
m_swscaler = make_scaler(filename, m_codec_context,
m_codec_context->pix_fmt, AV_PIX_FMT_RGB24);
......
/**
* @date Wed Jun 22 17:50:08 2011 +0200
* @author Andre Anjos <andre.anjos@idiap.ch>
*
* @brief A class to help you read videos. This code originates from
* http://ffmpeg.org/doxygen/1.0/, "decoding & encoding example".
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#ifndef BOB_IO_VIDEO_READER_H
#define BOB_IO_VIDEO_READER_H
......
/**
* @author Andre Anjos <andre.anjos@idiap.ch>
* @date Mon 26 Nov 17:33:19 2012
*
* @brief A simple set of utilities to query ffmpeg
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#ifndef BOB_IO_VIDEO_UTILS_H
#define BOB_IO_VIDEO_UTILS_H
......@@ -101,16 +92,29 @@ namespace bob { namespace io { namespace video {
************************************************************************/
/**
* Creates a new codec context and verify all is good.
* Creates a new codec decoding context and verifies all is good.
*
* @note The returned object knows how to correctly delete itself, freeing
* all acquired resources. Nonetheless, when this object is used in
* conjunction with other objects required for file encoding, order must be
* conjunction with other objects required for file decoding, order must be
* respected.
*/
boost::shared_ptr<AVCodecContext> make_codec_context(
boost::shared_ptr<AVCodecContext> make_decoder_context(
const std::string& filename, AVStream* stream, AVCodec* codec);
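For reference, a decoder context built from these arguments typically looks like the minimal sketch below. It assumes an FFmpeg release with AVStream::codecpar (3.1 or later), plus the libav* and boost/shared_ptr.hpp headers already pulled in by this file; the name make_decoder_context_sketch and the error messages are illustrative, not the library's actual implementation.

boost::shared_ptr<AVCodecContext> make_decoder_context_sketch(
    const std::string& filename, AVStream* stream, AVCodec* codec) {
  AVCodecContext* ctxt = avcodec_alloc_context3(codec);
  if (!ctxt)
    throw std::runtime_error("cannot allocate decoder context for " + filename);
  // copy width, height, pixel format, etc. from the demuxed stream
  avcodec_parameters_to_context(ctxt, stream->codecpar);
  if (avcodec_open2(ctxt, codec, 0) < 0) {
    avcodec_free_context(&ctxt);
    throw std::runtime_error("cannot open decoder for " + filename);
  }
  // the shared_ptr deleter releases the context once the last user goes away
  return boost::shared_ptr<AVCodecContext>(ctxt,
      [](AVCodecContext* c) { avcodec_free_context(&c); });
}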
/**
* Creates a new codec encoding context and verifies all is good.
*
* @note The returned object knows how to correctly delete itself, freeing
* all acquired resources. Nonetheless, when this object is used in
* conjunction with other objects required for file encoding, order must be
* respected.
*/
boost::shared_ptr<AVCodecContext> make_encoder_context(
const std::string& filename, AVFormatContext* fmtctxt, AVStream* stream,
AVCodec* codec, size_t height, size_t width, double framerate,
double bitrate, size_t gop);
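The encoder variant additionally receives geometry and rate-control parameters. A minimal sketch of how they typically map onto AVCodecContext fields follows; the default pixel-format fallback and the global-header handling shown here are assumptions, not necessarily the library's exact choices.

boost::shared_ptr<AVCodecContext> make_encoder_context_sketch(
    const std::string& filename, AVFormatContext* fmtctxt, AVStream* stream,
    AVCodec* codec, size_t height, size_t width, double framerate,
    double bitrate, size_t gop) {
  AVCodecContext* ctxt = avcodec_alloc_context3(codec);
  if (!ctxt)
    throw std::runtime_error("cannot allocate encoder context for " + filename);
  ctxt->width = (int)width;
  ctxt->height = (int)height;
  ctxt->bit_rate = (int64_t)bitrate;
  ctxt->gop_size = (int)gop;                         // one intra frame every `gop` frames
  ctxt->time_base = av_d2q(1.0/framerate, 1 << 24);  // duration of a single frame
  ctxt->pix_fmt = codec->pix_fmts ? codec->pix_fmts[0] : AV_PIX_FMT_YUV420P;
  if (fmtctxt->oformat->flags & AVFMT_GLOBALHEADER)  // some muxers need global headers
    ctxt->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  if (avcodec_open2(ctxt, codec, 0) < 0) {
    avcodec_free_context(&ctxt);
    throw std::runtime_error("cannot open encoder for " + filename);
  }
  stream->time_base = ctxt->time_base;               // keep stream and encoder clocks aligned
  return boost::shared_ptr<AVCodecContext>(ctxt,
      [](AVCodecContext* c) { avcodec_free_context(&c); });
}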
/**
* Allocates the software scaler that handles size and pixel format
* conversion.
......@@ -119,16 +123,14 @@ namespace bob { namespace io { namespace video {
* all acquired resources. Nonetheless, when this object is used in
* conjunction with other objects required for file encoding, order must be
* respected.
*
* @note This scaler constructor is used both in encoding and decoding, and
* therefore needs to know the source and destination pixel formats, which may
* differ in each circumstance.
*/
#if LIBAVUTIL_VERSION_INT >= 0x334A64 //51.74.100 @ ffmpeg-3.0
boost::shared_ptr<SwsContext> make_scaler(const std::string& filename,
boost::shared_ptr<AVCodecContext> stream,
AVPixelFormat source_pixel_format, AVPixelFormat dest_pixel_format);
#else
boost::shared_ptr<SwsContext> make_scaler(const std::string& filename,
boost::shared_ptr<AVCodecContext> stream,
PixelFormat source_pixel_format, PixelFormat dest_pixel_format);
#endif
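Underneath, the scaler is just a libswscale context keyed on the codec-context geometry and the two pixel formats. A minimal sketch; the SWS_BICUBIC interpolation flag is an assumption.

boost::shared_ptr<SwsContext> make_scaler_sketch(const std::string& filename,
    boost::shared_ptr<AVCodecContext> ctxt,
    AVPixelFormat source_pixel_format, AVPixelFormat dest_pixel_format) {
  SwsContext* scaler = sws_getContext(
      ctxt->width, ctxt->height, source_pixel_format,  // input geometry and format
      ctxt->width, ctxt->height, dest_pixel_format,    // output geometry and format
      SWS_BICUBIC, 0, 0, 0);                           // interpolation, no extra filters
  if (!scaler)
    throw std::runtime_error("cannot create software scaler for " + filename);
  return boost::shared_ptr<SwsContext>(scaler, sws_freeContext);
}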
/**
* Allocates a frame for a particular context. The frame space will be
......@@ -140,13 +142,8 @@ namespace bob { namespace io { namespace video {
* conjunction with other objects required for file encoding, order must be
* respected.
*/
#if LIBAVUTIL_VERSION_INT >= 0x334A64 //51.74.100 @ ffmpeg-3.0
boost::shared_ptr<AVFrame> make_frame(const std::string& filename,
boost::shared_ptr<AVCodecContext> stream, AVPixelFormat pixfmt);
#else
boost::shared_ptr<AVFrame> make_frame(const std::string& filename,
boost::shared_ptr<AVCodecContext> stream, PixelFormat pixfmt);
#endif
boost::shared_ptr<AVCodecContext> stream);
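With the pixel format now taken from the codec context itself (which is why the separate pixfmt argument disappears above), a frame factory reduces to roughly the following sketch:

boost::shared_ptr<AVFrame> make_frame_sketch(const std::string& filename,
    boost::shared_ptr<AVCodecContext> ctxt) {
  AVFrame* frame = av_frame_alloc();
  if (!frame)
    throw std::runtime_error("cannot allocate frame for " + filename);
  frame->format = ctxt->pix_fmt;   // geometry and format come from the codec context
  frame->width  = ctxt->width;
  frame->height = ctxt->height;
  if (av_frame_get_buffer(frame, 32) < 0) {  // allocate pixel planes, 32-byte aligned
    av_frame_free(&frame);
    throw std::runtime_error("cannot allocate frame buffer for " + filename);
  }
  return boost::shared_ptr<AVFrame>(frame,
      [](AVFrame* f) { av_frame_free(&f); });
}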
/************************************************************************
* Video reading specific utilities
......@@ -238,7 +235,7 @@ namespace bob { namespace io { namespace video {
/**
* Creates a new AVStream on the output file given by the format context
* pointer, with the given configurations.
* pointer and the codec.
*
* @note The returned object knows how to correctly delete itself, freeing
* all acquired resources. Nonetheless, when this object is used in
......@@ -246,15 +243,7 @@ namespace bob { namespace io { namespace video {
* respected.
*/
boost::shared_ptr<AVStream> make_stream(const std::string& filename,
boost::shared_ptr<AVFormatContext> fmtctxt, const std::string& codecname,
size_t height, size_t width, float framerate, float bitrate, size_t gop,
AVCodec* codec);
/**
* Allocates a video buffer (useful for ffmpeg < 0.11)
*/
boost::shared_array<uint8_t> make_buffer
(boost::shared_ptr<AVFormatContext> format_context, size_t size);
boost::shared_ptr<AVFormatContext> fmtctxt, AVCodec* codec);
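Since geometry and rate parameters moved into make_encoder_context, the stream factory now only has to register a new stream on the muxer. A minimal sketch; the no-op deleter reflects the assumption that the format context owns and frees its streams, consistent with the FFmpeg API.

boost::shared_ptr<AVStream> make_stream_sketch(const std::string& filename,
    boost::shared_ptr<AVFormatContext> fmtctxt, AVCodec* codec) {
  AVStream* stream = avformat_new_stream(fmtctxt.get(), codec);
  if (!stream)
    throw std::runtime_error("cannot create output stream for " + filename);
  stream->id = fmtctxt->nb_streams - 1;   // index of the stream just added
  // streams are owned (and freed) by the format context, hence the no-op deleter
  return boost::shared_ptr<AVStream>(stream, [](AVStream*) {});
}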
/**
* Opens the output file using the given context, writes a header, if the
......@@ -271,14 +260,13 @@ namespace bob { namespace io { namespace video {
boost::shared_ptr<AVFormatContext> format_context);
/**
* Flushes frames which are buffered on the given encoder stream. This only
* happens if (codec->capabilities & CODEC_CAP_DELAY) is true.
* Flushes frames which are buffered on the given encoder stream. This is
* only required if (codec->capabilities & CODEC_CAP_DELAY) is true.
*/
void flush_encoder(const std::string& filename,
boost::shared_ptr<AVFormatContext> format_context,
boost::shared_ptr<AVStream> stream, AVCodec* codec,
boost::shared_array<uint8_t> buffer,
size_t buffer_size);
boost::shared_ptr<AVStream> stream,
boost::shared_ptr<AVCodecContext> codec_context);
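A sketch of what draining the encoder with this new signature typically looks like, assuming the send/receive packet API introduced in FFmpeg 3.1; the real implementation may still take the older avcodec_encode_video2 path on legacy versions.

void flush_encoder_sketch(const std::string& filename,
    boost::shared_ptr<AVFormatContext> format_context,
    boost::shared_ptr<AVStream> stream,
    boost::shared_ptr<AVCodecContext> codec_context) {
  avcodec_send_frame(codec_context.get(), 0);   // NULL frame == enter drain mode
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = 0; pkt.size = 0;
  while (avcodec_receive_packet(codec_context.get(), &pkt) == 0) {
    // move timestamps from the encoder clock to the muxer/stream clock
    av_packet_rescale_ts(&pkt, codec_context->time_base, stream->time_base);
    pkt.stream_index = stream->index;
    av_interleaved_write_frame(format_context.get(), &pkt);
    av_packet_unref(&pkt);
  }
}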
/**
* Writes a data frame into the encoder stream.
......@@ -291,11 +279,10 @@ namespace bob { namespace io { namespace video {
const std::string& filename,
boost::shared_ptr<AVFormatContext> format_context,
boost::shared_ptr<AVStream> stream,
boost::shared_ptr<AVCodecContext> codec_context,
boost::shared_ptr<AVFrame> context_frame,
boost::shared_ptr<AVFrame> tmp_frame,
boost::shared_ptr<SwsContext> swscaler,
boost::shared_array<uint8_t> buffer,
size_t buffer_size);
boost::shared_ptr<SwsContext> swscaler);
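The per-frame write now goes through the codec context rather than a user-managed buffer: convert the user pixels into the encoder pixel format with the scaler, feed the converted frame to the encoder, and mux every packet it returns. A condensed sketch, with the copy of the user data into tmp_frame elided; the send/receive calls are, again, an assumption about the FFmpeg version.

void write_video_frame_sketch(const std::string& filename,
    boost::shared_ptr<AVFormatContext> format_context,
    boost::shared_ptr<AVStream> stream,
    boost::shared_ptr<AVCodecContext> codec_context,
    boost::shared_ptr<AVFrame> context_frame,   // frame in the encoder pixel format
    boost::shared_ptr<AVFrame> tmp_frame,       // frame holding the user (RGB) pixels
    boost::shared_ptr<SwsContext> swscaler) {
  // 1. pixel-format conversion: tmp_frame (user data) -> context_frame (encoder format)
  sws_scale(swscaler.get(), tmp_frame->data, tmp_frame->linesize,
      0, codec_context->height, context_frame->data, context_frame->linesize);
  // 2. hand the converted frame to the encoder
  avcodec_send_frame(codec_context.get(), context_frame.get());
  // 3. mux every packet the encoder has ready
  AVPacket pkt;
  av_init_packet(&pkt);
  pkt.data = 0; pkt.size = 0;
  while (avcodec_receive_packet(codec_context.get(), &pkt) == 0) {
    av_packet_rescale_ts(&pkt, codec_context->time_base, stream->time_base);
    pkt.stream_index = stream->index;
    av_interleaved_write_frame(format_context.get(), &pkt);
    av_packet_unref(&pkt);
  }
}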
}}}
......
/**
* @file io/cxx/video::Writer.cc
* @date Wed 28 Nov 2012 13:51:58 CET
* @author Andre Anjos <andre.anjos@idiap.ch>
*
* @brief A class to help you write videos. This code originates from
* http://ffmpeg.org/doxygen/1.0/, "muxing.c" example.
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#include "writer.h"
#include <boost/format.hpp>
#include <boost/preprocessor.hpp>
#if LIBAVFORMAT_VERSION_INT < 0x361764 /* 54.23.100 @ ffmpeg-0.11 */
#define FFMPEG_VIDEO_BUFFER_SIZE 200000
#else
#define FFMPEG_VIDEO_BUFFER_SIZE 0
#endif
#if LIBAVUTIL_VERSION_INT < 0x371167 //55.17.103 @ ffmpeg-3.0
#ifndef AV_PIX_FMT_RGB24
#define AV_PIX_FMT_RGB24 PIX_FMT_RGB24
#endif
#endif
namespace bob { namespace io { namespace video {
Writer::Writer(
......@@ -42,17 +19,12 @@ namespace bob { namespace io { namespace video {
m_opened(false),
m_format_context(make_output_format_context(filename, format)),
m_codec(find_encoder(filename, m_format_context, codec)),
m_stream(make_stream(filename, m_format_context, codec, height,
width, framerate, bitrate, gop, m_codec)),
m_codec_context(make_codec_context(filename, m_stream.get(), m_codec)),
m_context_frame(make_frame(filename, m_codec_context, m_stream->codec->pix_fmt)),
#if LIBAVCODEC_VERSION_INT >= 0x352a00 //53.42.0 @ ffmpeg-0.9
m_swscaler(make_scaler(filename, m_codec_context, AV_PIX_FMT_GBRP, m_stream->codec->pix_fmt)),
#else
m_rgb24_frame(make_frame(filename, m_codec_context, AV_PIX_FMT_RGB24)),
m_swscaler(make_scaler(filename, m_codec_context, AV_PIX_FMT_RGB24, m_stream->codec->pix_fmt)),
#endif
m_buffer(make_buffer(m_format_context, FFMPEG_VIDEO_BUFFER_SIZE)),
m_stream(make_stream(filename, m_format_context, m_codec)),
m_codec_context(make_encoder_context(filename, m_format_context.get(),
m_stream.get(), m_codec, height, width, framerate, bitrate, gop)),
m_context_frame(make_frame(filename, m_codec_context)),
m_swscaler(make_scaler(filename, m_codec_context, AV_PIX_FMT_GBRP,
m_codec_context->pix_fmt)),
m_height(height),
m_width(width),
m_framerate(framerate),
......@@ -110,15 +82,13 @@ namespace bob { namespace io { namespace video {
if (!m_opened) return;
flush_encoder(m_filename, m_format_context, m_stream, m_codec,
m_buffer, FFMPEG_VIDEO_BUFFER_SIZE);
flush_encoder(m_filename, m_format_context, m_stream, m_codec_context);
close_output_file(m_filename, m_format_context);
/* Destroys resources in an orderly fashion */
m_codec_context.reset();
m_context_frame.reset();
m_rgb24_frame.reset();
m_buffer.reset();
m_swscaler.reset();
m_stream.reset();
m_format_context.reset();
......@@ -139,8 +109,8 @@ namespace bob { namespace io { namespace video {
info % BOOST_PP_STRINGIZE(LIBSWSCALE_VERSION);
info % m_format_context->oformat->name;
info % m_format_context->oformat->long_name;
info % m_stream->codec->codec->name;
info % m_stream->codec->codec->long_name;
info % m_codec->name;
info % m_codec->long_name;
info % (m_current_frame/m_framerate);
info % m_current_frame;
info % m_framerate;
......@@ -168,8 +138,8 @@ namespace bob { namespace io { namespace video {
blitz::Range a = blitz::Range::all();
for(int i=data.lbound(0); i<(data.extent(0)+data.lbound(0)); ++i) {
write_video_frame(data(i, a, a, a), m_filename, m_format_context,
m_stream, m_context_frame, m_rgb24_frame, m_swscaler, m_buffer,
FFMPEG_VIDEO_BUFFER_SIZE);
m_stream, m_codec_context, m_context_frame, m_rgb24_frame,
m_swscaler);
++m_current_frame;
m_typeinfo_video.shape[0] += 1;
}
......@@ -192,8 +162,7 @@ namespace bob { namespace io { namespace video {
}
write_video_frame(data, m_filename, m_format_context,
m_stream, m_context_frame, m_rgb24_frame, m_swscaler, m_buffer,
FFMPEG_VIDEO_BUFFER_SIZE);
m_stream, m_codec_context, m_context_frame, m_rgb24_frame, m_swscaler);
++m_current_frame;
m_typeinfo_video.shape[0] += 1;
}
......@@ -229,8 +198,8 @@ namespace bob { namespace io { namespace video {
blitz::Array<uint8_t,3> tmp(const_cast<uint8_t*>(static_cast<const uint8_t*>(data.ptr())), shape,
blitz::neverDeleteData);
write_video_frame(tmp, m_filename, m_format_context,
m_stream, m_context_frame, m_rgb24_frame, m_swscaler, m_buffer,
FFMPEG_VIDEO_BUFFER_SIZE);
m_stream, m_codec_context, m_context_frame, m_rgb24_frame,
m_swscaler);
++m_current_frame;
m_typeinfo_video.shape[0] += 1;
}
......@@ -253,8 +222,8 @@ namespace bob { namespace io { namespace video {
for(size_t i=0; i<type.shape[0]; ++i) {
blitz::Array<uint8_t,3> tmp(ptr, shape, blitz::neverDeleteData);
write_video_frame(tmp, m_filename, m_format_context,
m_stream, m_context_frame, m_rgb24_frame, m_swscaler, m_buffer,
FFMPEG_VIDEO_BUFFER_SIZE);
m_stream, m_codec_context, m_context_frame, m_rgb24_frame,
m_swscaler);
++m_current_frame;
m_typeinfo_video.shape[0] += 1;
ptr += frame_size;
......
/**
* @date Wed 28 Nov 2012 13:52:08 CET
* @author Andre Anjos <andre.anjos@idiap.ch>
*
* @brief A class to help you write videos. This code originates from
* http://ffmpeg.org/doxygen/1.0/, "decoding & encoding example".
*
* Copyright (C) Idiap Research Institute, Martigny, Switzerland
*/
#ifndef BOB_IO_VIDEO_WRITER_H
#define BOB_IO_VIDEO_WRITER_H
......@@ -119,10 +109,10 @@ namespace bob { namespace io { namespace video {
return m_format_context->oformat->long_name;
}
std::string codecName() const {
return m_stream->codec->codec->name;
return m_codec->name;
}
std::string codecLongName() const {
return m_stream->codec->codec->long_name;
return m_codec->long_name;
}
/**
......@@ -188,7 +178,6 @@ namespace bob { namespace io { namespace video {
boost::shared_ptr<AVFrame> m_context_frame; ///< output frame data
boost::shared_ptr<AVFrame> m_rgb24_frame; ///< temporary frame data
boost::shared_ptr<SwsContext> m_swscaler; ///< software scaler
boost::shared_array<uint8_t> m_buffer; ///< buffer for ffmpeg < 0.11.0
size_t m_height;
size_t m_width;
double m_framerate;
......
......@@ -128,13 +128,8 @@ static PyObject* describe_codec(const AVCodec* codec) {
unsigned int i=0;
while(codec->pix_fmts[i] != -1) {
if (!list_append(pixfmt,
#if LIBAVUTIL_VERSION_INT >= 0x320f01 //50.15.1 @ ffmpeg-0.6
av_get_pix_fmt_name
#else
avcodec_get_pix_fmt_name
#endif
(codec->pix_fmts[i++]))) return 0;
if (!list_append(pixfmt, av_get_pix_fmt_name(codec->pix_fmts[i++])))
return 0;
}
pixfmt = PySequence_Tuple(pixfmt);
}
......@@ -604,6 +599,11 @@ static PyModuleDef module_definition = {
static PyObject* create_module (void) {
/* Initialize libavcodec, and register all codecs and formats. */
av_log_set_level(AV_LOG_QUIET);
avcodec_register_all();
av_register_all();
# if PY_VERSION_HEX >= 0x03000000
PyObject* module = PyModule_Create(&module_definition);
auto module_ = make_xsafe(module);
......
......@@ -17,7 +17,7 @@ from bob.io.base import load
# These are some global parameters for the test.
INPUT_VIDEO = test_utils.datafile('test.mov', __name__)
UNICODE_VIDEO = test_utils.datafile('test_straße.mov', __name__)
def test_codec_support():
......@@ -32,11 +32,12 @@ def test_codec_support():
if v['encode']: assert describe_encoder(v['id'])
# Assert we support, at least, some known codecs
for codec in ('ffv1', 'wmv2', 'mpeg4', 'mjpeg'):
for codec in ('ffv1', 'wmv2', 'mpeg4', 'mjpeg', 'h264'):
assert codec in supported
assert supported[codec]['encode']
assert supported[codec]['decode']
def test_input_format_support():
# Describes all encoders
......@@ -48,6 +49,7 @@ def test_input_format_support():
for fmt in ('avi', 'mov', 'mp4'):
assert fmt in supported
def test_output_format_support():
# Describes all encoders
......@@ -59,6 +61,7 @@ def test_output_format_support():
for fmt in ('avi', 'mov', 'mp4'):
assert fmt in supported
def test_video_reader_attributes():
from . import reader
......@@ -90,32 +93,46 @@ def test_video_reader_attributes():
def write_unicode_temp_file():
prefix = 'bobtest_straße_'
suffix = '.avi'
tmpname = test_utils.temporary_filename(prefix=prefix, suffix=suffix)
# Writing temp file for testing
from . import writer
width = 20
height = 20
framerate = 24
outv = writer(UNICODE_VIDEO, height, width, framerate)
outv = writer(tmpname, height, width, framerate)
for i in range(0, 3):
newframe = (numpy.random.random_integers(0,255,(3,height,width)))
outv.append(newframe.astype('uint8'))
outv.close()
return tmpname
def test_video_reader_unicode():
# Writing temp file for testing
write_unicode_temp_file()
try:
from . import reader
# Writing temp file for testing
tmpname = write_unicode_temp_file()
iv = reader(UNICODE_VIDEO)
from . import reader
assert isinstance(iv.filename, str)
assert 'ß' in UNICODE_VIDEO
assert 'ß' in iv.filename
iv = reader(tmpname)
assert isinstance(iv.filename, str)
assert 'ß' in tmpname
assert 'ß' in iv.filename
finally:
if os.path.exists(tmpname): os.unlink(tmpname)
#os.remove(UNICODE_VIDEO)
def test_video_reader_str():
......@@ -125,6 +142,7 @@ def test_video_reader_str():
assert repr(iv)
assert str(iv)
def test_can_iterate():
from . import reader
......@@ -140,6 +158,7 @@ def test_can_iterate():
assert counter == len(video) #we have gone through all frames
def test_iteration():
from . import reader
......@@ -150,21 +169,26 @@ def test_iteration():
for l, i in zip(objs, f):
assert numpy.allclose(l, i)
def test_base_load_on_unicode():
# Writing temp file for testing
write_unicode_temp_file()
try:
from . import reader
f = reader(UNICODE_VIDEO)
objs = load(UNICODE_VIDEO)
# Writing temp file for testing
tmpname = write_unicode_temp_file()
from . import reader
f = reader(tmpname)
objs = load(tmpname)
nose.tools.eq_(len(f), len(objs))
for l, i in zip(objs, f):
assert numpy.allclose(l.shape, i.shape)
finally:
if os.path.exists(tmpname): os.unlink(tmpname)
nose.tools.eq_(len(f), len(objs))
for l, i in zip(objs, f):
assert numpy.allclose(l.shape, i.shape)
#os.remove(UNICODE_VIDEO)
def test_indexing():
from . import reader
......@@ -183,6 +207,7 @@ def test_indexing():
assert numpy.allclose(f[len(f)-1], f[-1])
assert numpy.allclose(f[len(f)-2], f[-2])
def test_slicing_empty():
from . import reader
......@@ -192,6 +217,7 @@ def test_slicing_empty():
assert objs.shape == tuple()
assert objs.dtype == numpy.uint8
def test_slicing_0():
from . import reader
......@@ -201,6 +227,7 @@ def test_slicing_0():
for i, k in enumerate(load(INPUT_VIDEO)):
assert numpy.allclose(k, objs[i])
def test_slicing_1():
from . import reader
......@@ -213,6 +240,7 @@ def test_slicing_1():
assert numpy.allclose(s[2], f[7])
assert numpy.allclose(s[3], f[9])
def test_slicing_2():
from . import reader
......@@ -224,6 +252,7 @@ def test_slicing_2():
assert numpy.allclose(s[1], f[len(f)-7])
assert numpy.allclose(s[2], f[len(f)-4])
def test_slicing_3():
from . import reader
......@@ -238,6 +267,7 @@ def test_slicing_3():
assert numpy.allclose(s[2], f[14])
assert numpy.allclose(s[3], f[11])
def test_slicing_4():
from . import reader
......@@ -262,6 +292,7 @@ def test_can_use_array_interface():
for frame_id, frame in zip(range(array.shape[0]), iv.__iter__()):
assert numpy.array_equal(array[frame_id,:,:,:], frame)
def test_video_reading_after_writing():
from . import test_utils
......@@ -291,6 +322,7 @@ def test_video_reading_after_writing():
# And we erase both files after this
if os.path.exists(tmpname): os.unlink(tmpname)
def test_video_writer_close():
from . import test_utils
......@@ -324,6 +356,7 @@ def test_video_writer_close():
# And we erase both files after this
if os.path.exists(tmpname): os.unlink(tmpname)
def test_closed_video_writer_raises():
from . import test_utils
......
......@@ -65,19 +65,20 @@ def test_format_codecs():
default = dict(frameskip=0.1, color=9.0, noise=45.),
# high-quality encoders
zlib = dict(frameskip=0.0, color=0.0, noise=0.0),
ffv1 = dict(frameskip=0.05, color=9., noise=46.),
vp8 = dict(frameskip=0.3, color=9.0, noise=65.),
libvpx = dict(frameskip=0.3, color=9.0, noise=65.),
h264 = dict(frameskip=0.5, color=9.0, noise=55.),
libx264 = dict(frameskip=0.4, color=9.0, noise=50.),
theora = dict(frameskip=0.5, color=9.0, noise=70.),
libtheora = dict(frameskip=0.5, color=9.0, noise=70.),
mpeg4 = dict(frameskip=1.0, color=9.0, noise=55.),
zlib = dict(frameskip=0.0, color=0.0, noise=0.0),
ffv1 = dict(frameskip=0.05, color=9.0, noise=46.),
vp8 = dict(frameskip=0.3, color=9.0, noise=65.),
libvpx = dict(frameskip=0.3, color=9.0, noise=65.),
h264 = dict(frameskip=0.5, color=9.0, noise=55.),
libx264 = dict(frameskip=0.4, color=9.0, noise=50.),
libopenh264 = dict(frameskip=0.5, color=9.0, noise=55.),
theora = dict(frameskip=0.5, color=9.0, noise=70.),
libtheora = dict(frameskip=0.5, color=9.0, noise=70.),
mpeg4 = dict(frameskip=1.0, color=9.0, noise=55.),
# older, but still good quality encoders
mjpeg = dict(frameskip=1.2, color=9.0, noise=50.),
mpegvideo = dict(frameskip=1.3, color=9.0, noise=75.),
mpegvideo = dict(frameskip=1.3, color=9.0, noise=80.),
mpeg2video = dict(frameskip=1.3, color=9.0, noise=75.),
mpeg1video = dict(frameskip=1.4, color=9.0, noise=50.),
......@@ -88,15 +89,6 @@ def test_format_codecs():
msmpeg4v2 = dict(frameskip=6., color=10., noise=50.),
)
# some exceptions
if test_utils.ffmpeg_version_lessthan('0.6'):
distortions['ffv1']['frameskip'] = 0.55
distortions['mpeg1video']['frameskip'] = 1.5
distortions['mpegvideo']['color'] = 9.0
distortions['mpegvideo']['frameskip'] = 1.4
distortions['mpeg2video']['color'] = 9.0
distortions['mpeg2video']['frameskip'] = 1.4
from . import supported_videowriter_formats
SUPPORTED = supported_videowriter_formats()
for format in SUPPORTED:
......@@ -121,7 +113,7 @@ def check_user_video(format, codec, maxdist):
# encode the input video using the format and codec provided by the user
outv = writer(fname, oheight, owidth, orig_vreader.frame_rate,
codec=codec, format=format, check=False)
codec=codec, format=format, check=True)
for k in orig: outv.append(k)
outv.close()
......@@ -140,7 +132,7 @@ def check_user_video(format, codec, maxdist):
assert max(dist) <= maxdist, "max(distortion) %g > %g allowed for format `%s' and codec `%s'" % (max(dist), maxdist, format, codec)
# make sure that the encoded frame rate is not off by a big amount
assert abs(orig_vreader.frame_rate - encoded.frame_rate) <= (1.0/MAXLENTH), "original video framerate %g differs from encoded %g by more than %g for format `%s' and codec `%s'" % (encoded.frame_rate, framerate, 1.0/MAXLENTH, format, codec)
assert abs(orig_vreader.frame_rate - encoded.frame_rate) <= (1.0/MAXLENTH), "original video framerate %g differs from encoded %g by more than %g for format `%s' and codec `%s'" % (encoded.frame_rate, orig_vreader.framerate, 1.0/MAXLENTH, format, codec)
finally:
......@@ -154,15 +146,16 @@ def test_user_video():
default = 1.5,
# high-quality encoders
zlib = 0.0,
ffv1 = 1.7,
vp8 = 2.7,
libvpx = 2.7,
h264 = 2.7,
libx264 = 2.5,
theora = 2.0,
libtheora = 2.0,
mpeg4 = 2.3,
zlib = 0.0,
ffv1 = 1.7,
vp8 = 2.7,
libvpx = 2.7,
h264 = 2.7,
libx264 = 2.5,
libopenh264 = 3.0,
theora = 2.0,
libtheora = 2.0,
mpeg4 = 2.3,
# older, but still good quality encoders
mjpeg = 1.8,
......
......@@ -62,7 +62,7 @@ def print_numbers(frame, counter, format, fontsize):
# img = Image.fromstring('RGB', (frame.shape[1], frame.shape[2]), frame.transpose(1,2,0).tostring())
# img = Image.frombytes('RGB', (frame.shape[1], frame.shape[2]), frame.transpose(1,2,0).tostring()) #For some reason there is no frombytes in UBUNTU 12 04
img = Image.frombuffer('RGB', (frame.shape[1], frame.shape[2]), frame.transpose(1,2,0).tostring(), 'raw', "RGB", 0,1 ) #This call seems weird, but I follow the instructions from here (http://pillow.readthedocs.org/en/3.0.x/reference/Image.html#PIL.Image.frombuffer). Following these instructions I don't get a warning
draw = ImageDraw.Draw(img)
draw.text((x_pos, y_pos), text, font=font, fill=(255,255,255))
return numpy.asarray(img).transpose(2,0,1)
......@@ -108,7 +108,7 @@ def color_distortion(shape, framerate, format, codec, filename):
length, height, width = shape
from . import reader, writer
outv = writer(filename, height, width, framerate, codec=codec,
format=format, check=False)
format=format, check=True)
orig = []
text_format = "%%0%dd" % len(str(length-1))
fontsize = estimate_fontsize(height, width, text_format)
......@@ -120,7 +120,7 @@ def color_distortion(shape, framerate, format, codec, filename):
orig.append(newframe)
outv.close()
orig = numpy.array(orig, dtype='uint8')
return orig, framerate, reader(filename, check=False)
return orig, framerate, reader(filename, check=True)
def frameskip_detection(shape, framerate, format, codec, filename):
"""Returns distortion patterns for a set of frames with big numbers.
......@@ -145,7 +145,7 @@ def frameskip_detection(shape, framerate, format, codec, filename):
text_format = "%%0%dd" % len(str(length-1))
fontsize = estimate_fontsize(height, width, text_format)
outv = writer(filename, height, width, framerate, codec=codec,
format=format, check=False)