Add WebM parser code from Chromium

Also includes its dependencies, like media/base/text_track_config.*
and media/filters/webvtt_util.h

Change-Id: I5b26245daf004da19b912b7c5b2c21ce4ba85688
KongQun Yang 2015-10-08 14:48:07 -07:00
parent b6db8b9867
commit 87993c5dc7
38 changed files with 7818 additions and 0 deletions

media/base/text_track_config.cc
@@ -0,0 +1,30 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/base/text_track_config.h"
namespace media {
TextTrackConfig::TextTrackConfig()
: kind_(kTextNone) {
}
TextTrackConfig::TextTrackConfig(TextKind kind,
const std::string& label,
const std::string& language,
const std::string& id)
: kind_(kind),
label_(label),
language_(language),
id_(id) {
}
bool TextTrackConfig::Matches(const TextTrackConfig& config) const {
return config.kind() == kind_ &&
config.label() == label_ &&
config.language() == language_ &&
config.id() == id_;
}
} // namespace media

media/base/text_track_config.h
@@ -0,0 +1,48 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_BASE_TEXT_TRACK_CONFIG_H_
#define MEDIA_BASE_TEXT_TRACK_CONFIG_H_
#include <string>
#include "media/base/media_export.h"
namespace media {
// Specifies the varieties of text tracks.
enum TextKind {
kTextSubtitles,
kTextCaptions,
kTextDescriptions,
kTextMetadata,
kTextNone
};
class MEDIA_EXPORT TextTrackConfig {
public:
TextTrackConfig();
TextTrackConfig(TextKind kind,
const std::string& label,
const std::string& language,
const std::string& id);
// Returns true if all fields in |config| match this config.
bool Matches(const TextTrackConfig& config) const;
TextKind kind() const { return kind_; }
const std::string& label() const { return label_; }
const std::string& language() const { return language_; }
const std::string& id() const { return id_; }
private:
TextKind kind_;
std::string label_;
std::string language_;
std::string id_;
};
} // namespace media
#endif  // MEDIA_BASE_TEXT_TRACK_CONFIG_H_

media/filters/webvtt_util.h
@@ -0,0 +1,30 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FILTERS_WEBVTT_UTIL_H_
#define MEDIA_FILTERS_WEBVTT_UTIL_H_
#include <vector>
namespace media {
// Utility function to create side data item for decoder buffer.
template<typename T>
void MakeSideData(T id_begin, T id_end,
T settings_begin, T settings_end,
std::vector<uint8>* side_data) {
// The DecoderBuffer only supports a single side data item. In the case of
// a WebVTT cue, we can have potentially two side data items. In order to
// avoid disrupting DecoderBuffer any more than we need to, we copy both
// side data items onto a single one, and terminate each with a NUL marker.
side_data->clear();
side_data->insert(side_data->end(), id_begin, id_end);
side_data->push_back(0);
side_data->insert(side_data->end(), settings_begin, settings_end);
side_data->push_back(0);
}
} // namespace media
#endif // MEDIA_FILTERS_WEBVTT_UTIL_H_
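
A minimal usage sketch for MakeSideData() (illustrative only; the cue id and
settings strings below are made up): it packs a WebVTT cue's id and settings
into a single side-data buffer, each terminated by a NUL byte, as described in
the comment above.

  #include <string>
  #include <vector>
  #include "media/filters/webvtt_util.h"

  std::vector<uint8> BuildCueSideData() {
    const std::string id = "cue-1";              // hypothetical cue id
    const std::string settings = "align:start";  // hypothetical cue settings
    std::vector<uint8> side_data;
    media::MakeSideData(id.begin(), id.end(),
                        settings.begin(), settings.end(),
                        &side_data);
    // side_data now contains "cue-1\0align:start\0".
    return side_data;
  }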

media/formats/webm/cluster_builder.cc
@@ -0,0 +1,226 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/cluster_builder.h"
#include "base/logging.h"
#include "media/base/data_buffer.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
static const uint8 kClusterHeader[] = {
0x1F, 0x43, 0xB6, 0x75, // CLUSTER ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // cluster(size = 0)
0xE7, // Timecode ID
0x88, // timecode(size=8)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // timecode value
};
static const uint8 kSimpleBlockHeader[] = {
0xA3, // SimpleBlock ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // SimpleBlock(size = 0)
};
static const uint8 kBlockGroupHeader[] = {
0xA0, // BlockGroup ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // BlockGroup(size = 0)
0x9B, // BlockDuration ID
0x88, // BlockDuration(size = 8)
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // duration
0xA1, // Block ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Block(size = 0)
};
static const uint8 kBlockGroupHeaderWithoutBlockDuration[] = {
0xA0, // BlockGroup ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // BlockGroup(size = 0)
0xA1, // Block ID
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // Block(size = 0)
};
enum {
kClusterSizeOffset = 4,
kClusterTimecodeOffset = 14,
kSimpleBlockSizeOffset = 1,
kBlockGroupSizeOffset = 1,
kBlockGroupWithoutBlockDurationBlockSizeOffset = 10,
kBlockGroupDurationOffset = 11,
kBlockGroupBlockSizeOffset = 20,
kInitialBufferSize = 32768,
};
Cluster::Cluster(scoped_ptr<uint8[]> data, int size)
: data_(data.Pass()), size_(size) {}
Cluster::~Cluster() {}
ClusterBuilder::ClusterBuilder() { Reset(); }
ClusterBuilder::~ClusterBuilder() {}
void ClusterBuilder::SetClusterTimecode(int64 cluster_timecode) {
DCHECK_EQ(cluster_timecode_, -1);
cluster_timecode_ = cluster_timecode;
// Write the timecode into the header.
uint8* buf = buffer_.get() + kClusterTimecodeOffset;
for (int i = 7; i >= 0; --i) {
buf[i] = cluster_timecode & 0xff;
cluster_timecode >>= 8;
}
}
void ClusterBuilder::AddSimpleBlock(int track_num, int64 timecode, int flags,
const uint8* data, int size) {
int block_size = size + 4;
int bytes_needed = sizeof(kSimpleBlockHeader) + block_size;
if (bytes_needed > (buffer_size_ - bytes_used_))
ExtendBuffer(bytes_needed);
uint8* buf = buffer_.get() + bytes_used_;
int block_offset = bytes_used_;
memcpy(buf, kSimpleBlockHeader, sizeof(kSimpleBlockHeader));
UpdateUInt64(block_offset + kSimpleBlockSizeOffset, block_size);
buf += sizeof(kSimpleBlockHeader);
WriteBlock(buf, track_num, timecode, flags, data, size);
bytes_used_ += bytes_needed;
}
void ClusterBuilder::AddBlockGroup(int track_num, int64 timecode, int duration,
int flags, const uint8* data, int size) {
AddBlockGroupInternal(track_num, timecode, true, duration, flags, data, size);
}
void ClusterBuilder::AddBlockGroupWithoutBlockDuration(int track_num,
int64 timecode,
int flags,
const uint8* data,
int size) {
AddBlockGroupInternal(track_num, timecode, false, 0, flags, data, size);
}
void ClusterBuilder::AddBlockGroupInternal(int track_num, int64 timecode,
bool include_block_duration,
int duration, int flags,
const uint8* data, int size) {
int block_size = size + 4;
int bytes_needed = block_size;
if (include_block_duration) {
bytes_needed += sizeof(kBlockGroupHeader);
} else {
bytes_needed += sizeof(kBlockGroupHeaderWithoutBlockDuration);
}
int block_group_size = bytes_needed - 9;
if (bytes_needed > (buffer_size_ - bytes_used_))
ExtendBuffer(bytes_needed);
uint8* buf = buffer_.get() + bytes_used_;
int block_group_offset = bytes_used_;
if (include_block_duration) {
memcpy(buf, kBlockGroupHeader, sizeof(kBlockGroupHeader));
UpdateUInt64(block_group_offset + kBlockGroupDurationOffset, duration);
UpdateUInt64(block_group_offset + kBlockGroupBlockSizeOffset, block_size);
buf += sizeof(kBlockGroupHeader);
} else {
memcpy(buf, kBlockGroupHeaderWithoutBlockDuration,
sizeof(kBlockGroupHeaderWithoutBlockDuration));
UpdateUInt64(
block_group_offset + kBlockGroupWithoutBlockDurationBlockSizeOffset,
block_size);
buf += sizeof(kBlockGroupHeaderWithoutBlockDuration);
}
UpdateUInt64(block_group_offset + kBlockGroupSizeOffset, block_group_size);
// Make sure the 4 most-significant bits are 0.
// http://www.matroska.org/technical/specs/index.html#block_structure
flags &= 0x0f;
WriteBlock(buf, track_num, timecode, flags, data, size);
bytes_used_ += bytes_needed;
}
void ClusterBuilder::WriteBlock(uint8* buf, int track_num, int64 timecode,
int flags, const uint8* data, int size) {
DCHECK_GE(track_num, 0);
DCHECK_LE(track_num, 126);
DCHECK_GE(flags, 0);
DCHECK_LE(flags, 0xff);
DCHECK(data);
DCHECK_GT(size, 0);
DCHECK_NE(cluster_timecode_, -1);
int64 timecode_delta = timecode - cluster_timecode_;
DCHECK_GE(timecode_delta, -32768);
DCHECK_LE(timecode_delta, 32767);
buf[0] = 0x80 | (track_num & 0x7F);
buf[1] = (timecode_delta >> 8) & 0xff;
buf[2] = timecode_delta & 0xff;
buf[3] = flags & 0xff;
memcpy(buf + 4, data, size);
}
scoped_ptr<Cluster> ClusterBuilder::Finish() {
DCHECK_NE(cluster_timecode_, -1);
UpdateUInt64(kClusterSizeOffset, bytes_used_ - (kClusterSizeOffset + 8));
scoped_ptr<Cluster> ret(new Cluster(buffer_.Pass(), bytes_used_));
Reset();
return ret.Pass();
}
scoped_ptr<Cluster> ClusterBuilder::FinishWithUnknownSize() {
DCHECK_NE(cluster_timecode_, -1);
UpdateUInt64(kClusterSizeOffset, kWebMUnknownSize);
scoped_ptr<Cluster> ret(new Cluster(buffer_.Pass(), bytes_used_));
Reset();
return ret.Pass();
}
void ClusterBuilder::Reset() {
buffer_size_ = kInitialBufferSize;
buffer_.reset(new uint8[buffer_size_]);
memcpy(buffer_.get(), kClusterHeader, sizeof(kClusterHeader));
bytes_used_ = sizeof(kClusterHeader);
cluster_timecode_ = -1;
}
void ClusterBuilder::ExtendBuffer(int bytes_needed) {
int new_buffer_size = 2 * buffer_size_;
while ((new_buffer_size - bytes_used_) < bytes_needed)
new_buffer_size *= 2;
scoped_ptr<uint8[]> new_buffer(new uint8[new_buffer_size]);
memcpy(new_buffer.get(), buffer_.get(), bytes_used_);
buffer_.reset(new_buffer.release());
buffer_size_ = new_buffer_size;
}
void ClusterBuilder::UpdateUInt64(int offset, int64 value) {
DCHECK_LE(offset + 7, buffer_size_);
uint8* buf = buffer_.get() + offset;
// Fill the last 7 bytes of size field in big-endian order.
for (int i = 7; i > 0; i--) {
buf[i] = value & 0xff;
value >>= 8;
}
}
} // namespace media
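
A short usage sketch for ClusterBuilder (illustrative, assuming a test
context; the track number and payload bytes are made up):

  #include "media/formats/webm/cluster_builder.h"

  scoped_ptr<media::Cluster> BuildOneBlockCluster() {
    media::ClusterBuilder builder;
    builder.SetClusterTimecode(0);  // absolute cluster timecode
    const uint8 kPayload[] = {0x00, 0x01, 0x02, 0x03};
    // Track 1, block timecode 0 (delta 0 from the cluster), flags 0x80
    // (keyframe bit for SimpleBlocks).
    builder.AddSimpleBlock(1, 0, 0x80, kPayload, sizeof(kPayload));
    return builder.Finish();  // Finish() also resets the builder for reuse.
  }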

media/formats/webm/cluster_builder.h
@@ -0,0 +1,64 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_CLUSTER_BUILDER_H_
#define MEDIA_FORMATS_WEBM_CLUSTER_BUILDER_H_
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
namespace media {
class Cluster {
public:
Cluster(scoped_ptr<uint8[]> data, int size);
~Cluster();
const uint8* data() const { return data_.get(); }
int size() const { return size_; }
private:
scoped_ptr<uint8[]> data_;
int size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Cluster);
};
class ClusterBuilder {
public:
ClusterBuilder();
~ClusterBuilder();
void SetClusterTimecode(int64 cluster_timecode);
void AddSimpleBlock(int track_num, int64 timecode, int flags,
const uint8* data, int size);
void AddBlockGroup(int track_num, int64 timecode, int duration, int flags,
const uint8* data, int size);
void AddBlockGroupWithoutBlockDuration(int track_num, int64 timecode,
int flags, const uint8* data, int size);
scoped_ptr<Cluster> Finish();
scoped_ptr<Cluster> FinishWithUnknownSize();
private:
void AddBlockGroupInternal(int track_num, int64 timecode,
bool include_block_duration, int duration,
int flags, const uint8* data, int size);
void Reset();
void ExtendBuffer(int bytes_needed);
void UpdateUInt64(int offset, int64 value);
void WriteBlock(uint8* buf, int track_num, int64 timecode, int flags,
const uint8* data, int size);
scoped_ptr<uint8[]> buffer_;
int buffer_size_;
int bytes_used_;
int64 cluster_timecode_;
DISALLOW_COPY_AND_ASSIGN(ClusterBuilder);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_CLUSTER_BUILDER_H_

media/formats/webm/opus_packet_builder.cc
@@ -0,0 +1,89 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/logging.h"
#include "media/formats/webm/opus_packet_builder.h"
#include "media/formats/webm/webm_cluster_parser.h"
namespace media {
OpusPacket::OpusPacket(uint8_t config, uint8_t frame_count, bool is_VBR) {
DCHECK_GE(config, 0);
DCHECK_LT(config, kNumPossibleOpusConfigs);
DCHECK_GE(frame_count, kMinOpusPacketFrameCount);
DCHECK_LE(frame_count, kMaxOpusPacketFrameCount);
duration_ms_ = frame_count *
WebMClusterParser::kOpusFrameDurationsMu[config] /
static_cast<float>(1000);
uint8_t frame_count_code;
uint8_t frame_count_byte;
if (frame_count == 1) {
frame_count_code = 0;
} else if (frame_count == 2) {
frame_count_code = is_VBR ? 2 : 1;
} else {
frame_count_code = 3;
frame_count_byte = (is_VBR ? 1 << 7 : 0) | frame_count;
}
// All Opus packets must have a TOC byte.
uint8_t opus_toc_byte = (config << 3) | frame_count_code;
data_.push_back(opus_toc_byte);
// For code 3 packets, the number of frames is signaled in the "frame
// count byte".
if (frame_count_code == 3) {
data_.push_back(frame_count_byte);
}
// Packet will only conform to layout specification for the TOC byte
// and optional frame count bytes appended above. This last byte
// is purely dummy padding where frame size data or encoded data might
// otherwise start.
data_.push_back(static_cast<uint8_t>(0));
}
OpusPacket::~OpusPacket() {
}
const uint8_t* OpusPacket::data() const {
return &(data_[0]);
}
int OpusPacket::size() const {
return data_.size();
}
double OpusPacket::duration_ms() const {
return duration_ms_;
}
ScopedVector<OpusPacket> BuildAllOpusPackets() {
ScopedVector<OpusPacket> opus_packets;
for (int frame_count = kMinOpusPacketFrameCount;
frame_count <= kMaxOpusPacketFrameCount; frame_count++) {
for (int opus_config_num = 0; opus_config_num < kNumPossibleOpusConfigs;
opus_config_num++) {
bool is_VBR = false;
opus_packets.push_back(
new OpusPacket(opus_config_num, frame_count, is_VBR));
if (frame_count >= 2) {
// Add another packet with VBR flag toggled. For frame counts >= 2,
// VBR triggers changes to packet framing.
is_VBR = true;
opus_packets.push_back(
new OpusPacket(opus_config_num, frame_count, is_VBR));
}
}
}
return opus_packets.Pass();
}
} // namespace media
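
A worked example of the TOC byte written above (illustrative values, per RFC
6716): for config 16 (2.5 ms CELT frames) and a single frame, frame_count_code
is 0, so

  toc = (16 << 3) | 0 = 0x80

WebMClusterParser::ReadOpusDuration() reverses this with the same masks:
(0x80 & 0xf8) >> 3 == 16 and 0x80 & 0x03 == 0, i.e. one frame of
kOpusFrameDurationsMu[16] == 2500 microseconds.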

media/formats/webm/opus_packet_builder.h
@@ -0,0 +1,43 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
#define MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
#include <vector>
#include "base/memory/scoped_ptr.h"
#include "base/memory/scoped_vector.h"
namespace media {
// From Opus RFC. See https://tools.ietf.org/html/rfc6716#page-14
enum OpusConstants {
kNumPossibleOpusConfigs = 32,
kMinOpusPacketFrameCount = 1,
kMaxOpusPacketFrameCount = 48
};
class OpusPacket {
public:
OpusPacket(uint8_t config, uint8_t frame_count, bool is_VBR);
~OpusPacket();
const uint8_t* data() const;
int size() const;
double duration_ms() const;
private:
std::vector<uint8_t> data_;
double duration_ms_;
DISALLOW_COPY_AND_ASSIGN(OpusPacket);
};
// Builds an exhaustive collection of Opus packet configurations.
ScopedVector<OpusPacket> BuildAllOpusPackets();
} // namespace media
#endif // MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_

media/formats/webm/tracks_builder.cc
@@ -0,0 +1,386 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/tracks_builder.h"
#include "base/logging.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
// Returns size of an integer, formatted using Matroska serialization.
static int GetUIntMkvSize(uint64 value) {
if (value < 0x07FULL)
return 1;
if (value < 0x03FFFULL)
return 2;
if (value < 0x01FFFFFULL)
return 3;
if (value < 0x0FFFFFFFULL)
return 4;
if (value < 0x07FFFFFFFFULL)
return 5;
if (value < 0x03FFFFFFFFFFULL)
return 6;
if (value < 0x01FFFFFFFFFFFFULL)
return 7;
return 8;
}
// Returns the minimum size required to serialize an integer value.
static int GetUIntSize(uint64 value) {
if (value < 0x0100ULL)
return 1;
if (value < 0x010000ULL)
return 2;
if (value < 0x01000000ULL)
return 3;
if (value < 0x0100000000ULL)
return 4;
if (value < 0x010000000000ULL)
return 5;
if (value < 0x01000000000000ULL)
return 6;
if (value < 0x0100000000000000ULL)
return 7;
return 8;
}
static int MasterElementSize(int element_id, int payload_size) {
return GetUIntSize(element_id) + GetUIntMkvSize(payload_size) + payload_size;
}
static int UIntElementSize(int element_id, uint64 value) {
return GetUIntSize(element_id) + 1 + GetUIntSize(value);
}
static int DoubleElementSize(int element_id) {
return GetUIntSize(element_id) + 1 + 8;
}
static int StringElementSize(int element_id, const std::string& value) {
return GetUIntSize(element_id) +
GetUIntMkvSize(value.length()) +
value.length();
}
static void SerializeInt(uint8** buf_ptr, int* buf_size_ptr,
int64 value, int size) {
uint8*& buf = *buf_ptr;
int& buf_size = *buf_size_ptr;
for (int idx = 1; idx <= size; ++idx) {
*buf++ = static_cast<uint8>(value >> ((size - idx) * 8));
--buf_size;
}
}
static void SerializeDouble(uint8** buf_ptr, int* buf_size_ptr,
double value) {
// Use a union to convert |value| to native endian integer bit pattern.
union {
double src;
int64 dst;
} tmp;
tmp.src = value;
// Write the bytes from native endian |tmp.dst| to big-endian form in |buf|.
SerializeInt(buf_ptr, buf_size_ptr, tmp.dst, 8);
}
static void WriteElementId(uint8** buf, int* buf_size, int element_id) {
SerializeInt(buf, buf_size, element_id, GetUIntSize(element_id));
}
static void WriteUInt(uint8** buf, int* buf_size, uint64 value) {
const int size = GetUIntMkvSize(value);
value |= (1ULL << (size * 7)); // Matroska formatting
SerializeInt(buf, buf_size, value, size);
}
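// Worked example (illustrative): for value == 500, GetUIntMkvSize(500) == 2,
// so the length-descriptor bit ORed in above is (1ULL << 14) == 0x4000;
// 500 | 0x4000 == 0x41F4, which SerializeInt() emits big-endian as the two
// bytes 0x41 0xF4 -- the EBML variable-length encoding of 500.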
static void WriteMasterElement(uint8** buf, int* buf_size,
int element_id, int payload_size) {
WriteElementId(buf, buf_size, element_id);
WriteUInt(buf, buf_size, payload_size);
}
static void WriteUIntElement(uint8** buf,
int* buf_size,
int element_id,
uint64 value) {
WriteElementId(buf, buf_size, element_id);
const int size = GetUIntSize(value);
WriteUInt(buf, buf_size, size);
SerializeInt(buf, buf_size, value, size);
}
static void WriteDoubleElement(uint8** buf, int* buf_size,
int element_id, double value) {
WriteElementId(buf, buf_size, element_id);
WriteUInt(buf, buf_size, 8);
SerializeDouble(buf, buf_size, value);
}
static void WriteStringElement(uint8** buf_ptr, int* buf_size_ptr,
int element_id, const std::string& value) {
uint8*& buf = *buf_ptr;
int& buf_size = *buf_size_ptr;
WriteElementId(&buf, &buf_size, element_id);
const uint64 size = value.length();
WriteUInt(&buf, &buf_size, size);
memcpy(buf, value.data(), size);
buf += size;
buf_size -= size;
}
TracksBuilder::TracksBuilder(bool allow_invalid_values)
: allow_invalid_values_(allow_invalid_values) {}
TracksBuilder::TracksBuilder()
: allow_invalid_values_(false) {}
TracksBuilder::~TracksBuilder() {}
void TracksBuilder::AddVideoTrack(int track_num,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int video_pixel_width,
int video_pixel_height) {
AddTrackInternal(track_num, kWebMTrackTypeVideo, track_uid, codec_id, name,
language, default_duration, video_pixel_width,
video_pixel_height, -1, -1);
}
void TracksBuilder::AddAudioTrack(int track_num,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int audio_channels,
double audio_sampling_frequency) {
AddTrackInternal(track_num, kWebMTrackTypeAudio, track_uid, codec_id, name,
language, default_duration, -1, -1, audio_channels,
audio_sampling_frequency);
}
void TracksBuilder::AddTextTrack(int track_num,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language) {
AddTrackInternal(track_num, kWebMTrackTypeSubtitlesOrCaptions, track_uid,
codec_id, name, language, -1, -1, -1, -1, -1);
}
std::vector<uint8> TracksBuilder::Finish() {
// Allocate the storage
std::vector<uint8> buffer;
buffer.resize(GetTracksSize());
// Populate the storage with a tracks header
WriteTracks(&buffer[0], buffer.size());
return buffer;
}
void TracksBuilder::AddTrackInternal(int track_num,
int track_type,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int video_pixel_width,
int video_pixel_height,
int audio_channels,
double audio_sampling_frequency) {
tracks_.push_back(Track(track_num, track_type, track_uid, codec_id, name,
language, default_duration, video_pixel_width,
video_pixel_height, audio_channels,
audio_sampling_frequency, allow_invalid_values_));
}
int TracksBuilder::GetTracksSize() const {
return MasterElementSize(kWebMIdTracks, GetTracksPayloadSize());
}
int TracksBuilder::GetTracksPayloadSize() const {
int payload_size = 0;
for (TrackList::const_iterator itr = tracks_.begin();
itr != tracks_.end(); ++itr) {
payload_size += itr->GetSize();
}
return payload_size;
}
void TracksBuilder::WriteTracks(uint8* buf, int buf_size) const {
WriteMasterElement(&buf, &buf_size, kWebMIdTracks, GetTracksPayloadSize());
for (TrackList::const_iterator itr = tracks_.begin();
itr != tracks_.end(); ++itr) {
itr->Write(&buf, &buf_size);
}
}
TracksBuilder::Track::Track(int track_num,
int track_type,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int video_pixel_width,
int video_pixel_height,
int audio_channels,
double audio_sampling_frequency,
bool allow_invalid_values)
: track_num_(track_num),
track_type_(track_type),
track_uid_(track_uid),
codec_id_(codec_id),
name_(name),
language_(language),
default_duration_(default_duration),
video_pixel_width_(video_pixel_width),
video_pixel_height_(video_pixel_height),
audio_channels_(audio_channels),
audio_sampling_frequency_(audio_sampling_frequency) {
if (!allow_invalid_values) {
CHECK_GT(track_num_, 0);
CHECK_GT(track_type_, 0);
CHECK_LT(track_type_, 255);
CHECK_GT(track_uid_, 0);
if (track_type != kWebMTrackTypeVideo &&
track_type != kWebMTrackTypeAudio) {
CHECK_EQ(default_duration_, -1);
} else {
CHECK(default_duration_ == -1 || default_duration_ > 0);
}
if (track_type == kWebMTrackTypeVideo) {
CHECK_GT(video_pixel_width_, 0);
CHECK_GT(video_pixel_height_, 0);
} else {
CHECK_EQ(video_pixel_width_, -1);
CHECK_EQ(video_pixel_height_, -1);
}
if (track_type == kWebMTrackTypeAudio) {
CHECK_GT(audio_channels_, 0);
CHECK_GT(audio_sampling_frequency_, 0.0);
} else {
CHECK_EQ(audio_channels_, -1);
CHECK_EQ(audio_sampling_frequency_, -1.0);
}
}
}
int TracksBuilder::Track::GetSize() const {
return MasterElementSize(kWebMIdTrackEntry, GetPayloadSize());
}
int TracksBuilder::Track::GetVideoPayloadSize() const {
int payload_size = 0;
if (video_pixel_width_ >= 0)
payload_size += UIntElementSize(kWebMIdPixelWidth, video_pixel_width_);
if (video_pixel_height_ >= 0)
payload_size += UIntElementSize(kWebMIdPixelHeight, video_pixel_height_);
return payload_size;
}
int TracksBuilder::Track::GetAudioPayloadSize() const {
int payload_size = 0;
if (audio_channels_ >= 0)
payload_size += UIntElementSize(kWebMIdChannels, audio_channels_);
if (audio_sampling_frequency_ >= 0)
payload_size += DoubleElementSize(kWebMIdSamplingFrequency);
return payload_size;
}
int TracksBuilder::Track::GetPayloadSize() const {
int size = 0;
size += UIntElementSize(kWebMIdTrackNumber, track_num_);
size += UIntElementSize(kWebMIdTrackType, track_type_);
size += UIntElementSize(kWebMIdTrackUID, track_uid_);
if (default_duration_ >= 0)
size += UIntElementSize(kWebMIdDefaultDuration, default_duration_);
if (!codec_id_.empty())
size += StringElementSize(kWebMIdCodecID, codec_id_);
if (!name_.empty())
size += StringElementSize(kWebMIdName, name_);
if (!language_.empty())
size += StringElementSize(kWebMIdLanguage, language_);
if (GetVideoPayloadSize() > 0) {
size += MasterElementSize(kWebMIdVideo, GetVideoPayloadSize());
}
if (GetAudioPayloadSize() > 0) {
size += MasterElementSize(kWebMIdAudio, GetAudioPayloadSize());
}
return size;
}
void TracksBuilder::Track::Write(uint8** buf, int* buf_size) const {
WriteMasterElement(buf, buf_size, kWebMIdTrackEntry, GetPayloadSize());
WriteUIntElement(buf, buf_size, kWebMIdTrackNumber, track_num_);
WriteUIntElement(buf, buf_size, kWebMIdTrackType, track_type_);
WriteUIntElement(buf, buf_size, kWebMIdTrackUID, track_uid_);
if (default_duration_ >= 0)
WriteUIntElement(buf, buf_size, kWebMIdDefaultDuration, default_duration_);
if (!codec_id_.empty())
WriteStringElement(buf, buf_size, kWebMIdCodecID, codec_id_);
if (!name_.empty())
WriteStringElement(buf, buf_size, kWebMIdName, name_);
if (!language_.empty())
WriteStringElement(buf, buf_size, kWebMIdLanguage, language_);
if (GetVideoPayloadSize() > 0) {
WriteMasterElement(buf, buf_size, kWebMIdVideo, GetVideoPayloadSize());
if (video_pixel_width_ >= 0)
WriteUIntElement(buf, buf_size, kWebMIdPixelWidth, video_pixel_width_);
if (video_pixel_height_ >= 0)
WriteUIntElement(buf, buf_size, kWebMIdPixelHeight, video_pixel_height_);
}
if (GetAudioPayloadSize() > 0) {
WriteMasterElement(buf, buf_size, kWebMIdAudio, GetAudioPayloadSize());
if (audio_channels_ >= 0)
WriteUIntElement(buf, buf_size, kWebMIdChannels, audio_channels_);
if (audio_sampling_frequency_ >= 0) {
WriteDoubleElement(buf, buf_size, kWebMIdSamplingFrequency,
audio_sampling_frequency_);
}
}
}
} // namespace media
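
A brief usage sketch for TracksBuilder (illustrative, assuming a test context;
the track numbers, UIDs and codec ids are made up):

  #include "media/formats/webm/tracks_builder.h"

  std::vector<uint8> BuildTwoTrackHeader() {
    media::TracksBuilder builder;  // allow_invalid_values == false
    builder.AddVideoTrack(1, 1, "V_VP8", "video", "eng",
                          -1 /* no DefaultDuration */, 320, 240);
    builder.AddAudioTrack(2, 2, "A_VORBIS", "audio", "eng",
                          -1 /* no DefaultDuration */, 2, 48000.0);
    return builder.Finish();  // serialized Tracks element
  }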

media/formats/webm/tracks_builder.h
@@ -0,0 +1,114 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_TRACKS_BUILDER_H_
#define MEDIA_FORMATS_WEBM_TRACKS_BUILDER_H_
#include <list>
#include <string>
#include <vector>
#include "base/basictypes.h"
namespace media {
class TracksBuilder {
public:
// If |allow_invalid_values| is false, some AddTrack() parameters are given
// basic range checks and will assert if they are out of the valid range.
// |codec_id|, |name|, |language| and any device-specific constraints are not
// checked.
explicit TracksBuilder(bool allow_invalid_values);
TracksBuilder(); // Sets |allow_invalid_values| to false.
~TracksBuilder();
// Only a non-negative |default_duration| will result in a serialized
// kWebMIdDefaultDuration element. Note, 0 is allowed here for testing only
// if |allow_invalid_values_| is true, since it is an illegal value for
// DefaultDuration. Similar applies to |audio_channels|,
// |audio_sampling_frequency|, |video_pixel_width| and |video_pixel_height|.
void AddVideoTrack(int track_num,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int video_pixel_width,
int video_pixel_height);
void AddAudioTrack(int track_num,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int audio_channels,
double audio_sampling_frequency);
void AddTextTrack(int track_num,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language);
std::vector<uint8> Finish();
private:
void AddTrackInternal(int track_num,
int track_type,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int video_pixel_width,
int video_pixel_height,
int audio_channels,
double audio_sampling_frequency);
int GetTracksSize() const;
int GetTracksPayloadSize() const;
void WriteTracks(uint8* buffer, int buffer_size) const;
class Track {
public:
Track(int track_num,
int track_type,
uint64 track_uid,
const std::string& codec_id,
const std::string& name,
const std::string& language,
int default_duration,
int video_pixel_width,
int video_pixel_height,
int audio_channels,
double audio_sampling_frequency,
bool allow_invalid_values);
int GetSize() const;
void Write(uint8** buf, int* buf_size) const;
private:
int GetPayloadSize() const;
int GetVideoPayloadSize() const;
int GetAudioPayloadSize() const;
int track_num_;
int track_type_;
uint64 track_uid_;
std::string codec_id_;
std::string name_;
std::string language_;
int default_duration_;
int video_pixel_width_;
int video_pixel_height_;
int audio_channels_;
double audio_sampling_frequency_;
};
typedef std::list<Track> TrackList;
TrackList tracks_;
bool allow_invalid_values_;
DISALLOW_COPY_AND_ASSIGN(TracksBuilder);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_TRACKS_BUILDER_H_

media/formats/webm/webm_audio_client.cc
@@ -0,0 +1,141 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_audio_client.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/channel_layout.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
WebMAudioClient::WebMAudioClient(const scoped_refptr<MediaLog>& media_log)
: media_log_(media_log) {
Reset();
}
WebMAudioClient::~WebMAudioClient() {
}
void WebMAudioClient::Reset() {
channels_ = -1;
samples_per_second_ = -1;
output_samples_per_second_ = -1;
}
bool WebMAudioClient::InitializeConfig(
const std::string& codec_id, const std::vector<uint8>& codec_private,
int64 seek_preroll, int64 codec_delay, bool is_encrypted,
AudioDecoderConfig* config) {
DCHECK(config);
SampleFormat sample_format = kSampleFormatPlanarF32;
AudioCodec audio_codec = kUnknownAudioCodec;
if (codec_id == "A_VORBIS") {
audio_codec = kCodecVorbis;
} else if (codec_id == "A_OPUS") {
audio_codec = kCodecOpus;
} else {
MEDIA_LOG(ERROR, media_log_) << "Unsupported audio codec_id " << codec_id;
return false;
}
if (samples_per_second_ <= 0)
return false;
// Set channel layout default if a Channels element was not present.
if (channels_ == -1)
channels_ = 1;
ChannelLayout channel_layout = GuessChannelLayout(channels_);
if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
MEDIA_LOG(ERROR, media_log_) << "Unsupported channel count " << channels_;
return false;
}
int samples_per_second = samples_per_second_;
if (output_samples_per_second_ > 0)
samples_per_second = output_samples_per_second_;
// Always use 48kHz for OPUS. See the "Input Sample Rate" section of the
// spec: http://tools.ietf.org/html/draft-terriberry-oggopus-01#page-11
if (audio_codec == kCodecOpus) {
samples_per_second = 48000;
sample_format = kSampleFormatF32;
}
const uint8* extra_data = NULL;
size_t extra_data_size = 0;
if (codec_private.size() > 0) {
extra_data = &codec_private[0];
extra_data_size = codec_private.size();
}
// Convert |codec_delay| from nanoseconds into frames.
int codec_delay_in_frames = 0;
if (codec_delay != -1) {
codec_delay_in_frames =
0.5 +
samples_per_second * (static_cast<double>(codec_delay) /
base::Time::kNanosecondsPerSecond);
}
config->Initialize(
audio_codec,
sample_format,
channel_layout,
samples_per_second,
extra_data,
extra_data_size,
is_encrypted,
base::TimeDelta::FromMicroseconds(
(seek_preroll != -1 ? seek_preroll : 0) / 1000),
codec_delay_in_frames);
return config->IsValidConfig();
}
bool WebMAudioClient::OnUInt(int id, int64 val) {
if (id == kWebMIdChannels) {
if (channels_ != -1) {
MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex
<< id << " specified. (" << channels_
<< " and " << val << ")";
return false;
}
channels_ = val;
}
return true;
}
bool WebMAudioClient::OnFloat(int id, double val) {
double* dst = NULL;
switch (id) {
case kWebMIdSamplingFrequency:
dst = &samples_per_second_;
break;
case kWebMIdOutputSamplingFrequency:
dst = &output_samples_per_second_;
break;
default:
return true;
}
if (val <= 0)
return false;
if (*dst != -1) {
MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex << id
<< " specified (" << *dst << " and " << val
<< ")";
return false;
}
*dst = val;
return true;
}
} // namespace media
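
A worked example of the codec delay conversion in InitializeConfig() above
(illustrative numbers): a typical Opus |codec_delay| of 6500000 ns at the
forced 48000 Hz output rate gives

  codec_delay_in_frames = 0.5 + 48000 * (6500000 / 1e9) = 312.5

which truncates to 312 frames of pre-roll when stored in the int, while
|seek_preroll| is passed through as microseconds (nanoseconds divided by
1000).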

media/formats/webm/webm_audio_client.h
@@ -0,0 +1,54 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_AUDIO_CLIENT_H_
#define MEDIA_FORMATS_WEBM_WEBM_AUDIO_CLIENT_H_
#include <string>
#include <vector>
#include "media/base/media_log.h"
#include "media/formats/webm/webm_parser.h"
namespace media {
class AudioDecoderConfig;
// Helper class used to parse an Audio element inside a TrackEntry element.
class WebMAudioClient : public WebMParserClient {
public:
explicit WebMAudioClient(const scoped_refptr<MediaLog>& media_log);
~WebMAudioClient() override;
// Reset this object's state so it can process a new audio track element.
void Reset();
// Initialize |config| with the data in |codec_id|, |codec_private|,
// |is_encrypted| and the fields parsed from the last audio track element this
// object was used to parse.
// Returns true if |config| was successfully initialized.
// Returns false if there were unexpected values in the provided parameters or
// audio track element fields.
bool InitializeConfig(const std::string& codec_id,
const std::vector<uint8>& codec_private,
const int64 seek_preroll,
const int64 codec_delay,
bool is_encrypted,
AudioDecoderConfig* config);
private:
// WebMParserClient implementation.
bool OnUInt(int id, int64 val) override;
bool OnFloat(int id, double val) override;
scoped_refptr<MediaLog> media_log_;
int channels_;
double samples_per_second_;
double output_samples_per_second_;
DISALLOW_COPY_AND_ASSIGN(WebMAudioClient);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_AUDIO_CLIENT_H_

media/formats/webm/webm_cluster_parser.cc
@@ -0,0 +1,882 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_cluster_parser.h"
#include <vector>
#include "base/logging.h"
#include "base/sys_byteorder.h"
#include "media/base/decrypt_config.h"
#include "media/base/timestamp_constants.h"
#include "media/filters/webvtt_util.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_crypto_helpers.h"
#include "media/formats/webm/webm_webvtt_parser.h"
namespace media {
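// Per-frame duration, in microseconds, for each of the 32 possible Opus TOC
// config values (RFC 6716, Section 3.1); ReadOpusDuration() below indexes it
// with the 5-bit config field extracted from a packet's TOC byte.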
const uint16_t WebMClusterParser::kOpusFrameDurationsMu[] = {
10000, 20000, 40000, 60000, 10000, 20000, 40000, 60000, 10000, 20000, 40000,
60000, 10000, 20000, 10000, 20000, 2500, 5000, 10000, 20000, 2500, 5000,
10000, 20000, 2500, 5000, 10000, 20000, 2500, 5000, 10000, 20000};
enum {
// Limits the number of MEDIA_LOG() calls in the path of reading encoded
// duration to avoid spamming for corrupted data.
kMaxDurationErrorLogs = 10,
// Limits the number of MEDIA_LOG() calls warning the user that buffer
// durations have been estimated.
kMaxDurationEstimateLogs = 10,
};
WebMClusterParser::WebMClusterParser(
int64 timecode_scale,
int audio_track_num,
base::TimeDelta audio_default_duration,
int video_track_num,
base::TimeDelta video_default_duration,
const WebMTracksParser::TextTracks& text_tracks,
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
const std::string& video_encryption_key_id,
const AudioCodec audio_codec,
const scoped_refptr<MediaLog>& media_log)
: timecode_multiplier_(timecode_scale / 1000.0),
ignored_tracks_(ignored_tracks),
audio_encryption_key_id_(audio_encryption_key_id),
video_encryption_key_id_(video_encryption_key_id),
audio_codec_(audio_codec),
parser_(kWebMIdCluster, this),
cluster_start_time_(kNoTimestamp()),
audio_(audio_track_num, false, audio_default_duration, media_log),
video_(video_track_num, true, video_default_duration, media_log),
ready_buffer_upper_bound_(kNoDecodeTimestamp()),
media_log_(media_log) {
for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
it != text_tracks.end();
++it) {
text_track_map_.insert(std::make_pair(
it->first, Track(it->first, false, kNoTimestamp(), media_log_)));
}
}
WebMClusterParser::~WebMClusterParser() {}
void WebMClusterParser::Reset() {
last_block_timecode_ = -1;
cluster_timecode_ = -1;
cluster_start_time_ = kNoTimestamp();
cluster_ended_ = false;
parser_.Reset();
audio_.Reset();
video_.Reset();
ResetTextTracks();
ready_buffer_upper_bound_ = kNoDecodeTimestamp();
}
int WebMClusterParser::Parse(const uint8_t* buf, int size) {
audio_.ClearReadyBuffers();
video_.ClearReadyBuffers();
ClearTextTrackReadyBuffers();
ready_buffer_upper_bound_ = kNoDecodeTimestamp();
int result = parser_.Parse(buf, size);
if (result < 0) {
cluster_ended_ = false;
return result;
}
cluster_ended_ = parser_.IsParsingComplete();
if (cluster_ended_) {
// If there were no buffers in this cluster, set the cluster start time to
// be the |cluster_timecode_|.
if (cluster_start_time_ == kNoTimestamp()) {
// If the cluster did not even have a |cluster_timecode_|, signal parse
// error.
if (cluster_timecode_ < 0)
return -1;
cluster_start_time_ = base::TimeDelta::FromMicroseconds(
cluster_timecode_ * timecode_multiplier_);
}
// Reset the parser if we're done parsing so that
// it is ready to accept another cluster on the next
// call.
parser_.Reset();
last_block_timecode_ = -1;
cluster_timecode_ = -1;
}
return result;
}
const WebMClusterParser::BufferQueue& WebMClusterParser::GetAudioBuffers() {
if (ready_buffer_upper_bound_ == kNoDecodeTimestamp())
UpdateReadyBuffers();
return audio_.ready_buffers();
}
const WebMClusterParser::BufferQueue& WebMClusterParser::GetVideoBuffers() {
if (ready_buffer_upper_bound_ == kNoDecodeTimestamp())
UpdateReadyBuffers();
return video_.ready_buffers();
}
const WebMClusterParser::TextBufferQueueMap&
WebMClusterParser::GetTextBuffers() {
if (ready_buffer_upper_bound_ == kNoDecodeTimestamp())
UpdateReadyBuffers();
// Translate our |text_track_map_| into |text_buffers_map_|, inserting rows in
// the output only for non-empty ready_buffer() queues in |text_track_map_|.
text_buffers_map_.clear();
for (TextTrackMap::const_iterator itr = text_track_map_.begin();
itr != text_track_map_.end();
++itr) {
const BufferQueue& text_buffers = itr->second.ready_buffers();
if (!text_buffers.empty())
text_buffers_map_.insert(std::make_pair(itr->first, text_buffers));
}
return text_buffers_map_;
}
base::TimeDelta WebMClusterParser::TryGetEncodedAudioDuration(
const uint8_t* data,
int size) {
// Duration is currently read assuming the *entire* stream is unencrypted.
// The special "Signal Byte" prepended to Blocks in encrypted streams is
// assumed to not be present.
// TODO(chcunningham): Consider parsing "Signal Byte" for encrypted streams
// to return duration for any unencrypted blocks.
if (audio_codec_ == kCodecOpus) {
return ReadOpusDuration(data, size);
}
// TODO(wolenetz/chcunningham): Implement duration reading for Vorbis. See
// motivations in http://crbug.com/396634.
return kNoTimestamp();
}
base::TimeDelta WebMClusterParser::ReadOpusDuration(const uint8_t* data,
int size) {
// Masks and constants for Opus packets. See
// https://tools.ietf.org/html/rfc6716#page-14
static const uint8_t kTocConfigMask = 0xf8;
static const uint8_t kTocFrameCountCodeMask = 0x03;
static const uint8_t kFrameCountMask = 0x3f;
static const base::TimeDelta kPacketDurationMax =
base::TimeDelta::FromMilliseconds(120);
if (size < 1) {
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Invalid zero-byte Opus packet; demuxed block duration may be "
"imprecise.";
return kNoTimestamp();
}
// Frame count type described by last 2 bits of Opus TOC byte.
int frame_count_type = data[0] & kTocFrameCountCodeMask;
int frame_count = 0;
switch (frame_count_type) {
case 0:
frame_count = 1;
break;
case 1:
case 2:
frame_count = 2;
break;
case 3:
// Type 3 indicates an arbitrary frame count described in the next byte.
if (size < 2) {
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Second byte missing from 'Code 3' Opus packet; demuxed block "
"duration may be imprecise.";
return kNoTimestamp();
}
frame_count = data[1] & kFrameCountMask;
if (frame_count == 0) {
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Illegal 'Code 3' Opus packet with frame count zero; demuxed "
"block duration may be imprecise.";
return kNoTimestamp();
}
break;
default:
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Unexpected Opus frame count type: " << frame_count_type << "; "
<< "demuxed block duration may be imprecise.";
return kNoTimestamp();
}
int opusConfig = (data[0] & kTocConfigMask) >> 3;
CHECK_GE(opusConfig, 0);
CHECK_LT(opusConfig, static_cast<int>(arraysize(kOpusFrameDurationsMu)));
DCHECK_GT(frame_count, 0);
base::TimeDelta duration = base::TimeDelta::FromMicroseconds(
kOpusFrameDurationsMu[opusConfig] * frame_count);
if (duration > kPacketDurationMax) {
// Intentionally allowing packet to pass through for now. Decoder should
// either handle or fail gracefully. MEDIA_LOG as breadcrumbs in case
// things go sideways.
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "Warning, demuxed Opus packet with encoded duration: "
<< duration.InMilliseconds() << "ms. Should be no greater than "
<< kPacketDurationMax.InMilliseconds() << "ms.";
}
return duration;
}
WebMParserClient* WebMClusterParser::OnListStart(int id) {
if (id == kWebMIdCluster) {
cluster_timecode_ = -1;
cluster_start_time_ = kNoTimestamp();
} else if (id == kWebMIdBlockGroup) {
block_data_.reset();
block_data_size_ = -1;
block_duration_ = -1;
discard_padding_ = -1;
discard_padding_set_ = false;
} else if (id == kWebMIdBlockAdditions) {
block_add_id_ = -1;
block_additional_data_.reset();
block_additional_data_size_ = 0;
}
return this;
}
bool WebMClusterParser::OnListEnd(int id) {
if (id != kWebMIdBlockGroup)
return true;
// Make sure the BlockGroup actually had a Block.
if (block_data_size_ == -1) {
MEDIA_LOG(ERROR, media_log_) << "Block missing from BlockGroup.";
return false;
}
bool result = ParseBlock(false, block_data_.get(), block_data_size_,
block_additional_data_.get(),
block_additional_data_size_, block_duration_,
discard_padding_set_ ? discard_padding_ : 0);
block_data_.reset();
block_data_size_ = -1;
block_duration_ = -1;
block_add_id_ = -1;
block_additional_data_.reset();
block_additional_data_size_ = 0;
discard_padding_ = -1;
discard_padding_set_ = false;
return result;
}
bool WebMClusterParser::OnUInt(int id, int64 val) {
int64* dst;
switch (id) {
case kWebMIdTimecode:
dst = &cluster_timecode_;
break;
case kWebMIdBlockDuration:
dst = &block_duration_;
break;
case kWebMIdBlockAddID:
dst = &block_add_id_;
break;
default:
return true;
}
if (*dst != -1)
return false;
*dst = val;
return true;
}
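// Worked example of the (Simple)Block header parsed below (illustrative
// bytes): {0x81, 0x00, 0x64, 0x80, ...} gives track_num = 0x81 & 0x7f = 1,
// a 16-bit big-endian timecode of 0x0064 = +100 relative to the cluster
// timecode (a value with the high bit set, e.g. 0xFF38, sign-extends to -200
// and is then rejected by OnBlock()), flags = 0x80 (keyframe bit for
// SimpleBlocks) with no lacing, and the frame payload starting at byte 4.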
bool WebMClusterParser::ParseBlock(bool is_simple_block,
const uint8_t* buf,
int size,
const uint8_t* additional,
int additional_size,
int duration,
int64 discard_padding) {
if (size < 4)
return false;
// Return an error if the trackNum > 127. We just aren't
// going to support large track numbers right now.
if (!(buf[0] & 0x80)) {
MEDIA_LOG(ERROR, media_log_) << "TrackNumber over 127 not supported";
return false;
}
int track_num = buf[0] & 0x7f;
int timecode = buf[1] << 8 | buf[2];
int flags = buf[3] & 0xff;
int lacing = (flags >> 1) & 0x3;
if (lacing) {
MEDIA_LOG(ERROR, media_log_) << "Lacing " << lacing
<< " is not supported yet.";
return false;
}
// Sign extend negative timecode offsets.
if (timecode & 0x8000)
timecode |= ~0xffff;
const uint8_t* frame_data = buf + 4;
int frame_size = size - (frame_data - buf);
return OnBlock(is_simple_block, track_num, timecode, duration, flags,
frame_data, frame_size, additional, additional_size,
discard_padding);
}
bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
switch (id) {
case kWebMIdSimpleBlock:
return ParseBlock(true, data, size, NULL, 0, -1, 0);
case kWebMIdBlock:
if (block_data_) {
MEDIA_LOG(ERROR, media_log_)
<< "More than 1 Block in a BlockGroup is not "
"supported.";
return false;
}
block_data_.reset(new uint8_t[size]);
memcpy(block_data_.get(), data, size);
block_data_size_ = size;
return true;
case kWebMIdBlockAdditional: {
uint64 block_add_id = base::HostToNet64(block_add_id_);
if (block_additional_data_) {
// TODO(vigneshv): Technically, more than 1 BlockAdditional is allowed
// as per matroska spec. But for now we don't have a use case to
// support parsing of such files. Take a look at this again when such a
// case arises.
MEDIA_LOG(ERROR, media_log_) << "More than 1 BlockAdditional in a "
"BlockGroup is not supported.";
return false;
}
// First 8 bytes of side_data in DecoderBuffer is the BlockAddID
// element's value in Big Endian format. This is done to mimic ffmpeg
// demuxer's behavior.
block_additional_data_size_ = size + sizeof(block_add_id);
block_additional_data_.reset(new uint8_t[block_additional_data_size_]);
memcpy(block_additional_data_.get(), &block_add_id,
sizeof(block_add_id));
memcpy(block_additional_data_.get() + 8, data, size);
return true;
}
case kWebMIdDiscardPadding: {
if (discard_padding_set_ || size <= 0 || size > 8)
return false;
discard_padding_set_ = true;
// Read in the big-endian integer.
discard_padding_ = static_cast<int8>(data[0]);
for (int i = 1; i < size; ++i)
discard_padding_ = (discard_padding_ << 8) | data[i];
return true;
}
default:
return true;
}
}
bool WebMClusterParser::OnBlock(bool is_simple_block,
int track_num,
int timecode,
int block_duration,
int flags,
const uint8_t* data,
int size,
const uint8_t* additional,
int additional_size,
int64 discard_padding) {
DCHECK_GE(size, 0);
if (cluster_timecode_ == -1) {
MEDIA_LOG(ERROR, media_log_) << "Got a block before cluster timecode.";
return false;
}
// TODO(acolwell): Should relative negative timecode offsets be rejected? Or
// only when the absolute timecode is negative? See http://crbug.com/271794
if (timecode < 0) {
MEDIA_LOG(ERROR, media_log_) << "Got a block with negative timecode offset "
<< timecode;
return false;
}
if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
MEDIA_LOG(ERROR, media_log_)
<< "Got a block with a timecode before the previous block.";
return false;
}
Track* track = NULL;
StreamParserBuffer::Type buffer_type = DemuxerStream::AUDIO;
std::string encryption_key_id;
base::TimeDelta encoded_duration = kNoTimestamp();
if (track_num == audio_.track_num()) {
track = &audio_;
encryption_key_id = audio_encryption_key_id_;
if (encryption_key_id.empty()) {
encoded_duration = TryGetEncodedAudioDuration(data, size);
}
} else if (track_num == video_.track_num()) {
track = &video_;
encryption_key_id = video_encryption_key_id_;
buffer_type = DemuxerStream::VIDEO;
} else if (ignored_tracks_.find(track_num) != ignored_tracks_.end()) {
return true;
} else if (Track* const text_track = FindTextTrack(track_num)) {
if (is_simple_block) // BlockGroup is required for WebVTT cues
return false;
if (block_duration < 0) // not specified
return false;
track = text_track;
buffer_type = DemuxerStream::TEXT;
} else {
MEDIA_LOG(ERROR, media_log_) << "Unexpected track number " << track_num;
return false;
}
last_block_timecode_ = timecode;
base::TimeDelta timestamp = base::TimeDelta::FromMicroseconds(
(cluster_timecode_ + timecode) * timecode_multiplier_);
scoped_refptr<StreamParserBuffer> buffer;
if (buffer_type != DemuxerStream::TEXT) {
// The first bit of the flags is set when a SimpleBlock contains only
// keyframes. If this is a Block, then inspection of the payload is
// necessary to determine whether it contains a keyframe or not.
// http://www.matroska.org/technical/specs/index.html
bool is_keyframe =
is_simple_block ? (flags & 0x80) != 0 : track->IsKeyframe(data, size);
// Every encrypted Block has a signal byte and IV prepended to it. Current
// encrypted WebM request for comments specification is here
// http://wiki.webmproject.org/encryption/webm-encryption-rfc
scoped_ptr<DecryptConfig> decrypt_config;
int data_offset = 0;
if (!encryption_key_id.empty() &&
!WebMCreateDecryptConfig(
data, size,
reinterpret_cast<const uint8_t*>(encryption_key_id.data()),
encryption_key_id.size(),
&decrypt_config, &data_offset)) {
return false;
}
// TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
// type with remapped bytestream track numbers and allow multiple tracks as
// applicable. See https://crbug.com/341581.
buffer = StreamParserBuffer::CopyFrom(
data + data_offset, size - data_offset,
additional, additional_size,
is_keyframe, buffer_type, track_num);
if (decrypt_config)
buffer->set_decrypt_config(decrypt_config.Pass());
} else {
std::string id, settings, content;
WebMWebVTTParser::Parse(data, size, &id, &settings, &content);
std::vector<uint8_t> side_data;
MakeSideData(id.begin(), id.end(),
settings.begin(), settings.end(),
&side_data);
// TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
// type with remapped bytestream track numbers and allow multiple tracks as
// applicable. See https://crbug.com/341581.
buffer = StreamParserBuffer::CopyFrom(
reinterpret_cast<const uint8_t*>(content.data()),
content.length(),
&side_data[0],
side_data.size(),
true, buffer_type, track_num);
}
buffer->set_timestamp(timestamp);
if (cluster_start_time_ == kNoTimestamp())
cluster_start_time_ = timestamp;
base::TimeDelta block_duration_time_delta = kNoTimestamp();
if (block_duration >= 0) {
block_duration_time_delta = base::TimeDelta::FromMicroseconds(
block_duration * timecode_multiplier_);
}
// Prefer encoded duration over BlockGroup->BlockDuration or
// TrackEntry->DefaultDuration when available. This layering violation is a
// workaround for http://crbug.com/396634, decreasing the likelihood of
// fall-back to rough estimation techniques for Blocks that lack a
// BlockDuration at the end of a cluster. Cross cluster durations are not
// feasible given flexibility of cluster ordering and MSE APIs. Duration
// estimation may still apply in cases of encryption and codecs for which
// we do not extract encoded duration. Within a cluster, estimates are applied
// as Block Timecode deltas, or once the whole cluster is parsed in the case
// of the last Block in the cluster. See Track::AddBuffer and
// ApplyDurationEstimateIfNeeded().
if (encoded_duration != kNoTimestamp()) {
DCHECK(encoded_duration != kInfiniteDuration());
DCHECK(encoded_duration > base::TimeDelta());
buffer->set_duration(encoded_duration);
DVLOG(3) << __FUNCTION__ << " : "
<< "Using encoded duration " << encoded_duration.InSecondsF();
if (block_duration_time_delta != kNoTimestamp()) {
base::TimeDelta duration_difference =
block_duration_time_delta - encoded_duration;
const auto kWarnDurationDiff =
base::TimeDelta::FromMicroseconds(timecode_multiplier_ * 2);
if (duration_difference.magnitude() > kWarnDurationDiff) {
LIMITED_MEDIA_LOG(DEBUG, media_log_, num_duration_errors_,
kMaxDurationErrorLogs)
<< "BlockDuration (" << block_duration_time_delta.InMilliseconds()
<< "ms) differs significantly from encoded duration ("
<< encoded_duration.InMilliseconds() << "ms).";
}
}
} else if (block_duration_time_delta != kNoTimestamp()) {
buffer->set_duration(block_duration_time_delta);
} else {
DCHECK_NE(buffer_type, DemuxerStream::TEXT);
buffer->set_duration(track->default_duration());
}
if (discard_padding != 0) {
buffer->set_discard_padding(std::make_pair(
base::TimeDelta(),
base::TimeDelta::FromMicroseconds(discard_padding / 1000)));
}
return track->AddBuffer(buffer);
}
WebMClusterParser::Track::Track(int track_num,
bool is_video,
base::TimeDelta default_duration,
const scoped_refptr<MediaLog>& media_log)
: track_num_(track_num),
is_video_(is_video),
default_duration_(default_duration),
estimated_next_frame_duration_(kNoTimestamp()),
media_log_(media_log) {
DCHECK(default_duration_ == kNoTimestamp() ||
default_duration_ > base::TimeDelta());
}
WebMClusterParser::Track::~Track() {}
DecodeTimestamp WebMClusterParser::Track::GetReadyUpperBound() {
DCHECK(ready_buffers_.empty());
if (last_added_buffer_missing_duration_.get())
return last_added_buffer_missing_duration_->GetDecodeTimestamp();
return DecodeTimestamp::FromPresentationTime(base::TimeDelta::Max());
}
void WebMClusterParser::Track::ExtractReadyBuffers(
const DecodeTimestamp before_timestamp) {
DCHECK(ready_buffers_.empty());
DCHECK(DecodeTimestamp() <= before_timestamp);
DCHECK(kNoDecodeTimestamp() != before_timestamp);
if (buffers_.empty())
return;
if (buffers_.back()->GetDecodeTimestamp() < before_timestamp) {
// All of |buffers_| are ready.
ready_buffers_.swap(buffers_);
DVLOG(3) << __FUNCTION__ << " : " << track_num_ << " All "
<< ready_buffers_.size() << " are ready: before upper bound ts "
<< before_timestamp.InSecondsF();
return;
}
// Not all of |buffers_| are ready yet. Move any that are ready to
// |ready_buffers_|.
while (true) {
const scoped_refptr<StreamParserBuffer>& buffer = buffers_.front();
if (buffer->GetDecodeTimestamp() >= before_timestamp)
break;
ready_buffers_.push_back(buffer);
buffers_.pop_front();
DCHECK(!buffers_.empty());
}
DVLOG(3) << __FUNCTION__ << " : " << track_num_ << " Only "
<< ready_buffers_.size() << " ready, " << buffers_.size()
<< " at or after upper bound ts " << before_timestamp.InSecondsF();
}
bool WebMClusterParser::Track::AddBuffer(
const scoped_refptr<StreamParserBuffer>& buffer) {
DVLOG(2) << "AddBuffer() : " << track_num_
<< " ts " << buffer->timestamp().InSecondsF()
<< " dur " << buffer->duration().InSecondsF()
<< " kf " << buffer->is_key_frame()
<< " size " << buffer->data_size();
if (last_added_buffer_missing_duration_.get()) {
base::TimeDelta derived_duration =
buffer->timestamp() - last_added_buffer_missing_duration_->timestamp();
last_added_buffer_missing_duration_->set_duration(derived_duration);
DVLOG(2) << "AddBuffer() : applied derived duration to held-back buffer : "
<< " ts "
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
<< " kf " << last_added_buffer_missing_duration_->is_key_frame()
<< " size " << last_added_buffer_missing_duration_->data_size();
scoped_refptr<StreamParserBuffer> updated_buffer =
last_added_buffer_missing_duration_;
last_added_buffer_missing_duration_ = NULL;
if (!QueueBuffer(updated_buffer))
return false;
}
if (buffer->duration() == kNoTimestamp()) {
last_added_buffer_missing_duration_ = buffer;
DVLOG(2) << "AddBuffer() : holding back buffer that is missing duration";
return true;
}
return QueueBuffer(buffer);
}
void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
if (!last_added_buffer_missing_duration_.get())
return;
base::TimeDelta estimated_duration = GetDurationEstimate();
last_added_buffer_missing_duration_->set_duration(estimated_duration);
if (is_video_) {
// Exposing estimation so splicing/overlap frame processing can make
// informed decisions downstream.
// TODO(chcunningham): Set this for audio as well in later change where
// audio is switched to max estimation and splicing is disabled.
last_added_buffer_missing_duration_->set_is_duration_estimated(true);
}
LIMITED_MEDIA_LOG(INFO, media_log_, num_duration_estimates_,
kMaxDurationEstimateLogs)
<< "Estimating WebM block duration to be "
<< estimated_duration.InMilliseconds()
<< "ms for the last (Simple)Block in the Cluster for this Track. Use "
"BlockGroups with BlockDurations at the end of each Track in a "
"Cluster to avoid estimation.";
DVLOG(2) << __FUNCTION__ << " new dur : ts "
<< last_added_buffer_missing_duration_->timestamp().InSecondsF()
<< " dur "
<< last_added_buffer_missing_duration_->duration().InSecondsF()
<< " kf " << last_added_buffer_missing_duration_->is_key_frame()
<< " size " << last_added_buffer_missing_duration_->data_size();
// Don't use the applied duration as a future estimation (don't use
// QueueBuffer() here.)
buffers_.push_back(last_added_buffer_missing_duration_);
last_added_buffer_missing_duration_ = NULL;
}
void WebMClusterParser::Track::ClearReadyBuffers() {
// Note that |buffers_| are kept and |estimated_next_frame_duration_| is not
// reset here.
ready_buffers_.clear();
}
void WebMClusterParser::Track::Reset() {
ClearReadyBuffers();
buffers_.clear();
last_added_buffer_missing_duration_ = NULL;
}
bool WebMClusterParser::Track::IsKeyframe(const uint8_t* data, int size) const {
// For now, assume that all blocks are keyframes for datatypes other than
// video. This is a valid assumption for Vorbis, WebVTT, & Opus.
if (!is_video_)
return true;
// Make sure the block is big enough for the minimal keyframe header size.
if (size < 7)
return false;
// The LSb of the first byte must be a 0 for a keyframe.
// http://tools.ietf.org/html/rfc6386 Section 19.1
if ((data[0] & 0x01) != 0)
return false;
// Verify VP8 keyframe startcode.
// http://tools.ietf.org/html/rfc6386 Section 19.1
if (data[3] != 0x9d || data[4] != 0x01 || data[5] != 0x2a)
return false;
return true;
}
bool WebMClusterParser::Track::QueueBuffer(
const scoped_refptr<StreamParserBuffer>& buffer) {
DCHECK(!last_added_buffer_missing_duration_.get());
// WebMClusterParser::OnBlock() gives MEDIA_LOG and parse error on decreasing
// block timecode detection within a cluster. Therefore, we should not see
// those here.
DecodeTimestamp previous_buffers_timestamp = buffers_.empty() ?
DecodeTimestamp() : buffers_.back()->GetDecodeTimestamp();
CHECK(previous_buffers_timestamp <= buffer->GetDecodeTimestamp());
base::TimeDelta duration = buffer->duration();
if (duration < base::TimeDelta() || duration == kNoTimestamp()) {
MEDIA_LOG(ERROR, media_log_)
<< "Invalid buffer duration: " << duration.InSecondsF();
return false;
}
// The estimated frame duration is the minimum (for audio) or the maximum
// (for video) non-zero duration since the last initialization segment. The
// minimum is used for audio to ensure frame durations aren't overestimated,
// triggering unnecessary frame splicing. For video, splicing does not apply,
// so maximum is used and overlap is simply resolved by showing the
// later of the overlapping frames at its given PTS, effectively trimming down
// the over-estimated duration of the previous frame.
// TODO(chcunningham): Use max for audio and disable splicing whenever
// estimated buffers are encountered.
if (duration > base::TimeDelta()) {
base::TimeDelta orig_duration_estimate = estimated_next_frame_duration_;
if (estimated_next_frame_duration_ == kNoTimestamp()) {
estimated_next_frame_duration_ = duration;
} else if (is_video_) {
estimated_next_frame_duration_ =
std::max(duration, estimated_next_frame_duration_);
} else {
estimated_next_frame_duration_ =
std::min(duration, estimated_next_frame_duration_);
}
if (orig_duration_estimate != estimated_next_frame_duration_) {
DVLOG(3) << "Updated duration estimate:"
<< orig_duration_estimate
<< " -> "
<< estimated_next_frame_duration_
<< " at timestamp: "
<< buffer->GetDecodeTimestamp().InSecondsF();
}
}
buffers_.push_back(buffer);
return true;
}
base::TimeDelta WebMClusterParser::Track::GetDurationEstimate() {
base::TimeDelta duration = estimated_next_frame_duration_;
if (duration != kNoTimestamp()) {
DVLOG(3) << __FUNCTION__ << " : using estimated duration";
} else {
DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration";
if (is_video_) {
duration = base::TimeDelta::FromMilliseconds(
kDefaultVideoBufferDurationInMs);
} else {
duration = base::TimeDelta::FromMilliseconds(
kDefaultAudioBufferDurationInMs);
}
}
DCHECK(duration > base::TimeDelta());
DCHECK(duration != kNoTimestamp());
return duration;
}
void WebMClusterParser::ClearTextTrackReadyBuffers() {
text_buffers_map_.clear();
for (TextTrackMap::iterator it = text_track_map_.begin();
it != text_track_map_.end();
++it) {
it->second.ClearReadyBuffers();
}
}
void WebMClusterParser::ResetTextTracks() {
ClearTextTrackReadyBuffers();
for (TextTrackMap::iterator it = text_track_map_.begin();
it != text_track_map_.end();
++it) {
it->second.Reset();
}
}
void WebMClusterParser::UpdateReadyBuffers() {
DCHECK(ready_buffer_upper_bound_ == kNoDecodeTimestamp());
DCHECK(text_buffers_map_.empty());
if (cluster_ended_) {
audio_.ApplyDurationEstimateIfNeeded();
video_.ApplyDurationEstimateIfNeeded();
// Per OnBlock(), all text buffers should already have valid durations, so
// there is no need to call ApplyDurationEstimateIfNeeded() on text tracks
// here.
ready_buffer_upper_bound_ =
DecodeTimestamp::FromPresentationTime(base::TimeDelta::Max());
DCHECK(ready_buffer_upper_bound_ == audio_.GetReadyUpperBound());
DCHECK(ready_buffer_upper_bound_ == video_.GetReadyUpperBound());
} else {
ready_buffer_upper_bound_ = std::min(audio_.GetReadyUpperBound(),
video_.GetReadyUpperBound());
DCHECK(DecodeTimestamp() <= ready_buffer_upper_bound_);
DCHECK(kNoDecodeTimestamp() != ready_buffer_upper_bound_);
}
// Prepare each track's ready buffers for retrieval.
audio_.ExtractReadyBuffers(ready_buffer_upper_bound_);
video_.ExtractReadyBuffers(ready_buffer_upper_bound_);
for (TextTrackMap::iterator itr = text_track_map_.begin();
itr != text_track_map_.end();
++itr) {
itr->second.ExtractReadyBuffers(ready_buffer_upper_bound_);
}
}
WebMClusterParser::Track*
WebMClusterParser::FindTextTrack(int track_num) {
const TextTrackMap::iterator it = text_track_map_.find(track_num);
if (it == text_track_map_.end())
return NULL;
return &it->second;
}
} // namespace media

View File

@ -0,0 +1,325 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_CLUSTER_PARSER_H_
#define MEDIA_FORMATS_WEBM_WEBM_CLUSTER_PARSER_H_
#include <deque>
#include <map>
#include <set>
#include <string>
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
#include "media/base/stream_parser.h"
#include "media/base/stream_parser_buffer.h"
#include "media/formats/webm/webm_parser.h"
#include "media/formats/webm/webm_tracks_parser.h"
namespace media {
class MEDIA_EXPORT WebMClusterParser : public WebMParserClient {
public:
typedef StreamParser::TrackId TrackId;
typedef std::deque<scoped_refptr<StreamParserBuffer> > BufferQueue;
typedef std::map<TrackId, const BufferQueue> TextBufferQueueMap;
// Numbers chosen to estimate the duration of a buffer if none is set and
// there is not enough information to get a better estimate.
enum {
// Common 1k samples @44.1kHz
kDefaultAudioBufferDurationInMs = 23,
// Chosen to represent 16fps duration, which will prevent MSE stalls in
// videos with frame-rates as low as 8fps.
kDefaultVideoBufferDurationInMs = 63
};
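// (For reference: 1024 samples at 44.1 kHz is roughly 23.2 ms, and one frame
// at 16 fps is 62.5 ms, which is where the 23 ms and 63 ms defaults above
// come from.)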
// Opus packets encode the duration and other parameters in the 5 most
// significant bits of the first byte. The index in this array corresponds
// to the duration of each frame of the packet in microseconds. See
// https://tools.ietf.org/html/rfc6716#page-14
static const uint16_t kOpusFrameDurationsMu[];
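// A minimal sketch of how this table can be consulted, assuming the RFC 6716
// TOC byte layout (illustrative only; ReadOpusDuration() below is the actual
// implementation and adds validation and logging):
//
//   uint8_t toc = data[0];
//   int frame_count = 1;
//   switch (toc & 0x3) {  // Frame-count code in the 2 least significant bits.
//     case 0: frame_count = 1; break;
//     case 1:
//     case 2: frame_count = 2; break;
//     case 3: frame_count = (size > 1) ? (data[1] & 0x3F) : 0; break;
//   }
//   base::TimeDelta duration = base::TimeDelta::FromMicroseconds(
//       kOpusFrameDurationsMu[toc >> 3] * frame_count);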
private:
// Helper class that manages per-track state.
class Track {
public:
Track(int track_num,
bool is_video,
base::TimeDelta default_duration,
const scoped_refptr<MediaLog>& media_log);
~Track();
int track_num() const { return track_num_; }
// If a buffer is currently held aside pending duration calculation, returns
// its decode timestamp. Otherwise, returns kInfiniteDuration().
DecodeTimestamp GetReadyUpperBound();
// Prepares |ready_buffers_| for retrieval. Prior to calling,
// |ready_buffers_| must be empty. Moves all |buffers_| with decode
// timestamp before |before_timestamp| to |ready_buffers_|, preserving their
// order.
void ExtractReadyBuffers(const DecodeTimestamp before_timestamp);
const BufferQueue& ready_buffers() const { return ready_buffers_; }
// If |last_added_buffer_missing_duration_| is set, updates its duration
// relative to |buffer|'s timestamp, and adds it to |buffers_| and unsets
// |last_added_buffer_missing_duration_|. Then, if |buffer| is missing
// duration, saves |buffer| into |last_added_buffer_missing_duration_|, or
// otherwise adds |buffer| to |buffers_|.
bool AddBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
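// Illustrative example: if a buffer B1 at 10 ms arrives with no duration and
// a buffer B2 at 33 ms arrives next, B1's duration is set to 23 ms (B2's
// timestamp minus B1's) and B1 is queued before B2 is considered.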
// If |last_added_buffer_missing_duration_| is set, updates its duration to
// be non-kNoTimestamp() value of |estimated_next_frame_duration_| or a
// hard-coded default, then adds it to |buffers_| and unsets
// |last_added_buffer_missing_duration_|. (This method helps stream parser
// emit all buffers in a media segment before signaling end of segment.)
void ApplyDurationEstimateIfNeeded();
// Clears |ready_buffers_| (use ExtractReadyBuffers() to fill it again).
// Leaves as-is |buffers_| and any possibly held-aside buffer that is
// missing duration.
void ClearReadyBuffers();
// Clears all buffer state, including any possibly held-aside buffer that
// was missing duration, and all contents of |buffers_| and
// |ready_buffers_|.
void Reset();
// Helper function used to inspect block data to determine if the
// block is a keyframe.
// |data| contains the bytes in the block.
// |size| indicates the number of bytes in |data|.
bool IsKeyframe(const uint8_t* data, int size) const;
base::TimeDelta default_duration() const { return default_duration_; }
private:
// Helper that sanity-checks |buffer| duration, updates
// |estimated_next_frame_duration_|, and adds |buffer| to |buffers_|.
// Returns false if |buffer| failed sanity check and therefore was not added
// to |buffers_|. Returns true otherwise.
bool QueueBuffer(const scoped_refptr<StreamParserBuffer>& buffer);
// Helper that calculates the buffer duration to use in
// ApplyDurationEstimateIfNeeded().
base::TimeDelta GetDurationEstimate();
// Counts the number of estimated durations used in this track. Used to
// prevent log spam for MEDIA_LOG()s about estimated duration.
int num_duration_estimates_ = 0;
int track_num_;
bool is_video_;
// Parsed track buffers, each with duration and in (decode) timestamp order,
// that have not yet been extracted into |ready_buffers_|. Note that up to
// one additional buffer missing duration may be tracked by
// |last_added_buffer_missing_duration_|.
BufferQueue buffers_;
scoped_refptr<StreamParserBuffer> last_added_buffer_missing_duration_;
// Buffers in (decode) timestamp order that were previously parsed into and
// extracted from |buffers_|. Buffers are moved from |buffers_| to
// |ready_buffers_| by ExtractReadyBuffers() if they are below a specified
// upper bound timestamp. Track users can therefore extract only those
// parsed buffers which are "ready" for emission (all before some maximum
// timestamp).
BufferQueue ready_buffers_;
// If kNoTimestamp(), then |estimated_next_frame_duration_| will be used.
base::TimeDelta default_duration_;
// If kNoTimestamp(), then a default value will be used. This estimate is
// the maximum (for video), or minimum (for audio) duration seen so far for
// this track, and is used only if |default_duration_| is kNoTimestamp().
// TODO(chcunningham): Use maximum for audio too, adding checks to disable
// splicing when these estimates are observed in SourceBufferStream.
base::TimeDelta estimated_next_frame_duration_;
scoped_refptr<MediaLog> media_log_;
};
typedef std::map<int, Track> TextTrackMap;
public:
WebMClusterParser(int64 timecode_scale,
int audio_track_num,
base::TimeDelta audio_default_duration,
int video_track_num,
base::TimeDelta video_default_duration,
const WebMTracksParser::TextTracks& text_tracks,
const std::set<int64>& ignored_tracks,
const std::string& audio_encryption_key_id,
const std::string& video_encryption_key_id,
const AudioCodec audio_codec,
const scoped_refptr<MediaLog>& media_log);
~WebMClusterParser() override;
// Resets the parser state so it can accept a new cluster.
void Reset();
// Parses a WebM cluster element in |buf|.
//
// Returns -1 if the parse fails.
// Returns 0 if more data is needed.
// Returns the number of bytes parsed on success.
int Parse(const uint8_t* buf, int size);
base::TimeDelta cluster_start_time() const { return cluster_start_time_; }
// Get the current ready buffers resulting from Parse().
// If the parse reached the end of cluster and the last buffer was held aside
// due to missing duration, the buffer is given an estimated duration and
// included in the result.
// Otherwise, if there is a buffer held aside due to missing duration for any
// of the tracks, no buffers with the same or greater (decode) timestamp will
// be included in the returned buffers.
// The returned deques are cleared by Parse() or Reset() and updated by the
// next calls to Get{Audio,Video}Buffers().
// If no Parse() or Reset() has occurred since the last call to Get{Audio,
// Video,Text}Buffers(), then the previous BufferQueue& is returned again
// without any recalculation.
const BufferQueue& GetAudioBuffers();
const BufferQueue& GetVideoBuffers();
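// Rough usage sketch (illustrative; construction arguments come from the
// previously parsed Tracks element, and the names below are placeholders):
//
//   int result = cluster_parser.Parse(cluster_data, cluster_size);
//   if (result > 0) {
//     const BufferQueue& audio = cluster_parser.GetAudioBuffers();
//     const BufferQueue& video = cluster_parser.GetVideoBuffers();
//     // Hand the ready buffers to the caller (e.g. the WebM stream parser).
//   }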
// Constructs and returns a subset of |text_track_map_| containing only
// tracks with non-empty buffer queues produced by the last Parse() and
// filtered to exclude any buffers that have (decode) timestamp same or
// greater than the lowest (decode) timestamp across all tracks of any buffer
// held aside due to missing duration (unless the end of cluster has been
// reached).
// The returned map is cleared by Parse() or Reset() and updated by the next
// call to GetTextBuffers().
// If no Parse() or Reset() has occurred since the last call to
// GetTextBuffers(), then the previous TextBufferQueueMap& is returned again
// without any recalculation.
const TextBufferQueueMap& GetTextBuffers();
// Returns true if the last Parse() call stopped at the end of a cluster.
bool cluster_ended() const { return cluster_ended_; }
private:
// WebMParserClient methods.
WebMParserClient* OnListStart(int id) override;
bool OnListEnd(int id) override;
bool OnUInt(int id, int64 val) override;
bool OnBinary(int id, const uint8_t* data, int size) override;
bool ParseBlock(bool is_simple_block,
const uint8_t* buf,
int size,
const uint8_t* additional,
int additional_size,
int duration,
int64 discard_padding);
bool OnBlock(bool is_simple_block,
int track_num,
int timecode,
int duration,
int flags,
const uint8_t* data,
int size,
const uint8_t* additional,
int additional_size,
int64 discard_padding);
// Resets the Track objects associated with each text track.
void ResetTextTracks();
// Clears the ready buffers associated with each text track.
void ClearTextTrackReadyBuffers();
// Helper method for Get{Audio,Video,Text}Buffers() that recomputes
// |ready_buffer_upper_bound_| and calls ExtractReadyBuffers() on each track.
// If |cluster_ended_| is true, first applies duration estimate if needed for
// |audio_| and |video_| and sets |ready_buffer_upper_bound_| to
// kInfiniteDuration(). Otherwise, sets |ready_buffer_upper_bound_| to the
// minimum upper bound across |audio_| and |video_|. (Text tracks never have
// buffers missing duration, so they are not involved in calculating the upper
// bound.)
// Parse() or Reset() must be called between calls to UpdateReadyBuffers() to
// clear each track's ready buffers and to reset |ready_buffer_upper_bound_|
// to kNoDecodeTimestamp().
void UpdateReadyBuffers();
// Search for the indicated track_num among the text tracks. Returns NULL
// if that track num is not a text track.
Track* FindTextTrack(int track_num);
// Attempts to read the duration from the encoded audio data, returning as
// TimeDelta or kNoTimestamp() if duration cannot be retrieved. This obviously
// violates layering rules, but is useful for MSE to know duration in cases
// where it isn't explicitly given and cannot be calculated for Blocks at the
// end of a Cluster (the next Cluster in playback-order may not be the next
// Cluster we parse, so we can't simply use the delta of the first Block in
// the next Cluster). Avoid calling if encrypted; may produce unexpected
// output. See implementation for supported codecs.
base::TimeDelta TryGetEncodedAudioDuration(const uint8_t* data, int size);
// Reads Opus packet header to determine packet duration. Duration returned
// as TimeDelta or kNoTimestamp() upon failure to read duration from packet.
base::TimeDelta ReadOpusDuration(const uint8_t* data, int size);
// Tracks the number of MEDIA_LOGs made in process of reading encoded
// duration. Useful to prevent log spam.
int num_duration_errors_ = 0;
double timecode_multiplier_; // Multiplier used to convert timecodes into
// microseconds.
std::set<int64> ignored_tracks_;
std::string audio_encryption_key_id_;
std::string video_encryption_key_id_;
const AudioCodec audio_codec_;
WebMListParser parser_;
int64 last_block_timecode_ = -1;
scoped_ptr<uint8_t[]> block_data_;
int block_data_size_ = -1;
int64 block_duration_ = -1;
int64 block_add_id_ = -1;
scoped_ptr<uint8_t[]> block_additional_data_;
// Must be 0 if |block_additional_data_| is null. Must be > 0 if
// |block_additional_data_| is NOT null.
int block_additional_data_size_ = 0;
int64 discard_padding_ = -1;
bool discard_padding_set_ = false;
int64 cluster_timecode_ = -1;
base::TimeDelta cluster_start_time_;
bool cluster_ended_ = false;
Track audio_;
Track video_;
TextTrackMap text_track_map_;
// Subset of |text_track_map_| maintained by GetTextBuffers(), and cleared by
// ClearTextTrackReadyBuffers(). Callers of GetTextBuffers() get a const-ref
// to this member.
TextBufferQueueMap text_buffers_map_;
// Limits the range of buffers returned by Get{Audio,Video,Text}Buffers() to
// this exclusive upper bound. Set to kNoDecodeTimestamp(), meaning not yet
// calculated, by Reset() and Parse(). If kNoDecodeTimestamp(), then
// Get{Audio,Video,Text}Buffers() will calculate it to be the minimum (decode)
// timestamp across all tracks' |last_added_buffer_missing_duration_|, or
// kInfiniteDuration() if no buffers are currently missing duration.
DecodeTimestamp ready_buffer_upper_bound_;
scoped_refptr<MediaLog> media_log_;
DISALLOW_IMPLICIT_CONSTRUCTORS(WebMClusterParser);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_CLUSTER_PARSER_H_

File diff suppressed because it is too large

View File

@ -0,0 +1,14 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_constants.h"
namespace media {
const char kWebMCodecSubtitles[] = "D_WEBVTT/SUBTITLES";
const char kWebMCodecCaptions[] = "D_WEBVTT/CAPTIONS";
const char kWebMCodecDescriptions[] = "D_WEBVTT/DESCRIPTIONS";
const char kWebMCodecMetadata[] = "D_WEBVTT/METADATA";
} // namespace media

View File

@ -0,0 +1,229 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_CONSTANTS_H_
#define MEDIA_FORMATS_WEBM_WEBM_CONSTANTS_H_
#include "base/basictypes.h"
#include "media/base/media_export.h"
namespace media {
// WebM element IDs.
// This is a subset of the IDs in the Matroska spec.
// http://www.matroska.org/technical/specs/index.html
const int kWebMIdAESSettingsCipherMode = 0x47E8;
const int kWebMIdAlphaMode = 0x53C0;
const int kWebMIdAspectRatioType = 0x54B3;
const int kWebMIdAttachedFile = 0x61A7;
const int kWebMIdAttachmentLink = 0x7446;
const int kWebMIdAttachments = 0x1941A469;
const int kWebMIdAudio = 0xE1;
const int kWebMIdBitDepth = 0x6264;
const int kWebMIdBlock = 0xA1;
const int kWebMIdBlockAddID = 0xEE;
const int kWebMIdBlockAdditions = 0x75A1;
const int kWebMIdBlockAdditional = 0xA5;
const int kWebMIdBlockDuration = 0x9B;
const int kWebMIdBlockGroup = 0xA0;
const int kWebMIdBlockMore = 0xA6;
const int kWebMIdChannels = 0x9F;
const int kWebMIdChapCountry = 0x437E;
const int kWebMIdChapLanguage = 0x437C;
const int kWebMIdChapProcess = 0x6944;
const int kWebMIdChapProcessCodecID = 0x6955;
const int kWebMIdChapProcessCommand = 0x6911;
const int kWebMIdChapProcessData = 0x6933;
const int kWebMIdChapProcessPrivate = 0x450D;
const int kWebMIdChapProcessTime = 0x6922;
const int kWebMIdChapString = 0x85;
const int kWebMIdChapterAtom = 0xB6;
const int kWebMIdChapterDisplay = 0x80;
const int kWebMIdChapterFlagEnabled = 0x4598;
const int kWebMIdChapterFlagHidden = 0x98;
const int kWebMIdChapterPhysicalEquiv = 0x63C3;
const int kWebMIdChapters = 0x1043A770;
const int kWebMIdChapterSegmentEditionUID = 0x6EBC;
const int kWebMIdChapterSegmentUID = 0x6E67;
const int kWebMIdChapterTimeEnd = 0x92;
const int kWebMIdChapterTimeStart = 0x91;
const int kWebMIdChapterTrack = 0x8F;
const int kWebMIdChapterTrackNumber = 0x89;
const int kWebMIdChapterTranslate = 0x6924;
const int kWebMIdChapterTranslateCodec = 0x69BF;
const int kWebMIdChapterTranslateEditionUID = 0x69FC;
const int kWebMIdChapterTranslateID = 0x69A5;
const int kWebMIdChapterUID = 0x73C4;
const int kWebMIdCluster = 0x1F43B675;
const int kWebMIdCodecDecodeAll = 0xAA;
const int kWebMIdCodecDelay = 0x56AA;
const int kWebMIdCodecID = 0x86;
const int kWebMIdCodecName = 0x258688;
const int kWebMIdCodecPrivate = 0x63A2;
const int kWebMIdCodecState = 0xA4;
const int kWebMIdColorSpace = 0x2EB524;
const int kWebMIdContentCompAlgo = 0x4254;
const int kWebMIdContentCompression = 0x5034;
const int kWebMIdContentCompSettings = 0x4255;
const int kWebMIdContentEncAESSettings = 0x47E7;
const int kWebMIdContentEncAlgo = 0x47E1;
const int kWebMIdContentEncKeyID = 0x47E2;
const int kWebMIdContentEncoding = 0x6240;
const int kWebMIdContentEncodingOrder = 0x5031;
const int kWebMIdContentEncodings = 0x6D80;
const int kWebMIdContentEncodingScope = 0x5032;
const int kWebMIdContentEncodingType = 0x5033;
const int kWebMIdContentEncryption = 0x5035;
const int kWebMIdContentSigAlgo = 0x47E5;
const int kWebMIdContentSigHashAlgo = 0x47E6;
const int kWebMIdContentSigKeyID = 0x47E4;
const int kWebMIdContentSignature = 0x47E3;
const int kWebMIdCRC32 = 0xBF;
const int kWebMIdCueBlockNumber = 0x5378;
const int kWebMIdCueClusterPosition = 0xF1;
const int kWebMIdCueCodecState = 0xEA;
const int kWebMIdCuePoint = 0xBB;
const int kWebMIdCueReference = 0xDB;
const int kWebMIdCueRefTime = 0x96;
const int kWebMIdCues = 0x1C53BB6B;
const int kWebMIdCueTime = 0xB3;
const int kWebMIdCueTrack = 0xF7;
const int kWebMIdCueTrackPositions = 0xB7;
const int kWebMIdDateUTC = 0x4461;
const int kWebMIdDefaultDuration = 0x23E383;
const int kWebMIdDiscardPadding = 0x75A2;
const int kWebMIdDisplayHeight = 0x54BA;
const int kWebMIdDisplayUnit = 0x54B2;
const int kWebMIdDisplayWidth = 0x54B0;
const int kWebMIdDocType = 0x4282;
const int kWebMIdDocTypeReadVersion = 0x4285;
const int kWebMIdDocTypeVersion = 0x4287;
const int kWebMIdDuration = 0x4489;
const int kWebMIdEBMLHeader = 0x1A45DFA3;
const int kWebMIdEBMLMaxIDLength = 0x42F2;
const int kWebMIdEBMLMaxSizeLength = 0x42F3;
const int kWebMIdEBMLReadVersion = 0x42F7;
const int kWebMIdEBMLVersion = 0x4286;
const int kWebMIdEditionEntry = 0x45B9;
const int kWebMIdEditionFlagDefault = 0x45DB;
const int kWebMIdEditionFlagHidden = 0x45BD;
const int kWebMIdEditionFlagOrdered = 0x45DD;
const int kWebMIdEditionUID = 0x45BC;
const int kWebMIdFileData = 0x465C;
const int kWebMIdFileDescription = 0x467E;
const int kWebMIdFileMimeType = 0x4660;
const int kWebMIdFileName = 0x466E;
const int kWebMIdFileUID = 0x46AE;
const int kWebMIdFlagDefault = 0x88;
const int kWebMIdFlagEnabled = 0xB9;
const int kWebMIdFlagForced = 0x55AA;
const int kWebMIdFlagInterlaced = 0x9A;
const int kWebMIdFlagLacing = 0x9C;
const int kWebMIdFrameRate = 0x2383E3;
const int kWebMIdInfo = 0x1549A966;
const int kWebMIdJoinBlocks = 0xE9;
const int kWebMIdLaceNumber = 0xCC;
const int kWebMIdLanguage = 0x22B59C;
const int kWebMIdMaxBlockAdditionId = 0x55EE;
const int kWebMIdMaxCache = 0x6DF8;
const int kWebMIdMinCache = 0x6DE7;
const int kWebMIdMuxingApp = 0x4D80;
const int kWebMIdName = 0x536E;
const int kWebMIdNextFilename = 0x3E83BB;
const int kWebMIdNextUID = 0x3EB923;
const int kWebMIdOutputSamplingFrequency = 0x78B5;
const int kWebMIdPixelCropBottom = 0x54AA;
const int kWebMIdPixelCropLeft = 0x54CC;
const int kWebMIdPixelCropRight = 0x54DD;
const int kWebMIdPixelCropTop = 0x54BB;
const int kWebMIdPixelHeight = 0xBA;
const int kWebMIdPixelWidth = 0xB0;
const int kWebMIdPosition = 0xA7;
const int kWebMIdPrevFilename = 0x3C83AB;
const int kWebMIdPrevSize = 0xAB;
const int kWebMIdPrevUID = 0x3CB923;
const int kWebMIdReferenceBlock = 0xFB;
const int kWebMIdReferencePriority = 0xFA;
const int kWebMIdSamplingFrequency = 0xB5;
const int kWebMIdSeek = 0x4DBB;
const int kWebMIdSeekHead = 0x114D9B74;
const int kWebMIdSeekID = 0x53AB;
const int kWebMIdSeekPosition = 0x53AC;
const int kWebMIdSeekPreRoll = 0x56BB;
const int kWebMIdSegment = 0x18538067;
const int kWebMIdSegmentFamily = 0x4444;
const int kWebMIdSegmentFilename = 0x7384;
const int kWebMIdSegmentUID = 0x73A4;
const int kWebMIdSilentTrackNumber = 0x58D7;
const int kWebMIdSilentTracks = 0x5854;
const int kWebMIdSimpleBlock = 0xA3;
const int kWebMIdSimpleTag = 0x67C8;
const int kWebMIdSlices = 0x8E;
const int kWebMIdStereoMode = 0x53B8;
const int kWebMIdTag = 0x7373;
const int kWebMIdTagAttachmentUID = 0x63C6;
const int kWebMIdTagBinary = 0x4485;
const int kWebMIdTagChapterUID = 0x63C4;
const int kWebMIdTagDefault = 0x4484;
const int kWebMIdTagEditionUID = 0x63C9;
const int kWebMIdTagLanguage = 0x447A;
const int kWebMIdTagName = 0x45A3;
const int kWebMIdTags = 0x1254C367;
const int kWebMIdTagString = 0x4487;
const int kWebMIdTagTrackUID = 0x63C5;
const int kWebMIdTargets = 0x63C0;
const int kWebMIdTargetType = 0x63CA;
const int kWebMIdTargetTypeValue = 0x68CA;
const int kWebMIdTimecode = 0xE7;
const int kWebMIdTimecodeScale = 0x2AD7B1;
const int kWebMIdTimeSlice = 0xE8;
const int kWebMIdTitle = 0x7BA9;
const int kWebMIdTrackCombinePlanes = 0xE3;
const int kWebMIdTrackEntry = 0xAE;
const int kWebMIdTrackJoinUID = 0xED;
const int kWebMIdTrackNumber = 0xD7;
const int kWebMIdTrackOperation = 0xE2;
const int kWebMIdTrackOverlay = 0x6FAB;
const int kWebMIdTrackPlane = 0xE4;
const int kWebMIdTrackPlaneType = 0xE6;
const int kWebMIdTrackPlaneUID = 0xE5;
const int kWebMIdTracks = 0x1654AE6B;
const int kWebMIdTrackTimecodeScale = 0x23314F;
const int kWebMIdTrackTranslate = 0x6624;
const int kWebMIdTrackTranslateCodec = 0x66BF;
const int kWebMIdTrackTranslateEditionUID = 0x66FC;
const int kWebMIdTrackTranslateTrackID = 0x66A5;
const int kWebMIdTrackType = 0x83;
const int kWebMIdTrackUID = 0x73C5;
const int kWebMIdVideo = 0xE0;
const int kWebMIdVoid = 0xEC;
const int kWebMIdWritingApp = 0x5741;
const int64 kWebMReservedId = 0x1FFFFFFF;
const int64 kWebMUnknownSize = 0x00FFFFFFFFFFFFFFLL;
const uint8 kWebMFlagKeyframe = 0x80;
// Current encrypted WebM request for comments specification is here
// http://wiki.webmproject.org/encryption/webm-encryption-rfc
const uint8 kWebMFlagEncryptedFrame = 0x1;
const int kWebMIvSize = 8;
const int kWebMSignalByteSize = 1;
// Current specification for WebVTT embedded in WebM
// http://wiki.webmproject.org/webm-metadata/temporal-metadata/webvtt-in-webm
const int kWebMTrackTypeVideo = 1;
const int kWebMTrackTypeAudio = 2;
const int kWebMTrackTypeSubtitlesOrCaptions = 0x11;
const int kWebMTrackTypeDescriptionsOrMetadata = 0x21;
MEDIA_EXPORT extern const char kWebMCodecSubtitles[];
MEDIA_EXPORT extern const char kWebMCodecCaptions[];
MEDIA_EXPORT extern const char kWebMCodecDescriptions[];
MEDIA_EXPORT extern const char kWebMCodecMetadata[];
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_CONSTANTS_H_

View File

@ -0,0 +1,28 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/logging.h"
#include "media/formats/webm/webm_content_encodings.h"
namespace media {
ContentEncoding::ContentEncoding()
: order_(kOrderInvalid),
scope_(kScopeInvalid),
type_(kTypeInvalid),
encryption_algo_(kEncAlgoInvalid),
cipher_mode_(kCipherModeInvalid) {
}
ContentEncoding::~ContentEncoding() {}
void ContentEncoding::SetEncryptionKeyId(const uint8* encryption_key_id,
int size) {
DCHECK(encryption_key_id);
DCHECK_GT(size, 0);
encryption_key_id_.assign(reinterpret_cast<const char*>(encryption_key_id),
size);
}
} // namespace media

View File

@ -0,0 +1,88 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_H_
#define MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_H_
#include <string>
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
namespace media {
class MEDIA_EXPORT ContentEncoding {
public:
// The following enum definitions are based on the ContentEncoding element
// specified in the Matroska spec.
static const int kOrderInvalid = -1;
enum Scope {
kScopeInvalid = 0,
kScopeAllFrameContents = 1,
kScopeTrackPrivateData = 2,
kScopeNextContentEncodingData = 4,
kScopeMax = 7,
};
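// Scope values form a bit field, so they may be combined; for example, a
// ContentEncodingScope of 3 means
// kScopeAllFrameContents | kScopeTrackPrivateData.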
enum Type {
kTypeInvalid = -1,
kTypeCompression = 0,
kTypeEncryption = 1,
};
enum EncryptionAlgo {
kEncAlgoInvalid = -1,
kEncAlgoNotEncrypted = 0,
kEncAlgoDes = 1,
kEncAlgo3des = 2,
kEncAlgoTwofish = 3,
kEncAlgoBlowfish = 4,
kEncAlgoAes = 5,
};
enum CipherMode {
kCipherModeInvalid = 0,
kCipherModeCtr = 1,
};
ContentEncoding();
~ContentEncoding();
int64 order() const { return order_; }
void set_order(int64 order) { order_ = order; }
Scope scope() const { return scope_; }
void set_scope(Scope scope) { scope_ = scope; }
Type type() const { return type_; }
void set_type(Type type) { type_ = type; }
EncryptionAlgo encryption_algo() const { return encryption_algo_; }
void set_encryption_algo(EncryptionAlgo encryption_algo) {
encryption_algo_ = encryption_algo;
}
const std::string& encryption_key_id() const { return encryption_key_id_; }
void SetEncryptionKeyId(const uint8* encryption_key_id, int size);
CipherMode cipher_mode() const { return cipher_mode_; }
void set_cipher_mode(CipherMode mode) { cipher_mode_ = mode; }
private:
int64 order_;
Scope scope_;
Type type_;
EncryptionAlgo encryption_algo_;
std::string encryption_key_id_;
CipherMode cipher_mode_;
DISALLOW_COPY_AND_ASSIGN(ContentEncoding);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_H_

View File

@ -0,0 +1,274 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_content_encodings_client.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
WebMContentEncodingsClient::WebMContentEncodingsClient(
const scoped_refptr<MediaLog>& media_log)
: media_log_(media_log),
content_encryption_encountered_(false),
content_encodings_ready_(false) {
}
WebMContentEncodingsClient::~WebMContentEncodingsClient() {
STLDeleteElements(&content_encodings_);
}
const ContentEncodings& WebMContentEncodingsClient::content_encodings() const {
DCHECK(content_encodings_ready_);
return content_encodings_;
}
WebMParserClient* WebMContentEncodingsClient::OnListStart(int id) {
if (id == kWebMIdContentEncodings) {
DCHECK(!cur_content_encoding_.get());
DCHECK(!content_encryption_encountered_);
STLDeleteElements(&content_encodings_);
content_encodings_ready_ = false;
return this;
}
if (id == kWebMIdContentEncoding) {
DCHECK(!cur_content_encoding_.get());
DCHECK(!content_encryption_encountered_);
cur_content_encoding_.reset(new ContentEncoding());
return this;
}
if (id == kWebMIdContentEncryption) {
DCHECK(cur_content_encoding_.get());
if (content_encryption_encountered_) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncryption.";
return NULL;
}
content_encryption_encountered_ = true;
return this;
}
if (id == kWebMIdContentEncAESSettings) {
DCHECK(cur_content_encoding_.get());
return this;
}
// This should not happen if WebMListParser is working properly.
DCHECK(false);
return NULL;
}
// Mandatory occurrence restriction is checked in this function. Multiple
// occurrence restriction is checked in OnUInt and OnBinary.
bool WebMContentEncodingsClient::OnListEnd(int id) {
if (id == kWebMIdContentEncodings) {
// ContentEncoding element is mandatory. Check this!
if (content_encodings_.empty()) {
MEDIA_LOG(ERROR, media_log_) << "Missing ContentEncoding.";
return false;
}
content_encodings_ready_ = true;
return true;
}
if (id == kWebMIdContentEncoding) {
DCHECK(cur_content_encoding_.get());
//
// Specify default values for missing mandatory elements.
//
if (cur_content_encoding_->order() == ContentEncoding::kOrderInvalid) {
// Default value of encoding order is 0, which should only be used on the
// first ContentEncoding.
if (!content_encodings_.empty()) {
MEDIA_LOG(ERROR, media_log_) << "Missing ContentEncodingOrder.";
return false;
}
cur_content_encoding_->set_order(0);
}
if (cur_content_encoding_->scope() == ContentEncoding::kScopeInvalid)
cur_content_encoding_->set_scope(ContentEncoding::kScopeAllFrameContents);
if (cur_content_encoding_->type() == ContentEncoding::kTypeInvalid)
cur_content_encoding_->set_type(ContentEncoding::kTypeCompression);
// Check for elements valid in spec but not supported for now.
if (cur_content_encoding_->type() == ContentEncoding::kTypeCompression) {
MEDIA_LOG(ERROR, media_log_) << "ContentCompression not supported.";
return false;
}
// Enforce mandatory elements without default values.
DCHECK(cur_content_encoding_->type() == ContentEncoding::kTypeEncryption);
if (!content_encryption_encountered_) {
MEDIA_LOG(ERROR, media_log_) << "ContentEncodingType is encryption but"
<< " ContentEncryption is missing.";
return false;
}
content_encodings_.push_back(cur_content_encoding_.release());
content_encryption_encountered_ = false;
return true;
}
if (id == kWebMIdContentEncryption) {
DCHECK(cur_content_encoding_.get());
// Specify default value for elements that are not present.
if (cur_content_encoding_->encryption_algo() ==
ContentEncoding::kEncAlgoInvalid) {
cur_content_encoding_->set_encryption_algo(
ContentEncoding::kEncAlgoNotEncrypted);
}
return true;
}
if (id == kWebMIdContentEncAESSettings) {
if (cur_content_encoding_->cipher_mode() ==
ContentEncoding::kCipherModeInvalid)
cur_content_encoding_->set_cipher_mode(ContentEncoding::kCipherModeCtr);
return true;
}
// This should not happen if WebMListParser is working properly.
DCHECK(false);
return false;
}
// Multiple occurrence restriction and range are checked in this function.
// Mandatory occurrence restriction is checked in OnListEnd.
bool WebMContentEncodingsClient::OnUInt(int id, int64 val) {
DCHECK(cur_content_encoding_.get());
if (id == kWebMIdContentEncodingOrder) {
if (cur_content_encoding_->order() != ContentEncoding::kOrderInvalid) {
MEDIA_LOG(ERROR, media_log_)
<< "Unexpected multiple ContentEncodingOrder.";
return false;
}
if (val != static_cast<int64>(content_encodings_.size())) {
// According to the spec, encoding order starts with 0 and counts upwards.
MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncodingOrder.";
return false;
}
cur_content_encoding_->set_order(val);
return true;
}
if (id == kWebMIdContentEncodingScope) {
if (cur_content_encoding_->scope() != ContentEncoding::kScopeInvalid) {
MEDIA_LOG(ERROR, media_log_)
<< "Unexpected multiple ContentEncodingScope.";
return false;
}
if (val == ContentEncoding::kScopeInvalid ||
val > ContentEncoding::kScopeMax) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncodingScope.";
return false;
}
if (val & ContentEncoding::kScopeNextContentEncodingData) {
MEDIA_LOG(ERROR, media_log_) << "Encoded next ContentEncoding is not "
"supported.";
return false;
}
cur_content_encoding_->set_scope(static_cast<ContentEncoding::Scope>(val));
return true;
}
if (id == kWebMIdContentEncodingType) {
if (cur_content_encoding_->type() != ContentEncoding::kTypeInvalid) {
MEDIA_LOG(ERROR, media_log_)
<< "Unexpected multiple ContentEncodingType.";
return false;
}
if (val == ContentEncoding::kTypeCompression) {
MEDIA_LOG(ERROR, media_log_) << "ContentCompression not supported.";
return false;
}
if (val != ContentEncoding::kTypeEncryption) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncodingType " << val
<< ".";
return false;
}
cur_content_encoding_->set_type(static_cast<ContentEncoding::Type>(val));
return true;
}
if (id == kWebMIdContentEncAlgo) {
if (cur_content_encoding_->encryption_algo() !=
ContentEncoding::kEncAlgoInvalid) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncAlgo.";
return false;
}
if (val < ContentEncoding::kEncAlgoNotEncrypted ||
val > ContentEncoding::kEncAlgoAes) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected ContentEncAlgo " << val
<< ".";
return false;
}
cur_content_encoding_->set_encryption_algo(
static_cast<ContentEncoding::EncryptionAlgo>(val));
return true;
}
if (id == kWebMIdAESSettingsCipherMode) {
if (cur_content_encoding_->cipher_mode() !=
ContentEncoding::kCipherModeInvalid) {
MEDIA_LOG(ERROR, media_log_)
<< "Unexpected multiple AESSettingsCipherMode.";
return false;
}
if (val != ContentEncoding::kCipherModeCtr) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected AESSettingsCipherMode " << val
<< ".";
return false;
}
cur_content_encoding_->set_cipher_mode(
static_cast<ContentEncoding::CipherMode>(val));
return true;
}
// This should not happen if WebMListParser is working properly.
DCHECK(false);
return false;
}
// Multiple occurrence restriction is checked in this function. Mandatory
// restriction is checked in OnListEnd.
bool WebMContentEncodingsClient::OnBinary(int id, const uint8* data, int size) {
DCHECK(cur_content_encoding_.get());
DCHECK(data);
DCHECK_GT(size, 0);
if (id == kWebMIdContentEncKeyID) {
if (!cur_content_encoding_->encryption_key_id().empty()) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected multiple ContentEncKeyID";
return false;
}
cur_content_encoding_->SetEncryptionKeyId(data, size);
return true;
}
// This should not happen if WebMListParser is working properly.
DCHECK(false);
return false;
}
} // namespace media

View File

@ -0,0 +1,50 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
#define MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_
#include <vector>
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_export.h"
#include "media/base/media_log.h"
#include "media/formats/webm/webm_content_encodings.h"
#include "media/formats/webm/webm_parser.h"
namespace media {
typedef std::vector<ContentEncoding*> ContentEncodings;
// Parser for WebM ContentEncodings element.
class MEDIA_EXPORT WebMContentEncodingsClient : public WebMParserClient {
public:
explicit WebMContentEncodingsClient(const scoped_refptr<MediaLog>& media_log);
~WebMContentEncodingsClient() override;
const ContentEncodings& content_encodings() const;
// WebMParserClient methods
WebMParserClient* OnListStart(int id) override;
bool OnListEnd(int id) override;
bool OnUInt(int id, int64 val) override;
bool OnBinary(int id, const uint8* data, int size) override;
private:
scoped_refptr<MediaLog> media_log_;
scoped_ptr<ContentEncoding> cur_content_encoding_;
bool content_encryption_encountered_;
ContentEncodings content_encodings_;
// True once |content_encodings_| is fully parsed. For debugging purposes.
bool content_encodings_ready_;
DISALLOW_COPY_AND_ASSIGN(WebMContentEncodingsClient);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_CONTENT_ENCODINGS_CLIENT_H_

View File

@ -0,0 +1,282 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_content_encodings_client.h"
#include <string>
#include "base/bind.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/mock_media_log.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::StrictMock;
namespace media {
// Matchers for verifying common media log entry strings.
MATCHER(MissingContentEncoding, "") {
return CONTAINS_STRING(arg, "Missing ContentEncoding.");
}
MATCHER(UnexpectedContentEncodingOrder, "") {
return CONTAINS_STRING(arg, "Unexpected ContentEncodingOrder.");
}
MATCHER(UnexpectedContentEncodingScope, "") {
return CONTAINS_STRING(arg, "Unexpected ContentEncodingScope.");
}
MATCHER(ContentCompressionNotSupported, "") {
return CONTAINS_STRING(arg, "ContentCompression not supported.");
}
MATCHER(MissingContentEncryption, "") {
return CONTAINS_STRING(
arg,
"ContentEncodingType is encryption but ContentEncryption is missing.");
}
MATCHER_P(UnexpectedContentEncAlgo, algo, "") {
return CONTAINS_STRING(
arg, "Unexpected ContentEncAlgo " + base::IntToString(algo) + ".");
}
class WebMContentEncodingsClientTest : public testing::Test {
public:
WebMContentEncodingsClientTest()
: media_log_(new StrictMock<MockMediaLog>()),
client_(media_log_),
parser_(kWebMIdContentEncodings, &client_) {}
void ParseAndExpectToFail(const uint8* buf, int size) {
int result = parser_.Parse(buf, size);
EXPECT_EQ(-1, result);
}
protected:
scoped_refptr<StrictMock<MockMediaLog>> media_log_;
WebMContentEncodingsClient client_;
WebMListParser parser_;
};
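// In the fixtures below, each element starts with its EBML ID (e.g. 0x6D 0x80
// for ContentEncodings) followed by a one-byte EBML-coded size, where
// 0x80 | n encodes size n (so 0x80 is size 0 and 0x9E is size 30).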
TEST_F(WebMContentEncodingsClientTest, EmptyContentEncodings) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x80, // ContentEncodings (size = 0)
};
int size = sizeof(kContentEncodings);
EXPECT_MEDIA_LOG(MissingContentEncoding());
ParseAndExpectToFail(kContentEncodings, size);
}
TEST_F(WebMContentEncodingsClientTest, EmptyContentEncoding) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x83, // ContentEncodings (size = 3)
0x63, 0x40, 0x80, // ContentEncoding (size = 0)
};
int size = sizeof(kContentEncodings);
ParseAndExpectToFail(kContentEncodings, size);
}
TEST_F(WebMContentEncodingsClientTest, SingleContentEncoding) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0xA1, // ContentEncodings (size = 33)
0x62, 0x40, 0x9e, // ContentEncoding (size = 30)
0x50, 0x31, 0x81, 0x00, // ContentEncodingOrder (size = 1)
0x50, 0x32, 0x81, 0x01, // ContentEncodingScope (size = 1)
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x8F, // ContentEncryption (size = 15)
0x47, 0xE1, 0x81, 0x05, // ContentEncAlgo (size = 1)
0x47, 0xE2, 0x88, // ContentEncKeyID (size = 8)
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
};
int size = sizeof(kContentEncodings);
int result = parser_.Parse(kContentEncodings, size);
ASSERT_EQ(size, result);
const ContentEncodings& content_encodings = client_.content_encodings();
ASSERT_EQ(1u, content_encodings.size());
ASSERT_TRUE(content_encodings[0]);
EXPECT_EQ(0, content_encodings[0]->order());
EXPECT_EQ(ContentEncoding::kScopeAllFrameContents,
content_encodings[0]->scope());
EXPECT_EQ(ContentEncoding::kTypeEncryption, content_encodings[0]->type());
EXPECT_EQ(ContentEncoding::kEncAlgoAes,
content_encodings[0]->encryption_algo());
EXPECT_EQ(8u, content_encodings[0]->encryption_key_id().size());
}
TEST_F(WebMContentEncodingsClientTest, MultipleContentEncoding) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0xC2, // ContentEncodings (size = 66)
0x62, 0x40, 0x9e, // ContentEncoding (size = 30)
0x50, 0x31, 0x81, 0x00, // ContentEncodingOrder (size = 1)
0x50, 0x32, 0x81, 0x03, // ContentEncodingScope (size = 1)
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x8F, // ContentEncryption (size = 15)
0x47, 0xE1, 0x81, 0x05, // ContentEncAlgo (size = 1)
0x47, 0xE2, 0x88, // ContentEncKeyID (size = 8)
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
0x62, 0x40, 0x9e, // ContentEncoding (size = 30)
0x50, 0x31, 0x81, 0x01, // ContentEncodingOrder (size = 1)
0x50, 0x32, 0x81, 0x03, // ContentEncodingScope (size = 1)
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x8F, // ContentEncryption (size = 15)
0x47, 0xE1, 0x81, 0x01, // ContentEncAlgo (size = 1)
0x47, 0xE2, 0x88, // ContentEncKeyID (size = 8)
0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB,
};
int size = sizeof(kContentEncodings);
int result = parser_.Parse(kContentEncodings, size);
ASSERT_EQ(size, result);
const ContentEncodings& content_encodings = client_.content_encodings();
ASSERT_EQ(2u, content_encodings.size());
for (int i = 0; i < 2; ++i) {
ASSERT_TRUE(content_encodings[i]);
EXPECT_EQ(i, content_encodings[i]->order());
EXPECT_EQ(ContentEncoding::kScopeAllFrameContents |
ContentEncoding::kScopeTrackPrivateData,
content_encodings[i]->scope());
EXPECT_EQ(ContentEncoding::kTypeEncryption, content_encodings[i]->type());
EXPECT_EQ(!i ? ContentEncoding::kEncAlgoAes : ContentEncoding::kEncAlgoDes,
content_encodings[i]->encryption_algo());
EXPECT_EQ(8u, content_encodings[i]->encryption_key_id().size());
}
}
TEST_F(WebMContentEncodingsClientTest, DefaultValues) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x8A, // ContentEncodings (size = 10)
0x62, 0x40, 0x87, // ContentEncoding (size = 7)
// ContentEncodingOrder missing
// ContentEncodingScope missing
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
// ContentEncAlgo missing
};
int size = sizeof(kContentEncodings);
int result = parser_.Parse(kContentEncodings, size);
ASSERT_EQ(size, result);
const ContentEncodings& content_encodings = client_.content_encodings();
ASSERT_EQ(1u, content_encodings.size());
ASSERT_TRUE(content_encodings[0]);
EXPECT_EQ(0, content_encodings[0]->order());
EXPECT_EQ(ContentEncoding::kScopeAllFrameContents,
content_encodings[0]->scope());
EXPECT_EQ(ContentEncoding::kTypeEncryption, content_encodings[0]->type());
EXPECT_EQ(ContentEncoding::kEncAlgoNotEncrypted,
content_encodings[0]->encryption_algo());
EXPECT_TRUE(content_encodings[0]->encryption_key_id().empty());
}
TEST_F(WebMContentEncodingsClientTest, ContentEncodingsClientReuse) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0xA1, // ContentEncodings (size = 33)
0x62, 0x40, 0x9e, // ContentEncoding (size = 30)
0x50, 0x31, 0x81, 0x00, // ContentEncodingOrder (size = 1)
0x50, 0x32, 0x81, 0x01, // ContentEncodingScope (size = 1)
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x8F, // ContentEncryption (size = 15)
0x47, 0xE1, 0x81, 0x05, // ContentEncAlgo (size = 1)
0x47, 0xE2, 0x88, // ContentEncKeyID (size = 8)
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
};
int size = sizeof(kContentEncodings);
// Parse for the first time.
int result = parser_.Parse(kContentEncodings, size);
ASSERT_EQ(size, result);
// Parse again.
parser_.Reset();
result = parser_.Parse(kContentEncodings, size);
ASSERT_EQ(size, result);
const ContentEncodings& content_encodings = client_.content_encodings();
ASSERT_EQ(1u, content_encodings.size());
ASSERT_TRUE(content_encodings[0]);
EXPECT_EQ(0, content_encodings[0]->order());
EXPECT_EQ(ContentEncoding::kScopeAllFrameContents,
content_encodings[0]->scope());
EXPECT_EQ(ContentEncoding::kTypeEncryption, content_encodings[0]->type());
EXPECT_EQ(ContentEncoding::kEncAlgoAes,
content_encodings[0]->encryption_algo());
EXPECT_EQ(8u, content_encodings[0]->encryption_key_id().size());
}
TEST_F(WebMContentEncodingsClientTest, InvalidContentEncodingOrder) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x8E, // ContentEncodings (size = 14)
0x62, 0x40, 0x8B, // ContentEncoding (size = 11)
0x50, 0x31, 0x81, 0xEE, // ContentEncodingOrder (size = 1), invalid
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
};
int size = sizeof(kContentEncodings);
EXPECT_MEDIA_LOG(UnexpectedContentEncodingOrder());
ParseAndExpectToFail(kContentEncodings, size);
}
TEST_F(WebMContentEncodingsClientTest, InvalidContentEncodingScope) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x8E, // ContentEncodings (size = 14)
0x62, 0x40, 0x8B, // ContentEncoding (size = 11)
0x50, 0x32, 0x81, 0xEE, // ContentEncodingScope (size = 1), invalid
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
};
int size = sizeof(kContentEncodings);
EXPECT_MEDIA_LOG(UnexpectedContentEncodingScope());
ParseAndExpectToFail(kContentEncodings, size);
}
TEST_F(WebMContentEncodingsClientTest, InvalidContentEncodingType) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x8E, // ContentEncodings (size = 14)
0x62, 0x40, 0x8B, // ContentEncoding (size = 11)
0x50, 0x33, 0x81, 0x00, // ContentEncodingType (size = 1), invalid
0x50, 0x35, 0x80, // ContentEncryption (size = 0)
};
int size = sizeof(kContentEncodings);
EXPECT_MEDIA_LOG(ContentCompressionNotSupported());
ParseAndExpectToFail(kContentEncodings, size);
}
// ContentEncodingType is encryption but no ContentEncryption present.
TEST_F(WebMContentEncodingsClientTest, MissingContentEncryption) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x87, // ContentEncodings (size = 7)
0x62, 0x40, 0x84, // ContentEncoding (size = 4)
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
// ContentEncryption missing
};
int size = sizeof(kContentEncodings);
EXPECT_MEDIA_LOG(MissingContentEncryption());
ParseAndExpectToFail(kContentEncodings, size);
}
TEST_F(WebMContentEncodingsClientTest, InvalidContentEncAlgo) {
const uint8 kContentEncodings[] = {
0x6D, 0x80, 0x99, // ContentEncodings (size = 25)
0x62, 0x40, 0x96, // ContentEncoding (size = 22)
0x50, 0x33, 0x81, 0x01, // ContentEncodingType (size = 1)
0x50, 0x35, 0x8F, // ContentEncryption (size = 15)
0x47, 0xE1, 0x81, 0xEE, // ContentEncAlgo (size = 1), invalid
0x47, 0xE2, 0x88, // ContentEncKeyID (size = 8)
0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
};
int size = sizeof(kContentEncodings);
EXPECT_MEDIA_LOG(UnexpectedContentEncAlgo(0xEE));
ParseAndExpectToFail(kContentEncodings, size);
}
} // namespace media

View File

@ -0,0 +1,62 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_crypto_helpers.h"
#include "base/logging.h"
#include "base/sys_byteorder.h"
#include "media/base/decrypt_config.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
namespace {
// Generates a 16 byte CTR counter block. The CTR counter block format is a
// CTR IV appended with a CTR block counter. |iv| is an 8 byte CTR IV.
// |iv_size| is the size of |iv| in bytes. Returns a string of
// kDecryptionKeySize bytes.
std::string GenerateWebMCounterBlock(const uint8* iv, int iv_size) {
std::string counter_block(reinterpret_cast<const char*>(iv), iv_size);
counter_block.append(DecryptConfig::kDecryptionKeySize - iv_size, 0);
return counter_block;
}
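// For example, with an 8-byte IV of 01 02 03 04 05 06 07 08 the generated
// counter block is 01 02 03 04 05 06 07 08 00 00 00 00 00 00 00 00 (16 bytes).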
} // namespace anonymous
bool WebMCreateDecryptConfig(const uint8* data, int data_size,
const uint8* key_id, int key_id_size,
scoped_ptr<DecryptConfig>* decrypt_config,
int* data_offset) {
if (data_size < kWebMSignalByteSize) {
DVLOG(1) << "Got a block from an encrypted stream with no data.";
return false;
}
uint8 signal_byte = data[0];
int frame_offset = sizeof(signal_byte);
// Setting the DecryptConfig object of the buffer while leaving the
// initialization vector empty will tell the decryptor that the frame is
// unencrypted.
std::string counter_block;
if (signal_byte & kWebMFlagEncryptedFrame) {
if (data_size < kWebMSignalByteSize + kWebMIvSize) {
DVLOG(1) << "Got an encrypted block with not enough data " << data_size;
return false;
}
counter_block = GenerateWebMCounterBlock(data + frame_offset, kWebMIvSize);
frame_offset += kWebMIvSize;
}
decrypt_config->reset(new DecryptConfig(
std::string(reinterpret_cast<const char*>(key_id), key_id_size),
counter_block,
std::vector<SubsampleEntry>()));
*data_offset = frame_offset;
return true;
}
} // namespace media

View File

@ -0,0 +1,28 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_CRYPTO_HELPERS_H_
#define MEDIA_FORMATS_WEBM_WEBM_CRYPTO_HELPERS_H_
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/decoder_buffer.h"
namespace media {
// Fills an initialized DecryptConfig, which can be sent to the Decryptor if
// the stream has potentially encrypted frames. Also sets |data_offset| which
// indicates where the encrypted data starts. Leaving the IV empty will tell
// the decryptor that the frame is unencrypted. Returns true if |data| is valid,
// false otherwise, in which case |decrypt_config| and |data_offset| will not be
// changed. Current encrypted WebM request for comments specification is here
// http://wiki.webmproject.org/encryption/webm-encryption-rfc
bool WebMCreateDecryptConfig(const uint8* data, int data_size,
const uint8* key_id, int key_id_size,
scoped_ptr<DecryptConfig>* decrypt_config,
int* data_offset);
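// Minimal usage sketch (illustrative; |block| and |key_id| would come from the
// Block payload and the ContentEncKeyID element, respectively):
//
//   scoped_ptr<DecryptConfig> decrypt_config;
//   int data_offset = 0;
//   if (WebMCreateDecryptConfig(block, block_size, key_id, key_id_size,
//                               &decrypt_config, &data_offset)) {
//     // Frame payload starts at block + data_offset; an empty IV in
//     // |decrypt_config| means the frame is unencrypted.
//   }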
} // namespace media
#endif  // MEDIA_FORMATS_WEBM_WEBM_CRYPTO_HELPERS_H_

View File

@ -0,0 +1,103 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_info_parser.h"
#include "base/logging.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
// Default timecode scale if the TimecodeScale element is
// not specified in the Info element.
static const int kWebMDefaultTimecodeScale = 1000000;
WebMInfoParser::WebMInfoParser()
: timecode_scale_(-1),
duration_(-1) {
}
WebMInfoParser::~WebMInfoParser() {}
int WebMInfoParser::Parse(const uint8* buf, int size) {
timecode_scale_ = -1;
duration_ = -1;
WebMListParser parser(kWebMIdInfo, this);
int result = parser.Parse(buf, size);
if (result <= 0)
return result;
// For now we do all or nothing parsing.
return parser.IsParsingComplete() ? result : 0;
}
WebMParserClient* WebMInfoParser::OnListStart(int id) { return this; }
bool WebMInfoParser::OnListEnd(int id) {
if (id == kWebMIdInfo && timecode_scale_ == -1) {
// Set timecode scale to default value if it isn't present in
// the Info element.
timecode_scale_ = kWebMDefaultTimecodeScale;
}
return true;
}
bool WebMInfoParser::OnUInt(int id, int64 val) {
if (id != kWebMIdTimecodeScale)
return true;
if (timecode_scale_ != -1) {
DVLOG(1) << "Multiple values for id " << std::hex << id << " specified";
return false;
}
timecode_scale_ = val;
return true;
}
bool WebMInfoParser::OnFloat(int id, double val) {
if (id != kWebMIdDuration) {
DVLOG(1) << "Unexpected float for id" << std::hex << id;
return false;
}
if (duration_ != -1) {
DVLOG(1) << "Multiple values for duration.";
return false;
}
duration_ = val;
return true;
}
bool WebMInfoParser::OnBinary(int id, const uint8* data, int size) {
if (id == kWebMIdDateUTC) {
if (size != 8)
return false;
int64 date_in_nanoseconds = 0;
for (int i = 0; i < size; ++i)
date_in_nanoseconds = (date_in_nanoseconds << 8) | data[i];
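// DateUTC is a big-endian count of nanoseconds relative to
// 2001-01-01 00:00:00 UTC (the Matroska epoch constructed below), assembled
// above and converted to microseconds for base::TimeDelta.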
base::Time::Exploded exploded_epoch;
exploded_epoch.year = 2001;
exploded_epoch.month = 1;
exploded_epoch.day_of_month = 1;
exploded_epoch.hour = 0;
exploded_epoch.minute = 0;
exploded_epoch.second = 0;
exploded_epoch.millisecond = 0;
date_utc_ = base::Time::FromUTCExploded(exploded_epoch) +
base::TimeDelta::FromMicroseconds(date_in_nanoseconds / 1000);
}
return true;
}
bool WebMInfoParser::OnString(int id, const std::string& str) {
return true;
}
} // namespace media

View File

@ -0,0 +1,50 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_INFO_PARSER_H_
#define MEDIA_FORMATS_WEBM_WEBM_INFO_PARSER_H_
#include "base/compiler_specific.h"
#include "base/time/time.h"
#include "media/base/media_export.h"
#include "media/formats/webm/webm_parser.h"
namespace media {
// Parser for WebM Info element.
class MEDIA_EXPORT WebMInfoParser : public WebMParserClient {
public:
WebMInfoParser();
~WebMInfoParser() override;
// Parses a WebM Info element in |buf|.
//
// Returns -1 if the parse fails.
// Returns 0 if more data is needed.
// Returns the number of bytes parsed on success.
int Parse(const uint8* buf, int size);
int64 timecode_scale() const { return timecode_scale_; }
double duration() const { return duration_; }
base::Time date_utc() const { return date_utc_; }
private:
// WebMParserClient methods
WebMParserClient* OnListStart(int id) override;
bool OnListEnd(int id) override;
bool OnUInt(int id, int64 val) override;
bool OnFloat(int id, double val) override;
bool OnBinary(int id, const uint8* data, int size) override;
bool OnString(int id, const std::string& str) override;
int64 timecode_scale_;
double duration_;
base::Time date_utc_;
DISALLOW_COPY_AND_ASSIGN(WebMInfoParser);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_INFO_PARSER_H_

View File

@ -0,0 +1,954 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_parser.h"
// This file contains code to parse WebM file elements. It was created
// from information in the Matroska spec.
// http://www.matroska.org/technical/specs/index.html
// This file contains code for encrypted WebM. Current WebM
// encrypted request for comments specification is here
// http://wiki.webmproject.org/encryption/webm-encryption-rfc
#include <iomanip>
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
enum ElementType {
UNKNOWN,
LIST, // Referred to as Master Element in the Matroska spec.
UINT,
FLOAT,
BINARY,
STRING,
SKIP,
};
struct ElementIdInfo {
ElementType type_;
int id_;
};
struct ListElementInfo {
int id_;
int level_;
const ElementIdInfo* id_info_;
int id_info_count_;
};
// The following are tables indicating what IDs are valid sub-elements
// of particular elements. If an element is encountered that doesn't
// appear in the list, a parsing error is signalled. Some elements are
// marked as SKIP because they are valid, but we don't care about them
// right now.
static const ElementIdInfo kEBMLHeaderIds[] = {
{UINT, kWebMIdEBMLVersion},
{UINT, kWebMIdEBMLReadVersion},
{UINT, kWebMIdEBMLMaxIDLength},
{UINT, kWebMIdEBMLMaxSizeLength},
{STRING, kWebMIdDocType},
{UINT, kWebMIdDocTypeVersion},
{UINT, kWebMIdDocTypeReadVersion},
};
static const ElementIdInfo kSegmentIds[] = {
{LIST, kWebMIdSeekHead},
{LIST, kWebMIdInfo},
{LIST, kWebMIdCluster},
{LIST, kWebMIdTracks},
{LIST, kWebMIdCues},
{LIST, kWebMIdAttachments},
{LIST, kWebMIdChapters},
{LIST, kWebMIdTags},
};
static const ElementIdInfo kSeekHeadIds[] = {
{LIST, kWebMIdSeek},
};
static const ElementIdInfo kSeekIds[] = {
{BINARY, kWebMIdSeekID},
{UINT, kWebMIdSeekPosition},
};
static const ElementIdInfo kInfoIds[] = {
{BINARY, kWebMIdSegmentUID},
{STRING, kWebMIdSegmentFilename},
{BINARY, kWebMIdPrevUID},
{STRING, kWebMIdPrevFilename},
{BINARY, kWebMIdNextUID},
{STRING, kWebMIdNextFilename},
{BINARY, kWebMIdSegmentFamily},
{LIST, kWebMIdChapterTranslate},
{UINT, kWebMIdTimecodeScale},
{FLOAT, kWebMIdDuration},
{BINARY, kWebMIdDateUTC},
{STRING, kWebMIdTitle},
{STRING, kWebMIdMuxingApp},
{STRING, kWebMIdWritingApp},
};
static const ElementIdInfo kChapterTranslateIds[] = {
{UINT, kWebMIdChapterTranslateEditionUID},
{UINT, kWebMIdChapterTranslateCodec},
{BINARY, kWebMIdChapterTranslateID},
};
static const ElementIdInfo kClusterIds[] = {
{BINARY, kWebMIdSimpleBlock},
{UINT, kWebMIdTimecode},
{LIST, kWebMIdSilentTracks},
{UINT, kWebMIdPosition},
{UINT, kWebMIdPrevSize},
{LIST, kWebMIdBlockGroup},
};
static const ElementIdInfo kSilentTracksIds[] = {
{UINT, kWebMIdSilentTrackNumber},
};
static const ElementIdInfo kBlockGroupIds[] = {
{BINARY, kWebMIdBlock},
{LIST, kWebMIdBlockAdditions},
{UINT, kWebMIdBlockDuration},
{UINT, kWebMIdReferencePriority},
{BINARY, kWebMIdReferenceBlock},
{BINARY, kWebMIdCodecState},
{BINARY, kWebMIdDiscardPadding},
{LIST, kWebMIdSlices},
};
static const ElementIdInfo kBlockAdditionsIds[] = {
{LIST, kWebMIdBlockMore},
};
static const ElementIdInfo kBlockMoreIds[] = {
{UINT, kWebMIdBlockAddID},
{BINARY, kWebMIdBlockAdditional},
};
static const ElementIdInfo kSlicesIds[] = {
{LIST, kWebMIdTimeSlice},
};
static const ElementIdInfo kTimeSliceIds[] = {
{UINT, kWebMIdLaceNumber},
};
static const ElementIdInfo kTracksIds[] = {
{LIST, kWebMIdTrackEntry},
};
static const ElementIdInfo kTrackEntryIds[] = {
{UINT, kWebMIdTrackNumber},
{BINARY, kWebMIdTrackUID},
{UINT, kWebMIdTrackType},
{UINT, kWebMIdFlagEnabled},
{UINT, kWebMIdFlagDefault},
{UINT, kWebMIdFlagForced},
{UINT, kWebMIdFlagLacing},
{UINT, kWebMIdMinCache},
{UINT, kWebMIdMaxCache},
{UINT, kWebMIdDefaultDuration},
{FLOAT, kWebMIdTrackTimecodeScale},
{UINT, kWebMIdMaxBlockAdditionId},
{STRING, kWebMIdName},
{STRING, kWebMIdLanguage},
{STRING, kWebMIdCodecID},
{BINARY, kWebMIdCodecPrivate},
{STRING, kWebMIdCodecName},
{UINT, kWebMIdAttachmentLink},
{UINT, kWebMIdCodecDecodeAll},
{UINT, kWebMIdTrackOverlay},
{UINT, kWebMIdCodecDelay},
{UINT, kWebMIdSeekPreRoll},
{LIST, kWebMIdTrackTranslate},
{LIST, kWebMIdVideo},
{LIST, kWebMIdAudio},
{LIST, kWebMIdTrackOperation},
{LIST, kWebMIdContentEncodings},
};
static const ElementIdInfo kTrackTranslateIds[] = {
{UINT, kWebMIdTrackTranslateEditionUID},
{UINT, kWebMIdTrackTranslateCodec},
{BINARY, kWebMIdTrackTranslateTrackID},
};
static const ElementIdInfo kVideoIds[] = {
{UINT, kWebMIdFlagInterlaced},
{UINT, kWebMIdStereoMode},
{UINT, kWebMIdAlphaMode},
{UINT, kWebMIdPixelWidth},
{UINT, kWebMIdPixelHeight},
{UINT, kWebMIdPixelCropBottom},
{UINT, kWebMIdPixelCropTop},
{UINT, kWebMIdPixelCropLeft},
{UINT, kWebMIdPixelCropRight},
{UINT, kWebMIdDisplayWidth},
{UINT, kWebMIdDisplayHeight},
{UINT, kWebMIdDisplayUnit},
{UINT, kWebMIdAspectRatioType},
{BINARY, kWebMIdColorSpace},
{FLOAT, kWebMIdFrameRate},
};
static const ElementIdInfo kAudioIds[] = {
{FLOAT, kWebMIdSamplingFrequency},
{FLOAT, kWebMIdOutputSamplingFrequency},
{UINT, kWebMIdChannels},
{UINT, kWebMIdBitDepth},
};
static const ElementIdInfo kTrackOperationIds[] = {
{LIST, kWebMIdTrackCombinePlanes},
{LIST, kWebMIdJoinBlocks},
};
static const ElementIdInfo kTrackCombinePlanesIds[] = {
{LIST, kWebMIdTrackPlane},
};
static const ElementIdInfo kTrackPlaneIds[] = {
{UINT, kWebMIdTrackPlaneUID},
{UINT, kWebMIdTrackPlaneType},
};
static const ElementIdInfo kJoinBlocksIds[] = {
{UINT, kWebMIdTrackJoinUID},
};
static const ElementIdInfo kContentEncodingsIds[] = {
{LIST, kWebMIdContentEncoding},
};
static const ElementIdInfo kContentEncodingIds[] = {
{UINT, kWebMIdContentEncodingOrder},
{UINT, kWebMIdContentEncodingScope},
{UINT, kWebMIdContentEncodingType},
{LIST, kWebMIdContentCompression},
{LIST, kWebMIdContentEncryption},
};
static const ElementIdInfo kContentCompressionIds[] = {
{UINT, kWebMIdContentCompAlgo},
{BINARY, kWebMIdContentCompSettings},
};
static const ElementIdInfo kContentEncryptionIds[] = {
{LIST, kWebMIdContentEncAESSettings},
{UINT, kWebMIdContentEncAlgo},
{BINARY, kWebMIdContentEncKeyID},
{BINARY, kWebMIdContentSignature},
{BINARY, kWebMIdContentSigKeyID},
{UINT, kWebMIdContentSigAlgo},
{UINT, kWebMIdContentSigHashAlgo},
};
static const ElementIdInfo kContentEncAESSettingsIds[] = {
{UINT, kWebMIdAESSettingsCipherMode},
};
static const ElementIdInfo kCuesIds[] = {
{LIST, kWebMIdCuePoint},
};
static const ElementIdInfo kCuePointIds[] = {
{UINT, kWebMIdCueTime},
{LIST, kWebMIdCueTrackPositions},
};
static const ElementIdInfo kCueTrackPositionsIds[] = {
{UINT, kWebMIdCueTrack},
{UINT, kWebMIdCueClusterPosition},
{UINT, kWebMIdCueBlockNumber},
{UINT, kWebMIdCueCodecState},
{LIST, kWebMIdCueReference},
};
static const ElementIdInfo kCueReferenceIds[] = {
{UINT, kWebMIdCueRefTime},
};
static const ElementIdInfo kAttachmentsIds[] = {
{LIST, kWebMIdAttachedFile},
};
static const ElementIdInfo kAttachedFileIds[] = {
{STRING, kWebMIdFileDescription},
{STRING, kWebMIdFileName},
{STRING, kWebMIdFileMimeType},
{BINARY, kWebMIdFileData},
{UINT, kWebMIdFileUID},
};
static const ElementIdInfo kChaptersIds[] = {
{LIST, kWebMIdEditionEntry},
};
static const ElementIdInfo kEditionEntryIds[] = {
{UINT, kWebMIdEditionUID},
{UINT, kWebMIdEditionFlagHidden},
{UINT, kWebMIdEditionFlagDefault},
{UINT, kWebMIdEditionFlagOrdered},
{LIST, kWebMIdChapterAtom},
};
static const ElementIdInfo kChapterAtomIds[] = {
{UINT, kWebMIdChapterUID},
{UINT, kWebMIdChapterTimeStart},
{UINT, kWebMIdChapterTimeEnd},
{UINT, kWebMIdChapterFlagHidden},
{UINT, kWebMIdChapterFlagEnabled},
{BINARY, kWebMIdChapterSegmentUID},
{UINT, kWebMIdChapterSegmentEditionUID},
{UINT, kWebMIdChapterPhysicalEquiv},
{LIST, kWebMIdChapterTrack},
{LIST, kWebMIdChapterDisplay},
{LIST, kWebMIdChapProcess},
};
static const ElementIdInfo kChapterTrackIds[] = {
{UINT, kWebMIdChapterTrackNumber},
};
static const ElementIdInfo kChapterDisplayIds[] = {
{STRING, kWebMIdChapString},
{STRING, kWebMIdChapLanguage},
{STRING, kWebMIdChapCountry},
};
static const ElementIdInfo kChapProcessIds[] = {
{UINT, kWebMIdChapProcessCodecID},
{BINARY, kWebMIdChapProcessPrivate},
{LIST, kWebMIdChapProcessCommand},
};
static const ElementIdInfo kChapProcessCommandIds[] = {
{UINT, kWebMIdChapProcessTime},
{BINARY, kWebMIdChapProcessData},
};
static const ElementIdInfo kTagsIds[] = {
{LIST, kWebMIdTag},
};
static const ElementIdInfo kTagIds[] = {
{LIST, kWebMIdTargets},
{LIST, kWebMIdSimpleTag},
};
static const ElementIdInfo kTargetsIds[] = {
{UINT, kWebMIdTargetTypeValue},
{STRING, kWebMIdTargetType},
{UINT, kWebMIdTagTrackUID},
{UINT, kWebMIdTagEditionUID},
{UINT, kWebMIdTagChapterUID},
{UINT, kWebMIdTagAttachmentUID},
};
static const ElementIdInfo kSimpleTagIds[] = {
{STRING, kWebMIdTagName},
{STRING, kWebMIdTagLanguage},
{UINT, kWebMIdTagDefault},
{STRING, kWebMIdTagString},
{BINARY, kWebMIdTagBinary},
};
#define LIST_ELEMENT_INFO(id, level, id_info) \
{ (id), (level), (id_info), arraysize(id_info) }
static const ListElementInfo kListElementInfo[] = {
LIST_ELEMENT_INFO(kWebMIdCluster, 1, kClusterIds),
LIST_ELEMENT_INFO(kWebMIdEBMLHeader, 0, kEBMLHeaderIds),
LIST_ELEMENT_INFO(kWebMIdSegment, 0, kSegmentIds),
LIST_ELEMENT_INFO(kWebMIdSeekHead, 1, kSeekHeadIds),
LIST_ELEMENT_INFO(kWebMIdSeek, 2, kSeekIds),
LIST_ELEMENT_INFO(kWebMIdInfo, 1, kInfoIds),
LIST_ELEMENT_INFO(kWebMIdChapterTranslate, 2, kChapterTranslateIds),
LIST_ELEMENT_INFO(kWebMIdSilentTracks, 2, kSilentTracksIds),
LIST_ELEMENT_INFO(kWebMIdBlockGroup, 2, kBlockGroupIds),
LIST_ELEMENT_INFO(kWebMIdBlockAdditions, 3, kBlockAdditionsIds),
LIST_ELEMENT_INFO(kWebMIdBlockMore, 4, kBlockMoreIds),
LIST_ELEMENT_INFO(kWebMIdSlices, 3, kSlicesIds),
LIST_ELEMENT_INFO(kWebMIdTimeSlice, 4, kTimeSliceIds),
LIST_ELEMENT_INFO(kWebMIdTracks, 1, kTracksIds),
LIST_ELEMENT_INFO(kWebMIdTrackEntry, 2, kTrackEntryIds),
LIST_ELEMENT_INFO(kWebMIdTrackTranslate, 3, kTrackTranslateIds),
LIST_ELEMENT_INFO(kWebMIdVideo, 3, kVideoIds),
LIST_ELEMENT_INFO(kWebMIdAudio, 3, kAudioIds),
LIST_ELEMENT_INFO(kWebMIdTrackOperation, 3, kTrackOperationIds),
LIST_ELEMENT_INFO(kWebMIdTrackCombinePlanes, 4, kTrackCombinePlanesIds),
LIST_ELEMENT_INFO(kWebMIdTrackPlane, 5, kTrackPlaneIds),
LIST_ELEMENT_INFO(kWebMIdJoinBlocks, 4, kJoinBlocksIds),
LIST_ELEMENT_INFO(kWebMIdContentEncodings, 3, kContentEncodingsIds),
LIST_ELEMENT_INFO(kWebMIdContentEncoding, 4, kContentEncodingIds),
LIST_ELEMENT_INFO(kWebMIdContentCompression, 5, kContentCompressionIds),
LIST_ELEMENT_INFO(kWebMIdContentEncryption, 5, kContentEncryptionIds),
LIST_ELEMENT_INFO(kWebMIdContentEncAESSettings, 6, kContentEncAESSettingsIds),
LIST_ELEMENT_INFO(kWebMIdCues, 1, kCuesIds),
LIST_ELEMENT_INFO(kWebMIdCuePoint, 2, kCuePointIds),
LIST_ELEMENT_INFO(kWebMIdCueTrackPositions, 3, kCueTrackPositionsIds),
LIST_ELEMENT_INFO(kWebMIdCueReference, 4, kCueReferenceIds),
LIST_ELEMENT_INFO(kWebMIdAttachments, 1, kAttachmentsIds),
LIST_ELEMENT_INFO(kWebMIdAttachedFile, 2, kAttachedFileIds),
LIST_ELEMENT_INFO(kWebMIdChapters, 1, kChaptersIds),
LIST_ELEMENT_INFO(kWebMIdEditionEntry, 2, kEditionEntryIds),
LIST_ELEMENT_INFO(kWebMIdChapterAtom, 3, kChapterAtomIds),
LIST_ELEMENT_INFO(kWebMIdChapterTrack, 4, kChapterTrackIds),
LIST_ELEMENT_INFO(kWebMIdChapterDisplay, 4, kChapterDisplayIds),
LIST_ELEMENT_INFO(kWebMIdChapProcess, 4, kChapProcessIds),
LIST_ELEMENT_INFO(kWebMIdChapProcessCommand, 5, kChapProcessCommandIds),
LIST_ELEMENT_INFO(kWebMIdTags, 1, kTagsIds),
LIST_ELEMENT_INFO(kWebMIdTag, 2, kTagIds),
LIST_ELEMENT_INFO(kWebMIdTargets, 3, kTargetsIds),
LIST_ELEMENT_INFO(kWebMIdSimpleTag, 3, kSimpleTagIds),
};
// Parses an element header id or size field. These fields are variable length
// encoded. The first byte indicates how many bytes the field occupies.
// |buf| - The buffer to parse.
// |size| - The number of bytes in |buf|
// |max_bytes| - The maximum number of bytes the field can be. ID fields
// set this to 4 & element size fields set this to 8. If the
// first byte indicates a larger field size than this it is a
// parser error.
// |mask_first_byte| - For element size fields the field length encoding bits
// need to be masked off. This parameter is true for
// element size fields and is false for ID field values.
//
// Returns: The number of bytes parsed on success. -1 on error.
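//
// Worked examples (added for clarity, not part of the original comment):
// with |mask_first_byte| true, the single byte 0x81 decodes to 1 (the
// length-descriptor bit is masked off) and the two bytes 0x40 0x02 decode
// to 2. With |mask_first_byte| false, the four bytes 0x1A 0x45 0xDF 0xA3
// are kept as-is and decode to the EBML header ID 0x1A45DFA3.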
static int ParseWebMElementHeaderField(const uint8* buf, int size,
int max_bytes, bool mask_first_byte,
int64* num) {
DCHECK(buf);
DCHECK(num);
if (size < 0)
return -1;
if (size == 0)
return 0;
int mask = 0x80;
uint8 ch = buf[0];
int extra_bytes = -1;
bool all_ones = false;
for (int i = 0; i < max_bytes; ++i) {
if ((ch & mask) != 0) {
mask = ~mask & 0xff;
*num = mask_first_byte ? ch & mask : ch;
all_ones = (ch & mask) == mask;
extra_bytes = i;
break;
}
mask = 0x80 | mask >> 1;
}
if (extra_bytes == -1)
return -1;
// Return 0 if we need more data.
if ((1 + extra_bytes) > size)
return 0;
int bytes_used = 1;
for (int i = 0; i < extra_bytes; ++i) {
ch = buf[bytes_used++];
all_ones &= (ch == 0xff);
*num = (*num << 8) | ch;
}
if (all_ones)
*num = kint64max;
return bytes_used;
}
int WebMParseElementHeader(const uint8* buf, int size,
int* id, int64* element_size) {
DCHECK(buf);
DCHECK_GE(size, 0);
DCHECK(id);
DCHECK(element_size);
if (size == 0)
return 0;
int64 tmp = 0;
int num_id_bytes = ParseWebMElementHeaderField(buf, size, 4, false, &tmp);
if (num_id_bytes <= 0)
return num_id_bytes;
if (tmp == kint64max)
tmp = kWebMReservedId;
*id = static_cast<int>(tmp);
int num_size_bytes = ParseWebMElementHeaderField(buf + num_id_bytes,
size - num_id_bytes,
8, true, &tmp);
if (num_size_bytes <= 0)
return num_size_bytes;
if (tmp == kint64max)
tmp = kWebMUnknownSize;
*element_size = tmp;
DVLOG(3) << "WebMParseElementHeader() : id " << std::hex << *id << std::dec
<< " size " << *element_size;
return num_id_bytes + num_size_bytes;
}
// Finds ElementType for a specific ID.
static ElementType FindIdType(int id,
const ElementIdInfo* id_info,
int id_info_count) {
// Check for global element IDs that can be anywhere.
if (id == kWebMIdVoid || id == kWebMIdCRC32)
return SKIP;
for (int i = 0; i < id_info_count; ++i) {
if (id == id_info[i].id_)
return id_info[i].type_;
}
return UNKNOWN;
}
// Finds ListElementInfo for a specific ID.
static const ListElementInfo* FindListInfo(int id) {
for (size_t i = 0; i < arraysize(kListElementInfo); ++i) {
if (id == kListElementInfo[i].id_)
return &kListElementInfo[i];
}
return NULL;
}
static int FindListLevel(int id) {
const ListElementInfo* list_info = FindListInfo(id);
if (list_info)
return list_info->level_;
return -1;
}
static int ParseUInt(const uint8* buf, int size, int id,
WebMParserClient* client) {
if ((size <= 0) || (size > 8))
return -1;
// Read in the big-endian integer.
uint64 value = 0;
for (int i = 0; i < size; ++i)
value = (value << 8) | buf[i];
// We use int64 in place of uint64 everywhere for convenience. See this bug
// for more details: http://crbug.com/366750#c3
if (!base::IsValueInRangeForNumericType<int64>(value))
return -1;
if (!client->OnUInt(id, value))
return -1;
return size;
}
static int ParseFloat(const uint8* buf, int size, int id,
WebMParserClient* client) {
if ((size != 4) && (size != 8))
return -1;
double value = -1;
// Read the bytes from big-endian form into a native endian integer.
int64 tmp = 0;
for (int i = 0; i < size; ++i)
tmp = (tmp << 8) | buf[i];
// Use a union to convert the integer bit pattern into a floating point
// number.
if (size == 4) {
union {
int32 src;
float dst;
} tmp2;
tmp2.src = static_cast<int32>(tmp);
value = tmp2.dst;
} else if (size == 8) {
union {
int64 src;
double dst;
} tmp2;
tmp2.src = tmp;
value = tmp2.dst;
} else {
return -1;
}
if (!client->OnFloat(id, value))
return -1;
return size;
}
static int ParseBinary(const uint8* buf, int size, int id,
WebMParserClient* client) {
return client->OnBinary(id, buf, size) ? size : -1;
}
static int ParseString(const uint8* buf, int size, int id,
WebMParserClient* client) {
const uint8* end = static_cast<const uint8*>(memchr(buf, '\0', size));
int length = (end != NULL) ? static_cast<int>(end - buf) : size;
std::string str(reinterpret_cast<const char*>(buf), length);
return client->OnString(id, str) ? size : -1;
}
static int ParseNonListElement(ElementType type, int id, int64 element_size,
const uint8* buf, int size,
WebMParserClient* client) {
DCHECK_GE(size, element_size);
int result = -1;
switch(type) {
case LIST:
NOTIMPLEMENTED();
result = -1;
break;
case UINT:
result = ParseUInt(buf, element_size, id, client);
break;
case FLOAT:
result = ParseFloat(buf, element_size, id, client);
break;
case BINARY:
result = ParseBinary(buf, element_size, id, client);
break;
case STRING:
result = ParseString(buf, element_size, id, client);
break;
case SKIP:
result = element_size;
break;
default:
DVLOG(1) << "Unhandled ID type " << type;
return -1;
};
DCHECK_LE(result, size);
return result;
}
WebMParserClient::WebMParserClient() {}
WebMParserClient::~WebMParserClient() {}
WebMParserClient* WebMParserClient::OnListStart(int id) {
DVLOG(1) << "Unexpected list element start with ID " << std::hex << id;
return NULL;
}
bool WebMParserClient::OnListEnd(int id) {
DVLOG(1) << "Unexpected list element end with ID " << std::hex << id;
return false;
}
bool WebMParserClient::OnUInt(int id, int64 val) {
DVLOG(1) << "Unexpected unsigned integer element with ID " << std::hex << id;
return false;
}
bool WebMParserClient::OnFloat(int id, double val) {
DVLOG(1) << "Unexpected float element with ID " << std::hex << id;
return false;
}
bool WebMParserClient::OnBinary(int id, const uint8* data, int size) {
DVLOG(1) << "Unexpected binary element with ID " << std::hex << id;
return false;
}
bool WebMParserClient::OnString(int id, const std::string& str) {
DVLOG(1) << "Unexpected string element with ID " << std::hex << id;
return false;
}
WebMListParser::WebMListParser(int id, WebMParserClient* client)
: state_(NEED_LIST_HEADER),
root_id_(id),
root_level_(FindListLevel(id)),
root_client_(client) {
DCHECK_GE(root_level_, 0);
DCHECK(client);
}
WebMListParser::~WebMListParser() {}
void WebMListParser::Reset() {
ChangeState(NEED_LIST_HEADER);
list_state_stack_.clear();
}
int WebMListParser::Parse(const uint8* buf, int size) {
DCHECK(buf);
if (size < 0 || state_ == PARSE_ERROR || state_ == DONE_PARSING_LIST)
return -1;
if (size == 0)
return 0;
const uint8* cur = buf;
int cur_size = size;
int bytes_parsed = 0;
while (cur_size > 0 && state_ != PARSE_ERROR && state_ != DONE_PARSING_LIST) {
int element_id = 0;
int64 element_size = 0;
int result = WebMParseElementHeader(cur, cur_size, &element_id,
&element_size);
if (result < 0)
return result;
if (result == 0)
return bytes_parsed;
switch(state_) {
case NEED_LIST_HEADER: {
if (element_id != root_id_) {
ChangeState(PARSE_ERROR);
return -1;
}
// Only allow Segment & Cluster to have an unknown size.
if (element_size == kWebMUnknownSize &&
(element_id != kWebMIdSegment) &&
(element_id != kWebMIdCluster)) {
ChangeState(PARSE_ERROR);
return -1;
}
ChangeState(INSIDE_LIST);
if (!OnListStart(root_id_, element_size))
return -1;
break;
}
case INSIDE_LIST: {
int header_size = result;
const uint8* element_data = cur + header_size;
int element_data_size = cur_size - header_size;
if (element_size < element_data_size)
element_data_size = element_size;
result = ParseListElement(header_size, element_id, element_size,
element_data, element_data_size);
DCHECK_LE(result, header_size + element_data_size);
if (result < 0) {
ChangeState(PARSE_ERROR);
return -1;
}
if (result == 0)
return bytes_parsed;
break;
}
case DONE_PARSING_LIST:
case PARSE_ERROR:
// Shouldn't be able to get here.
NOTIMPLEMENTED();
break;
}
cur += result;
cur_size -= result;
bytes_parsed += result;
}
return (state_ == PARSE_ERROR) ? -1 : bytes_parsed;
}
bool WebMListParser::IsParsingComplete() const {
return state_ == DONE_PARSING_LIST;
}
void WebMListParser::ChangeState(State new_state) {
state_ = new_state;
}
int WebMListParser::ParseListElement(int header_size,
int id, int64 element_size,
const uint8* data, int size) {
DCHECK_GT(list_state_stack_.size(), 0u);
ListState& list_state = list_state_stack_.back();
DCHECK(list_state.element_info_);
const ListElementInfo* element_info = list_state.element_info_;
ElementType id_type =
FindIdType(id, element_info->id_info_, element_info->id_info_count_);
// Unexpected ID.
if (id_type == UNKNOWN) {
if (list_state.size_ != kWebMUnknownSize ||
!IsSiblingOrAncestor(list_state.id_, id)) {
DVLOG(1) << "No ElementType info for ID 0x" << std::hex << id;
return -1;
}
// We've reached the end of a list of unknown size. Update the size now that
// we know it and dispatch the end of list calls.
list_state.size_ = list_state.bytes_parsed_;
if (!OnListEnd())
return -1;
// Check to see if all open lists have ended.
if (list_state_stack_.size() == 0)
return 0;
list_state = list_state_stack_.back();
}
// Make sure the whole element can fit inside the current list.
int64 total_element_size = header_size + element_size;
if (list_state.size_ != kWebMUnknownSize &&
list_state.size_ < list_state.bytes_parsed_ + total_element_size) {
return -1;
}
if (id_type == LIST) {
list_state.bytes_parsed_ += header_size;
if (!OnListStart(id, element_size))
return -1;
return header_size;
}
// Make sure we have the entire element before trying to parse a non-list
// element.
if (size < element_size)
return 0;
int bytes_parsed = ParseNonListElement(id_type, id, element_size,
data, size, list_state.client_);
DCHECK_LE(bytes_parsed, size);
// Return if an error occurred or we need more data.
// Note: bytes_parsed is 0 for a successful parse of a size 0 element. We
// need to check the element_size to disambiguate the "need more data" case
// from a successful parse.
if (bytes_parsed < 0 || (bytes_parsed == 0 && element_size != 0))
return bytes_parsed;
int result = header_size + bytes_parsed;
list_state.bytes_parsed_ += result;
// See if we have reached the end of the current list.
if (list_state.bytes_parsed_ == list_state.size_) {
if (!OnListEnd())
return -1;
}
return result;
}
bool WebMListParser::OnListStart(int id, int64 size) {
const ListElementInfo* element_info = FindListInfo(id);
if (!element_info)
return false;
int current_level = root_level_ + list_state_stack_.size() - 1;
if (current_level + 1 != element_info->level_)
return false;
WebMParserClient* current_list_client = NULL;
if (!list_state_stack_.empty()) {
// Make sure the new list doesn't go past the end of the current list.
ListState current_list_state = list_state_stack_.back();
if (current_list_state.size_ != kWebMUnknownSize &&
current_list_state.size_ < current_list_state.bytes_parsed_ + size)
return false;
current_list_client = current_list_state.client_;
} else {
current_list_client = root_client_;
}
WebMParserClient* new_list_client = current_list_client->OnListStart(id);
if (!new_list_client)
return false;
ListState new_list_state = { id, size, 0, element_info, new_list_client };
list_state_stack_.push_back(new_list_state);
if (size == 0)
return OnListEnd();
return true;
}
bool WebMListParser::OnListEnd() {
int lists_ended = 0;
for (; !list_state_stack_.empty(); ++lists_ended) {
const ListState& list_state = list_state_stack_.back();
int64 bytes_parsed = list_state.bytes_parsed_;
int id = list_state.id_;
if (bytes_parsed != list_state.size_)
break;
list_state_stack_.pop_back();
WebMParserClient* client = NULL;
if (!list_state_stack_.empty()) {
// Update the bytes_parsed_ for the parent element.
list_state_stack_.back().bytes_parsed_ += bytes_parsed;
client = list_state_stack_.back().client_;
} else {
client = root_client_;
}
if (!client->OnListEnd(id))
return false;
}
DCHECK_GE(lists_ended, 1);
if (list_state_stack_.empty())
ChangeState(DONE_PARSING_LIST);
return true;
}
bool WebMListParser::IsSiblingOrAncestor(int id_a, int id_b) const {
DCHECK((id_a == kWebMIdSegment) || (id_a == kWebMIdCluster));
if (id_a == kWebMIdCluster) {
// kWebMIdCluster siblings.
for (size_t i = 0; i < arraysize(kSegmentIds); i++) {
if (kSegmentIds[i].id_ == id_b)
return true;
}
}
// kWebMIdSegment siblings.
return ((id_b == kWebMIdSegment) || (id_b == kWebMIdEBMLHeader));
}
} // namespace media

View File

@ -0,0 +1,158 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_PARSER_H_
#define MEDIA_FORMATS_WEBM_WEBM_PARSER_H_
#include <string>
#include <vector>
#include "base/basictypes.h"
#include "media/base/media_export.h"
namespace media {
// Interface for receiving WebM parser events.
//
// Each method is called when an element of the specified type is parsed.
// The ID of the element that was parsed is given along with the value
// stored in the element. List elements generate calls at the start and
// end of the list. Any pointers passed to these methods are only guaranteed
// to be valid for the life of that call. Each method (except for OnListStart)
// returns a bool that indicates whether the parsed data is valid. OnListStart
// returns a pointer to a WebMParserClient object, which should be used to
// handle elements parsed out of the list being started. If false (or NULL by
// OnListStart) is returned then the parse is immediately terminated and an
// error is reported by the parser.
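//
// A minimal client sketch (illustrative addition, not part of the original
// source; the AcceptAllClient name is made up). It accepts every element and
// handles nested lists itself:
//
//   class AcceptAllClient : public WebMParserClient {
//    public:
//     WebMParserClient* OnListStart(int id) override { return this; }
//     bool OnListEnd(int id) override { return true; }
//     bool OnUInt(int id, int64 val) override { return true; }
//     bool OnFloat(int id, double val) override { return true; }
//     bool OnBinary(int id, const uint8* data, int size) override {
//       return true;
//     }
//     bool OnString(int id, const std::string& str) override { return true; }
//   };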
class MEDIA_EXPORT WebMParserClient {
public:
virtual ~WebMParserClient();
virtual WebMParserClient* OnListStart(int id);
virtual bool OnListEnd(int id);
virtual bool OnUInt(int id, int64 val);
virtual bool OnFloat(int id, double val);
virtual bool OnBinary(int id, const uint8* data, int size);
virtual bool OnString(int id, const std::string& str);
protected:
WebMParserClient();
DISALLOW_COPY_AND_ASSIGN(WebMParserClient);
};
struct ListElementInfo;
// Parses a WebM list element and all of its children. This
// class supports incremental parsing of the list so Parse()
// can be called multiple times with pieces of the list.
// IsParsingComplete() will return true once the entire list has
// been parsed.
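//
// Illustrative usage sketch (an addition for clarity; |client| and |buffer|
// are assumed to exist and are not part of the original source):
//
//   WebMListParser parser(kWebMIdCluster, &client);
//   while (!parser.IsParsingComplete()) {
//     // Append newly received bytes to |buffer|, then:
//     int result = parser.Parse(buffer.data(), buffer.size());
//     if (result < 0)
//       break;  // Parse error.
//     // |result| bytes were consumed; 0 means more data is needed.
//     buffer.erase(buffer.begin(), buffer.begin() + result);
//   }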
class MEDIA_EXPORT WebMListParser {
public:
// |id| - Element ID of the list we intend to parse.
// |client| - Called as different elements in the list are parsed.
WebMListParser(int id, WebMParserClient* client);
~WebMListParser();
// Resets the state of the parser so it can start parsing a new list.
void Reset();
// Parses list data contained in |buf|.
//
// Returns < 0 if the parse fails.
// Returns 0 if more data is needed.
// Returning > 0 indicates success & the number of bytes parsed.
int Parse(const uint8* buf, int size);
// Returns true if the entire list has been parsed.
bool IsParsingComplete() const;
private:
enum State {
NEED_LIST_HEADER,
INSIDE_LIST,
DONE_PARSING_LIST,
PARSE_ERROR,
};
struct ListState {
int id_;
int64 size_;
int64 bytes_parsed_;
const ListElementInfo* element_info_;
WebMParserClient* client_;
};
void ChangeState(State new_state);
// Parses a single element in the current list.
//
// |header_size| - The size of the element header
// |id| - The ID of the element being parsed.
// |element_size| - The size of the element body.
// |data| - Pointer to the element contents.
// |size| - Number of bytes in |data|
// |client| - Client to pass the parsed data to.
//
// Returns < 0 if the parse fails.
// Returns 0 if more data is needed.
// Returning > 0 indicates success & the number of bytes parsed.
int ParseListElement(int header_size,
int id, int64 element_size,
const uint8* data, int size);
// Called when starting to parse a new list.
//
// |id| - The ID of the new list.
// |size| - The size of the new list.
// |client| - The client object to notify that a new list is being parsed.
//
// Returns true if this list can be started in the current context. False
// if starting this list causes some sort of parse error.
bool OnListStart(int id, int64 size);
// Called when the end of the current list has been reached. This may also
// signal the end of the current list's ancestors if the current list happens
// to be at the end of its parent.
//
// Returns true if no errors occurred while ending this list(s).
bool OnListEnd();
// Checks to see if |id_b| is a sibling or ancestor of |id_a|.
bool IsSiblingOrAncestor(int id_a, int id_b) const;
State state_;
// Element ID passed to the constructor.
const int root_id_;
// Element level for |root_id_|. Used to verify that elements appear at
// the correct level.
const int root_level_;
// WebMParserClient to handle the root list.
WebMParserClient* const root_client_;
// Stack of state for all the lists currently being parsed. Lists are
// added and removed from this stack as they are parsed.
std::vector<ListState> list_state_stack_;
DISALLOW_COPY_AND_ASSIGN(WebMListParser);
};
// Parses an element header & returns the ID and element size.
//
// Returns < 0 if the parse fails.
// Returns 0 if more data is needed.
// Returning > 0 indicates success & the number of bytes parsed.
// |*id| contains the element ID on success and is undefined otherwise.
// |*element_size| contains the element size on success and is undefined
// otherwise.
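// For example (an illustrative note, not part of the original comment), the
// five bytes 0x1F 0x43 0xB6 0x75 0x80 parse to *id == kWebMIdCluster
// (0x1F43B675) and *element_size == 0, with a return value of 5.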
int MEDIA_EXPORT WebMParseElementHeader(const uint8* buf, int size,
int* id, int64* element_size);
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_PARSER_H_

View File

@ -0,0 +1,412 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/cluster_builder.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::InSequence;
using ::testing::Return;
using ::testing::ReturnNull;
using ::testing::StrictMock;
using ::testing::_;
namespace media {
enum { kBlockCount = 5 };
class MockWebMParserClient : public WebMParserClient {
public:
virtual ~MockWebMParserClient() {}
// WebMParserClient methods.
MOCK_METHOD1(OnListStart, WebMParserClient*(int));
MOCK_METHOD1(OnListEnd, bool(int));
MOCK_METHOD2(OnUInt, bool(int, int64));
MOCK_METHOD2(OnFloat, bool(int, double));
MOCK_METHOD3(OnBinary, bool(int, const uint8*, int));
MOCK_METHOD2(OnString, bool(int, const std::string&));
};
class WebMParserTest : public testing::Test {
protected:
StrictMock<MockWebMParserClient> client_;
};
static scoped_ptr<Cluster> CreateCluster(int block_count) {
ClusterBuilder cb;
cb.SetClusterTimecode(0);
for (int i = 0; i < block_count; i++) {
uint8 data[] = { 0x00 };
cb.AddSimpleBlock(0, i, 0, data, sizeof(data));
}
return cb.Finish();
}
static void CreateClusterExpectations(int block_count,
bool is_complete_cluster,
MockWebMParserClient* client) {
InSequence s;
EXPECT_CALL(*client, OnListStart(kWebMIdCluster)).WillOnce(Return(client));
EXPECT_CALL(*client, OnUInt(kWebMIdTimecode, 0))
.WillOnce(Return(true));
for (int i = 0; i < block_count; i++) {
EXPECT_CALL(*client, OnBinary(kWebMIdSimpleBlock, _, _))
.WillOnce(Return(true));
}
if (is_complete_cluster)
EXPECT_CALL(*client, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
}
TEST_F(WebMParserTest, EmptyCluster) {
const uint8 kEmptyCluster[] = {
0x1F, 0x43, 0xB6, 0x75, 0x80 // CLUSTER (size = 0)
};
int size = sizeof(kEmptyCluster);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
EXPECT_CALL(client_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
WebMListParser parser(kWebMIdCluster, &client_);
EXPECT_EQ(size, parser.Parse(kEmptyCluster, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, EmptyClusterInSegment) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x85, // SEGMENT (size = 5)
0x1F, 0x43, 0xB6, 0x75, 0x80, // CLUSTER (size = 0)
};
int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&client_));
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
EXPECT_CALL(client_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
EXPECT_CALL(client_, OnListEnd(kWebMIdSegment)).WillOnce(Return(true));
WebMListParser parser(kWebMIdSegment, &client_);
EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
// Test the case where a non-list child element has a size
// that is beyond the end of the parent.
TEST_F(WebMParserTest, ChildNonListLargerThanParent) {
const uint8 kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0x81, // CLUSTER (size = 1)
0xE7, 0x81, 0x01, // Timecode (size=1, value=1)
};
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdCluster, &client_);
EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
// Test the case where a list child element has a size
// that is beyond the end of the parent.
TEST_F(WebMParserTest, ChildListLargerThanParent) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x85, // SEGMENT (size = 5)
0x1F, 0x43, 0xB6, 0x75, 0x81, 0x11 // CLUSTER (size = 1)
};
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdSegment, &client_);
EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
// Expecting to parse a Cluster, but get a Segment.
TEST_F(WebMParserTest, ListIdDoesNotMatch) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x80, // SEGMENT (size = 0)
};
WebMListParser parser(kWebMIdCluster, &client_);
EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, InvalidElementInList) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x82, // SEGMENT (size = 2)
0xAE, 0x80, // TrackEntry (size = 0)
};
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdSegment, &client_);
EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
// Test specific case of InvalidElementInList to verify EBMLHEADER within
// known-sized cluster causes parse error.
TEST_F(WebMParserTest, InvalidEBMLHeaderInCluster) {
const uint8 kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0x85, // CLUSTER (size = 5)
0x1A, 0x45, 0xDF, 0xA3, 0x80, // EBMLHEADER (size = 0)
};
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
WebMListParser parser(kWebMIdCluster, &client_);
EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
// Verify that EBMLHEADER ends a preceding "unknown"-sized CLUSTER.
TEST_F(WebMParserTest, UnknownSizeClusterFollowedByEBMLHeader) {
const uint8 kBuffer[] = {
0x1F, 0x43, 0xB6, 0x75, 0xFF, // CLUSTER (size = unknown; really 0 due to:)
0x1A, 0x45, 0xDF, 0xA3, 0x80, // EBMLHEADER (size = 0)
};
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
EXPECT_CALL(client_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
WebMListParser parser(kWebMIdCluster, &client_);
// List parse should consume the CLUSTER but not the EBMLHEADER.
EXPECT_EQ(5, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_TRUE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, VoidAndCRC32InList) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x99, // SEGMENT (size = 25)
0xEC, 0x83, 0x00, 0x00, 0x00, // Void (size = 3)
0xBF, 0x83, 0x00, 0x00, 0x00, // CRC32 (size = 3)
0x1F, 0x43, 0xB6, 0x75, 0x8A, // CLUSTER (size = 10)
0xEC, 0x83, 0x00, 0x00, 0x00, // Void (size = 3)
0xBF, 0x83, 0x00, 0x00, 0x00, // CRC32 (size = 3)
};
int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&client_));
EXPECT_CALL(client_, OnListStart(kWebMIdCluster)).WillOnce(Return(&client_));
EXPECT_CALL(client_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
EXPECT_CALL(client_, OnListEnd(kWebMIdSegment)).WillOnce(Return(true));
WebMListParser parser(kWebMIdSegment, &client_);
EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, ParseListElementWithSingleCall) {
scoped_ptr<Cluster> cluster(CreateCluster(kBlockCount));
CreateClusterExpectations(kBlockCount, true, &client_);
WebMListParser parser(kWebMIdCluster, &client_);
EXPECT_EQ(cluster->size(), parser.Parse(cluster->data(), cluster->size()));
EXPECT_TRUE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, ParseListElementWithMultipleCalls) {
scoped_ptr<Cluster> cluster(CreateCluster(kBlockCount));
CreateClusterExpectations(kBlockCount, true, &client_);
const uint8* data = cluster->data();
int size = cluster->size();
int default_parse_size = 3;
WebMListParser parser(kWebMIdCluster, &client_);
int parse_size = std::min(default_parse_size, size);
while (size > 0) {
int result = parser.Parse(data, parse_size);
ASSERT_GE(result, 0);
ASSERT_LE(result, parse_size);
if (result == 0) {
// The parser needs more data so increase the parse_size a little.
EXPECT_FALSE(parser.IsParsingComplete());
parse_size += default_parse_size;
parse_size = std::min(parse_size, size);
continue;
}
parse_size = default_parse_size;
data += result;
size -= result;
EXPECT_EQ((size == 0), parser.IsParsingComplete());
}
EXPECT_TRUE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, Reset) {
InSequence s;
scoped_ptr<Cluster> cluster(CreateCluster(kBlockCount));
// First expect all but the last block.
CreateClusterExpectations(kBlockCount - 1, false, &client_);
// Now expect all blocks.
CreateClusterExpectations(kBlockCount, true, &client_);
WebMListParser parser(kWebMIdCluster, &client_);
// Send slightly less than the full cluster so all but the last block is
// parsed.
int result = parser.Parse(cluster->data(), cluster->size() - 1);
EXPECT_GT(result, 0);
EXPECT_LT(result, cluster->size());
EXPECT_FALSE(parser.IsParsingComplete());
parser.Reset();
// Now parse a whole cluster to verify that all the blocks will get parsed.
EXPECT_EQ(cluster->size(), parser.Parse(cluster->data(), cluster->size()));
EXPECT_TRUE(parser.IsParsingComplete());
}
// Test the case where multiple clients are used for different lists.
TEST_F(WebMParserTest, MultipleClients) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x94, // SEGMENT (size = 20)
0x16, 0x54, 0xAE, 0x6B, 0x85, // TRACKS (size = 5)
0xAE, 0x83, // TRACKENTRY (size = 3)
0xD7, 0x81, 0x01, // TRACKNUMBER (size = 1)
0x1F, 0x43, 0xB6, 0x75, 0x85, // CLUSTER (size = 5)
0xEC, 0x83, 0x00, 0x00, 0x00, // Void (size = 3)
};
int size = sizeof(kBuffer);
StrictMock<MockWebMParserClient> c1_;
StrictMock<MockWebMParserClient> c2_;
StrictMock<MockWebMParserClient> c3_;
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(Return(&c1_));
EXPECT_CALL(c1_, OnListStart(kWebMIdTracks)).WillOnce(Return(&c2_));
EXPECT_CALL(c2_, OnListStart(kWebMIdTrackEntry)).WillOnce(Return(&c3_));
EXPECT_CALL(c3_, OnUInt(kWebMIdTrackNumber, 1)).WillOnce(Return(true));
EXPECT_CALL(c2_, OnListEnd(kWebMIdTrackEntry)).WillOnce(Return(true));
EXPECT_CALL(c1_, OnListEnd(kWebMIdTracks)).WillOnce(Return(true));
EXPECT_CALL(c1_, OnListStart(kWebMIdCluster)).WillOnce(Return(&c2_));
EXPECT_CALL(c1_, OnListEnd(kWebMIdCluster)).WillOnce(Return(true));
EXPECT_CALL(client_, OnListEnd(kWebMIdSegment)).WillOnce(Return(true));
WebMListParser parser(kWebMIdSegment, &client_);
EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
// Test the case where the client returns NULL from OnListStart(), which
// should terminate the parse with an error.
TEST_F(WebMParserTest, InvalidClient) {
const uint8 kBuffer[] = {
0x18, 0x53, 0x80, 0x67, 0x85, // SEGMENT (size = 5)
0x16, 0x54, 0xAE, 0x6B, 0x80, // TRACKS (size = 0)
};
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdSegment)).WillOnce(ReturnNull());
WebMListParser parser(kWebMIdSegment, &client_);
EXPECT_EQ(-1, parser.Parse(kBuffer, sizeof(kBuffer)));
EXPECT_FALSE(parser.IsParsingComplete());
}
TEST_F(WebMParserTest, ReservedIds) {
const uint8 k1ByteReservedId[] = { 0xFF, 0x81 };
const uint8 k2ByteReservedId[] = { 0x7F, 0xFF, 0x81 };
const uint8 k3ByteReservedId[] = { 0x3F, 0xFF, 0xFF, 0x81 };
const uint8 k4ByteReservedId[] = { 0x1F, 0xFF, 0xFF, 0xFF, 0x81 };
const uint8* kBuffers[] = {
k1ByteReservedId,
k2ByteReservedId,
k3ByteReservedId,
k4ByteReservedId
};
for (size_t i = 0; i < arraysize(kBuffers); i++) {
int id;
int64 element_size;
int buffer_size = 2 + i;
EXPECT_EQ(buffer_size, WebMParseElementHeader(kBuffers[i], buffer_size,
&id, &element_size));
EXPECT_EQ(id, kWebMReservedId);
EXPECT_EQ(element_size, 1);
}
}
TEST_F(WebMParserTest, ReservedSizes) {
const uint8 k1ByteReservedSize[] = { 0xA3, 0xFF };
const uint8 k2ByteReservedSize[] = { 0xA3, 0x7F, 0xFF };
const uint8 k3ByteReservedSize[] = { 0xA3, 0x3F, 0xFF, 0xFF };
const uint8 k4ByteReservedSize[] = { 0xA3, 0x1F, 0xFF, 0xFF, 0xFF };
const uint8 k5ByteReservedSize[] = { 0xA3, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF };
const uint8 k6ByteReservedSize[] = { 0xA3, 0x07, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF };
const uint8 k7ByteReservedSize[] = { 0xA3, 0x03, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF };
const uint8 k8ByteReservedSize[] = { 0xA3, 0x01, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF };
const uint8* kBuffers[] = {
k1ByteReservedSize,
k2ByteReservedSize,
k3ByteReservedSize,
k4ByteReservedSize,
k5ByteReservedSize,
k6ByteReservedSize,
k7ByteReservedSize,
k8ByteReservedSize
};
for (size_t i = 0; i < arraysize(kBuffers); i++) {
int id;
int64 element_size;
int buffer_size = 2 + i;
EXPECT_EQ(buffer_size, WebMParseElementHeader(kBuffers[i], buffer_size,
&id, &element_size));
EXPECT_EQ(id, 0xA3);
EXPECT_EQ(element_size, kWebMUnknownSize);
}
}
TEST_F(WebMParserTest, ZeroPaddedStrings) {
const uint8 kBuffer[] = {
0x1A, 0x45, 0xDF, 0xA3, 0x91, // EBMLHEADER (size = 17)
0x42, 0x82, 0x80, // DocType (size = 0)
0x42, 0x82, 0x81, 0x00, // DocType (size = 1) ""
0x42, 0x82, 0x81, 'a', // DocType (size = 1) "a"
0x42, 0x82, 0x83, 'a', 0x00, 0x00 // DocType (size = 3) "a"
};
int size = sizeof(kBuffer);
InSequence s;
EXPECT_CALL(client_, OnListStart(kWebMIdEBMLHeader))
.WillOnce(Return(&client_));
EXPECT_CALL(client_, OnString(kWebMIdDocType, "")).WillOnce(Return(true));
EXPECT_CALL(client_, OnString(kWebMIdDocType, "")).WillOnce(Return(true));
EXPECT_CALL(client_, OnString(kWebMIdDocType, "a")).WillOnce(Return(true));
EXPECT_CALL(client_, OnString(kWebMIdDocType, "a")).WillOnce(Return(true));
EXPECT_CALL(client_, OnListEnd(kWebMIdEBMLHeader)).WillOnce(Return(true));
WebMListParser parser(kWebMIdEBMLHeader, &client_);
EXPECT_EQ(size, parser.Parse(kBuffer, size));
EXPECT_TRUE(parser.IsParsingComplete());
}
} // namespace media

View File

@ -0,0 +1,283 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_stream_parser.h"
#include <string>
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/logging.h"
#include "media/base/timestamp_constants.h"
#include "media/formats/webm/webm_cluster_parser.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_content_encodings.h"
#include "media/formats/webm/webm_info_parser.h"
#include "media/formats/webm/webm_tracks_parser.h"
namespace media {
WebMStreamParser::WebMStreamParser()
: state_(kWaitingForInit),
unknown_segment_size_(false) {
}
WebMStreamParser::~WebMStreamParser() {
}
void WebMStreamParser::Init(
const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
bool ignore_text_tracks,
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const scoped_refptr<MediaLog>& media_log) {
DCHECK_EQ(state_, kWaitingForInit);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
DCHECK(!encrypted_media_init_data_cb.is_null());
DCHECK(!new_segment_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
ChangeState(kParsingHeaders);
init_cb_ = init_cb;
config_cb_ = config_cb;
new_buffers_cb_ = new_buffers_cb;
ignore_text_tracks_ = ignore_text_tracks;
encrypted_media_init_data_cb_ = encrypted_media_init_data_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
media_log_ = media_log;
}
void WebMStreamParser::Flush() {
DCHECK_NE(state_, kWaitingForInit);
byte_queue_.Reset();
if (cluster_parser_)
cluster_parser_->Reset();
if (state_ == kParsingClusters) {
ChangeState(kParsingHeaders);
end_of_segment_cb_.Run();
}
}
bool WebMStreamParser::Parse(const uint8* buf, int size) {
DCHECK_NE(state_, kWaitingForInit);
if (state_ == kError)
return false;
byte_queue_.Push(buf, size);
int result = 0;
int bytes_parsed = 0;
const uint8* cur = NULL;
int cur_size = 0;
byte_queue_.Peek(&cur, &cur_size);
while (cur_size > 0) {
State oldState = state_;
switch (state_) {
case kParsingHeaders:
result = ParseInfoAndTracks(cur, cur_size);
break;
case kParsingClusters:
result = ParseCluster(cur, cur_size);
break;
case kWaitingForInit:
case kError:
return false;
}
if (result < 0) {
ChangeState(kError);
return false;
}
if (state_ == oldState && result == 0)
break;
DCHECK_GE(result, 0);
cur += result;
cur_size -= result;
bytes_parsed += result;
}
byte_queue_.Pop(bytes_parsed);
return true;
}
void WebMStreamParser::ChangeState(State new_state) {
DVLOG(1) << "ChangeState() : " << state_ << " -> " << new_state;
state_ = new_state;
}
int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
DVLOG(2) << "ParseInfoAndTracks()";
DCHECK(data);
DCHECK_GT(size, 0);
const uint8* cur = data;
int cur_size = size;
int bytes_parsed = 0;
int id;
int64 element_size;
int result = WebMParseElementHeader(cur, cur_size, &id, &element_size);
if (result <= 0)
return result;
switch (id) {
case kWebMIdEBMLHeader:
case kWebMIdSeekHead:
case kWebMIdVoid:
case kWebMIdCRC32:
case kWebMIdCues:
case kWebMIdChapters:
case kWebMIdTags:
case kWebMIdAttachments:
// TODO(matthewjheaney): Implement support for chapters.
if (cur_size < (result + element_size)) {
// We don't have the whole element yet. Signal we need more data.
return 0;
}
// Skip the element.
return result + element_size;
break;
case kWebMIdCluster:
if (!cluster_parser_) {
MEDIA_LOG(ERROR, media_log_) << "Found Cluster element before Info.";
return -1;
}
ChangeState(kParsingClusters);
new_segment_cb_.Run();
return 0;
break;
case kWebMIdSegment:
// Segment of unknown size indicates live stream.
if (element_size == kWebMUnknownSize)
unknown_segment_size_ = true;
// Just consume the segment header.
return result;
break;
case kWebMIdInfo:
// We've found the element we are looking for.
break;
default: {
MEDIA_LOG(ERROR, media_log_) << "Unexpected element ID 0x" << std::hex
<< id;
return -1;
}
}
WebMInfoParser info_parser;
result = info_parser.Parse(cur, cur_size);
if (result <= 0)
return result;
cur += result;
cur_size -= result;
bytes_parsed += result;
WebMTracksParser tracks_parser(media_log_, ignore_text_tracks_);
result = tracks_parser.Parse(cur, cur_size);
if (result <= 0)
return result;
bytes_parsed += result;
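// The Info element's TimecodeScale is expressed in nanoseconds per timecode
// tick (1,000,000, i.e. 1 ms, by default), so dividing by 1000 yields
// microseconds per tick; the Duration reported by |info_parser| is a count
// of such ticks.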
double timecode_scale_in_us = info_parser.timecode_scale() / 1000.0;
InitParameters params(kInfiniteDuration());
if (info_parser.duration() > 0) {
int64 duration_in_us = info_parser.duration() * timecode_scale_in_us;
params.duration = base::TimeDelta::FromMicroseconds(duration_in_us);
}
params.timeline_offset = info_parser.date_utc();
if (unknown_segment_size_ && (info_parser.duration() <= 0) &&
!info_parser.date_utc().is_null()) {
params.liveness = DemuxerStream::LIVENESS_LIVE;
} else if (info_parser.duration() >= 0) {
params.liveness = DemuxerStream::LIVENESS_RECORDED;
} else {
params.liveness = DemuxerStream::LIVENESS_UNKNOWN;
}
const AudioDecoderConfig& audio_config = tracks_parser.audio_decoder_config();
if (audio_config.is_encrypted())
OnEncryptedMediaInitData(tracks_parser.audio_encryption_key_id());
const VideoDecoderConfig& video_config = tracks_parser.video_decoder_config();
if (video_config.is_encrypted())
OnEncryptedMediaInitData(tracks_parser.video_encryption_key_id());
if (!config_cb_.Run(audio_config,
video_config,
tracks_parser.text_tracks())) {
DVLOG(1) << "New config data isn't allowed.";
return -1;
}
cluster_parser_.reset(new WebMClusterParser(
info_parser.timecode_scale(), tracks_parser.audio_track_num(),
tracks_parser.GetAudioDefaultDuration(timecode_scale_in_us),
tracks_parser.video_track_num(),
tracks_parser.GetVideoDefaultDuration(timecode_scale_in_us),
tracks_parser.text_tracks(), tracks_parser.ignored_tracks(),
tracks_parser.audio_encryption_key_id(),
tracks_parser.video_encryption_key_id(), audio_config.codec(),
media_log_));
if (!init_cb_.is_null())
base::ResetAndReturn(&init_cb_).Run(params);
return bytes_parsed;
}
int WebMStreamParser::ParseCluster(const uint8* data, int size) {
if (!cluster_parser_)
return -1;
int bytes_parsed = cluster_parser_->Parse(data, size);
if (bytes_parsed < 0)
return bytes_parsed;
const BufferQueue& audio_buffers = cluster_parser_->GetAudioBuffers();
const BufferQueue& video_buffers = cluster_parser_->GetVideoBuffers();
const TextBufferQueueMap& text_map = cluster_parser_->GetTextBuffers();
bool cluster_ended = cluster_parser_->cluster_ended();
if ((!audio_buffers.empty() || !video_buffers.empty() ||
!text_map.empty()) &&
!new_buffers_cb_.Run(audio_buffers, video_buffers, text_map)) {
return -1;
}
if (cluster_ended) {
ChangeState(kParsingHeaders);
end_of_segment_cb_.Run();
}
return bytes_parsed;
}
void WebMStreamParser::OnEncryptedMediaInitData(const std::string& key_id) {
std::vector<uint8> key_id_vector(key_id.begin(), key_id.end());
encrypted_media_init_data_cb_.Run(EmeInitDataType::WEBM, key_id_vector);
}
} // namespace media

View File

@ -0,0 +1,89 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_STREAM_PARSER_H_
#define MEDIA_FORMATS_WEBM_WEBM_STREAM_PARSER_H_
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/byte_queue.h"
#include "media/base/stream_parser.h"
#include "media/base/video_decoder_config.h"
namespace media {
class WebMClusterParser;
class WebMStreamParser : public StreamParser {
public:
WebMStreamParser();
~WebMStreamParser() override;
// StreamParser implementation.
void Init(const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
bool ignore_text_tracks,
const EncryptedMediaInitDataCB& encrypted_media_init_data_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const scoped_refptr<MediaLog>& media_log) override;
void Flush() override;
bool Parse(const uint8* buf, int size) override;
private:
enum State {
kWaitingForInit,
kParsingHeaders,
kParsingClusters,
kError
};
void ChangeState(State new_state);
// Parses WebM Header, Info, Tracks elements. It also skips other level 1
// elements that are not used right now. Once the Info & Tracks elements have
// been parsed, this method will transition the parser from kParsingHeaders to
// kParsingClusters.
//
// Returns < 0 if the parse fails.
// Returns 0 if more data is needed.
// Returning > 0 indicates success & the number of bytes parsed.
int ParseInfoAndTracks(const uint8* data, int size);
// Incrementally parses WebM cluster elements. This method also skips
// CUES elements if they are encountered since we currently don't use the
// data in these elements.
//
// Returns < 0 if the parse fails.
// Returns 0 if more data is needed.
// Returning > 0 indicates success & the number of bytes parsed.
int ParseCluster(const uint8* data, int size);
// Fire needkey event through the |encrypted_media_init_data_cb_|.
void OnEncryptedMediaInitData(const std::string& key_id);
State state_;
InitCB init_cb_;
NewConfigCB config_cb_;
NewBuffersCB new_buffers_cb_;
bool ignore_text_tracks_;
EncryptedMediaInitDataCB encrypted_media_init_data_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
scoped_refptr<MediaLog> media_log_;
bool unknown_segment_size_;
scoped_ptr<WebMClusterParser> cluster_parser_;
ByteQueue byte_queue_;
DISALLOW_COPY_AND_ASSIGN(WebMStreamParser);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_STREAM_PARSER_H_

View File

@ -0,0 +1,344 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_tracks_parser.h"
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "media/base/timestamp_constants.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_content_encodings.h"
namespace media {
static TextKind CodecIdToTextKind(const std::string& codec_id) {
if (codec_id == kWebMCodecSubtitles)
return kTextSubtitles;
if (codec_id == kWebMCodecCaptions)
return kTextCaptions;
if (codec_id == kWebMCodecDescriptions)
return kTextDescriptions;
if (codec_id == kWebMCodecMetadata)
return kTextMetadata;
return kTextNone;
}
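// Rounds |duration_in_ns| down to a whole number of timecode ticks, returning
// kNoTimestamp() when the duration is unset (<= 0) or smaller than one tick.
// Worked example (added for clarity): with |timecode_scale_in_us| == 1000
// (the 1 ms default) and |duration_in_ns| == 4,500,000, the value becomes
// 4500 us, truncates to 4 ticks, and is returned as 4000 us.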
static base::TimeDelta PrecisionCappedDefaultDuration(
const double timecode_scale_in_us, const int64 duration_in_ns) {
if (duration_in_ns <= 0)
return kNoTimestamp();
int64 mult = duration_in_ns / 1000;
mult /= timecode_scale_in_us;
if (mult == 0)
return kNoTimestamp();
mult = static_cast<double>(mult) * timecode_scale_in_us;
return base::TimeDelta::FromMicroseconds(mult);
}
WebMTracksParser::WebMTracksParser(const scoped_refptr<MediaLog>& media_log,
bool ignore_text_tracks)
: track_type_(-1),
track_num_(-1),
seek_preroll_(-1),
codec_delay_(-1),
default_duration_(-1),
audio_track_num_(-1),
audio_default_duration_(-1),
video_track_num_(-1),
video_default_duration_(-1),
ignore_text_tracks_(ignore_text_tracks),
media_log_(media_log),
audio_client_(media_log),
video_client_(media_log) {
}
WebMTracksParser::~WebMTracksParser() {}
int WebMTracksParser::Parse(const uint8* buf, int size) {
track_type_ = -1;
track_num_ = -1;
default_duration_ = -1;
track_name_.clear();
track_language_.clear();
audio_track_num_ = -1;
audio_default_duration_ = -1;
audio_decoder_config_ = AudioDecoderConfig();
video_track_num_ = -1;
video_default_duration_ = -1;
video_decoder_config_ = VideoDecoderConfig();
text_tracks_.clear();
ignored_tracks_.clear();
WebMListParser parser(kWebMIdTracks, this);
int result = parser.Parse(buf, size);
if (result <= 0)
return result;
// For now we do all or nothing parsing.
return parser.IsParsingComplete() ? result : 0;
}
base::TimeDelta WebMTracksParser::GetAudioDefaultDuration(
const double timecode_scale_in_us) const {
return PrecisionCappedDefaultDuration(timecode_scale_in_us,
audio_default_duration_);
}
base::TimeDelta WebMTracksParser::GetVideoDefaultDuration(
const double timecode_scale_in_us) const {
return PrecisionCappedDefaultDuration(timecode_scale_in_us,
video_default_duration_);
}
WebMParserClient* WebMTracksParser::OnListStart(int id) {
if (id == kWebMIdContentEncodings) {
DCHECK(!track_content_encodings_client_.get());
track_content_encodings_client_.reset(
new WebMContentEncodingsClient(media_log_));
return track_content_encodings_client_->OnListStart(id);
}
if (id == kWebMIdTrackEntry) {
track_type_ = -1;
track_num_ = -1;
default_duration_ = -1;
track_name_.clear();
track_language_.clear();
codec_id_ = "";
codec_private_.clear();
audio_client_.Reset();
video_client_.Reset();
return this;
}
if (id == kWebMIdAudio)
return &audio_client_;
if (id == kWebMIdVideo)
return &video_client_;
return this;
}
bool WebMTracksParser::OnListEnd(int id) {
if (id == kWebMIdContentEncodings) {
DCHECK(track_content_encodings_client_.get());
return track_content_encodings_client_->OnListEnd(id);
}
if (id == kWebMIdTrackEntry) {
if (track_type_ == -1 || track_num_ == -1) {
MEDIA_LOG(ERROR, media_log_) << "Missing TrackEntry data for "
<< " TrackType " << track_type_
<< " TrackNum " << track_num_;
return false;
}
if (track_type_ != kWebMTrackTypeAudio &&
track_type_ != kWebMTrackTypeVideo &&
track_type_ != kWebMTrackTypeSubtitlesOrCaptions &&
track_type_ != kWebMTrackTypeDescriptionsOrMetadata) {
MEDIA_LOG(ERROR, media_log_) << "Unexpected TrackType " << track_type_;
return false;
}
TextKind text_track_kind = kTextNone;
if (track_type_ == kWebMTrackTypeSubtitlesOrCaptions) {
text_track_kind = CodecIdToTextKind(codec_id_);
if (text_track_kind == kTextNone) {
MEDIA_LOG(ERROR, media_log_) << "Missing TrackEntry CodecID"
<< " TrackNum " << track_num_;
return false;
}
if (text_track_kind != kTextSubtitles &&
text_track_kind != kTextCaptions) {
MEDIA_LOG(ERROR, media_log_) << "Wrong TrackEntry CodecID"
<< " TrackNum " << track_num_;
return false;
}
} else if (track_type_ == kWebMTrackTypeDescriptionsOrMetadata) {
text_track_kind = CodecIdToTextKind(codec_id_);
if (text_track_kind == kTextNone) {
MEDIA_LOG(ERROR, media_log_) << "Missing TrackEntry CodecID"
<< " TrackNum " << track_num_;
return false;
}
if (text_track_kind != kTextDescriptions &&
text_track_kind != kTextMetadata) {
MEDIA_LOG(ERROR, media_log_) << "Wrong TrackEntry CodecID"
<< " TrackNum " << track_num_;
return false;
}
}
std::string encryption_key_id;
if (track_content_encodings_client_) {
DCHECK(!track_content_encodings_client_->content_encodings().empty());
// If there are multiple ContentEncoding entries in one track, always choose
// the key id in the first ContentEncoding as the key id of the track.
encryption_key_id = track_content_encodings_client_->
content_encodings()[0]->encryption_key_id();
}
if (track_type_ == kWebMTrackTypeAudio) {
if (audio_track_num_ == -1) {
audio_track_num_ = track_num_;
audio_encryption_key_id_ = encryption_key_id;
if (default_duration_ == 0) {
MEDIA_LOG(ERROR, media_log_) << "Illegal 0ns audio TrackEntry "
"DefaultDuration";
return false;
}
audio_default_duration_ = default_duration_;
DCHECK(!audio_decoder_config_.IsValidConfig());
if (!audio_client_.InitializeConfig(
codec_id_, codec_private_, seek_preroll_, codec_delay_,
!audio_encryption_key_id_.empty(), &audio_decoder_config_)) {
return false;
}
} else {
MEDIA_LOG(DEBUG, media_log_) << "Ignoring audio track " << track_num_;
ignored_tracks_.insert(track_num_);
}
} else if (track_type_ == kWebMTrackTypeVideo) {
if (video_track_num_ == -1) {
video_track_num_ = track_num_;
video_encryption_key_id_ = encryption_key_id;
if (default_duration_ == 0) {
MEDIA_LOG(ERROR, media_log_) << "Illegal 0ns video TrackEntry "
"DefaultDuration";
return false;
}
video_default_duration_ = default_duration_;
DCHECK(!video_decoder_config_.IsValidConfig());
if (!video_client_.InitializeConfig(
codec_id_, codec_private_, !video_encryption_key_id_.empty(),
&video_decoder_config_)) {
return false;
}
} else {
MEDIA_LOG(DEBUG, media_log_) << "Ignoring video track " << track_num_;
ignored_tracks_.insert(track_num_);
}
} else if (track_type_ == kWebMTrackTypeSubtitlesOrCaptions ||
track_type_ == kWebMTrackTypeDescriptionsOrMetadata) {
if (ignore_text_tracks_) {
MEDIA_LOG(DEBUG, media_log_) << "Ignoring text track " << track_num_;
ignored_tracks_.insert(track_num_);
} else {
std::string track_num = base::Int64ToString(track_num_);
text_tracks_[track_num_] = TextTrackConfig(
text_track_kind, track_name_, track_language_, track_num);
}
} else {
MEDIA_LOG(ERROR, media_log_) << "Unexpected TrackType " << track_type_;
return false;
}
track_type_ = -1;
track_num_ = -1;
default_duration_ = -1;
track_name_.clear();
track_language_.clear();
codec_id_ = "";
codec_private_.clear();
track_content_encodings_client_.reset();
audio_client_.Reset();
video_client_.Reset();
return true;
}
return true;
}
bool WebMTracksParser::OnUInt(int id, int64 val) {
int64* dst = NULL;
switch (id) {
case kWebMIdTrackNumber:
dst = &track_num_;
break;
case kWebMIdTrackType:
dst = &track_type_;
break;
case kWebMIdSeekPreRoll:
dst = &seek_preroll_;
break;
case kWebMIdCodecDelay:
dst = &codec_delay_;
break;
case kWebMIdDefaultDuration:
dst = &default_duration_;
break;
default:
return true;
}
if (*dst != -1) {
MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex << id
<< " specified";
return false;
}
*dst = val;
return true;
}
bool WebMTracksParser::OnFloat(int id, double val) {
return true;
}
bool WebMTracksParser::OnBinary(int id, const uint8* data, int size) {
if (id == kWebMIdCodecPrivate) {
if (!codec_private_.empty()) {
MEDIA_LOG(ERROR, media_log_)
<< "Multiple CodecPrivate fields in a track.";
return false;
}
codec_private_.assign(data, data + size);
return true;
}
return true;
}
bool WebMTracksParser::OnString(int id, const std::string& str) {
if (id == kWebMIdCodecID) {
if (!codec_id_.empty()) {
MEDIA_LOG(ERROR, media_log_) << "Multiple CodecID fields in a track";
return false;
}
codec_id_ = str;
return true;
}
if (id == kWebMIdName) {
track_name_ = str;
return true;
}
if (id == kWebMIdLanguage) {
track_language_ = str;
return true;
}
return true;
}
} // namespace media
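The two default-duration getters at the top of this file cap the stored nanosecond DefaultDuration to the Segment's timecode precision. A minimal sketch of that capping, inferred from the expectations in webm_tracks_parser_unittest.cc below (the real PrecisionCappedDefaultDuration helper appears earlier in this file and may be implemented differently):
// Sketch only. Assumes base::TimeDelta from "base/time/time.h" and
// kNoTimestamp() from "media/base/timestamp_constants.h".
base::TimeDelta PrecisionCappedDefaultDurationSketch(
    const double timecode_scale_in_us, const int64 duration_in_ns) {
  if (duration_in_ns <= 0)
    return kNoTimestamp();
  // Floor to whole timecode ticks; a duration shorter than one tick cannot
  // be represented and is treated as unset.
  int64 ticks =
      static_cast<int64>(duration_in_ns / (timecode_scale_in_us * 1000.0));
  if (ticks < 1)
    return kNoTimestamp();
  // Floor the result to whole microseconds.
  return base::TimeDelta::FromMicroseconds(
      static_cast<int64>(ticks * timecode_scale_in_us));
}
// Example: with a 1000 us (1 ms) timecode scale, a 12345678 ns
// DefaultDuration yields 12 ticks, i.e. 12000 us.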

View File

@ -0,0 +1,119 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_TRACKS_PARSER_H_
#define MEDIA_FORMATS_WEBM_WEBM_TRACKS_PARSER_H_
#include <map>
#include <set>
#include <string>
#include <vector>
#include "base/compiler_specific.h"
#include "base/memory/scoped_ptr.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/media_log.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/webm/webm_audio_client.h"
#include "media/formats/webm/webm_content_encodings_client.h"
#include "media/formats/webm/webm_parser.h"
#include "media/formats/webm/webm_video_client.h"
namespace media {
// Parser for WebM Tracks element.
class MEDIA_EXPORT WebMTracksParser : public WebMParserClient {
public:
WebMTracksParser(const scoped_refptr<MediaLog>& media_log,
bool ignore_text_tracks);
~WebMTracksParser() override;
// Parses a WebM Tracks element in |buf|.
//
// Returns -1 if the parse fails.
// Returns 0 if more data is needed.
// Returns the number of bytes parsed on success.
int Parse(const uint8* buf, int size);
int64 audio_track_num() const { return audio_track_num_; }
int64 video_track_num() const { return video_track_num_; }
// If the TrackEntry DefaultDuration field was present for the associated
// audio or video track, returns that value converted from ns to
// base::TimeDelta with precision no greater than |timecode_scale_in_us|.
// Otherwise returns kNoTimestamp().
base::TimeDelta GetAudioDefaultDuration(
const double timecode_scale_in_us) const;
base::TimeDelta GetVideoDefaultDuration(
const double timecode_scale_in_us) const;
const std::set<int64>& ignored_tracks() const { return ignored_tracks_; }
const std::string& audio_encryption_key_id() const {
return audio_encryption_key_id_;
}
const AudioDecoderConfig& audio_decoder_config() {
return audio_decoder_config_;
}
const std::string& video_encryption_key_id() const {
return video_encryption_key_id_;
}
const VideoDecoderConfig& video_decoder_config() {
return video_decoder_config_;
}
typedef std::map<int, TextTrackConfig> TextTracks;
const TextTracks& text_tracks() const {
return text_tracks_;
}
private:
// WebMParserClient implementation.
WebMParserClient* OnListStart(int id) override;
bool OnListEnd(int id) override;
bool OnUInt(int id, int64 val) override;
bool OnFloat(int id, double val) override;
bool OnBinary(int id, const uint8* data, int size) override;
bool OnString(int id, const std::string& str) override;
int64 track_type_;
int64 track_num_;
std::string track_name_;
std::string track_language_;
std::string codec_id_;
std::vector<uint8> codec_private_;
int64 seek_preroll_;
int64 codec_delay_;
int64 default_duration_;
scoped_ptr<WebMContentEncodingsClient> track_content_encodings_client_;
int64 audio_track_num_;
int64 audio_default_duration_;
int64 video_track_num_;
int64 video_default_duration_;
bool ignore_text_tracks_;
TextTracks text_tracks_;
std::set<int64> ignored_tracks_;
std::string audio_encryption_key_id_;
std::string video_encryption_key_id_;
scoped_refptr<MediaLog> media_log_;
WebMAudioClient audio_client_;
AudioDecoderConfig audio_decoder_config_;
WebMVideoClient video_client_;
VideoDecoderConfig video_decoder_config_;
DISALLOW_COPY_AND_ASSIGN(WebMTracksParser);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_TRACKS_PARSER_H_
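A minimal usage sketch for this class, assuming the caller already has a scoped_refptr<MediaLog> |media_log| and a std::vector<uint8> |tracks_bytes| holding a complete Tracks element (both hypothetical names):
WebMTracksParser parser(media_log, false /* ignore_text_tracks */);
int result = parser.Parse(&tracks_bytes[0],
                          static_cast<int>(tracks_bytes.size()));
if (result < 0) {
  // Parse error.
} else if (result == 0) {
  // The Tracks element is incomplete; feed more data and retry.
} else {
  // |result| bytes were consumed; track info is now available.
  const AudioDecoderConfig& audio_config = parser.audio_decoder_config();
  const VideoDecoderConfig& video_config = parser.video_decoder_config();
  int64 audio_track = parser.audio_track_num();  // -1 if no audio track.
}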

View File

@ -0,0 +1,211 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/logging.h"
#include "media/base/channel_layout.h"
#include "media/base/mock_media_log.h"
#include "media/base/timestamp_constants.h"
#include "media/formats/webm/tracks_builder.h"
#include "media/formats/webm/webm_constants.h"
#include "media/formats/webm/webm_tracks_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::HasSubstr;
using ::testing::InSequence;
using ::testing::Return;
using ::testing::StrictMock;
using ::testing::_;
namespace media {
static const double kDefaultTimecodeScaleInUs = 1000.0; // 1 ms resolution
class WebMTracksParserTest : public testing::Test {
public:
WebMTracksParserTest() : media_log_(new StrictMock<MockMediaLog>()) {}
protected:
void VerifyTextTrackInfo(const uint8* buffer,
int buffer_size,
TextKind text_kind,
const std::string& name,
const std::string& language) {
scoped_ptr<WebMTracksParser> parser(
new WebMTracksParser(media_log_, false));
int result = parser->Parse(buffer, buffer_size);
EXPECT_GT(result, 0);
EXPECT_EQ(result, buffer_size);
const WebMTracksParser::TextTracks& text_tracks = parser->text_tracks();
EXPECT_EQ(text_tracks.size(), WebMTracksParser::TextTracks::size_type(1));
const WebMTracksParser::TextTracks::const_iterator itr =
text_tracks.begin();
EXPECT_EQ(itr->first, 1); // track num
const TextTrackConfig& config = itr->second;
EXPECT_EQ(config.kind(), text_kind);
EXPECT_TRUE(config.label() == name);
EXPECT_TRUE(config.language() == language);
}
scoped_refptr<StrictMock<MockMediaLog>> media_log_;
};
TEST_F(WebMTracksParserTest, SubtitleNoNameNoLang) {
InSequence s;
TracksBuilder tb;
tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "", "");
const std::vector<uint8> buf = tb.Finish();
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "");
}
TEST_F(WebMTracksParserTest, SubtitleYesNameNoLang) {
InSequence s;
TracksBuilder tb;
tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "Spock", "");
const std::vector<uint8> buf = tb.Finish();
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Spock", "");
}
TEST_F(WebMTracksParserTest, SubtitleNoNameYesLang) {
InSequence s;
TracksBuilder tb;
tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "", "eng");
const std::vector<uint8> buf = tb.Finish();
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "", "eng");
}
TEST_F(WebMTracksParserTest, SubtitleYesNameYesLang) {
InSequence s;
TracksBuilder tb;
tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "Picard", "fre");
const std::vector<uint8> buf = tb.Finish();
VerifyTextTrackInfo(&buf[0], buf.size(), kTextSubtitles, "Picard", "fre");
}
TEST_F(WebMTracksParserTest, IgnoringTextTracks) {
InSequence s;
TracksBuilder tb;
tb.AddTextTrack(1, 1, kWebMCodecSubtitles, "Subtitles", "fre");
tb.AddTextTrack(2, 2, kWebMCodecSubtitles, "Commentary", "fre");
const std::vector<uint8> buf = tb.Finish();
scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
EXPECT_MEDIA_LOG(HasSubstr("Ignoring text track 1"));
EXPECT_MEDIA_LOG(HasSubstr("Ignoring text track 2"));
int result = parser->Parse(&buf[0], buf.size());
EXPECT_GT(result, 0);
EXPECT_EQ(result, static_cast<int>(buf.size()));
EXPECT_EQ(parser->text_tracks().size(), 0u);
const std::set<int64>& ignored_tracks = parser->ignored_tracks();
EXPECT_TRUE(ignored_tracks.find(1) != ignored_tracks.end());
EXPECT_TRUE(ignored_tracks.find(2) != ignored_tracks.end());
// Test again without ignoring the text tracks.
parser.reset(new WebMTracksParser(media_log_, false));
result = parser->Parse(&buf[0], buf.size());
EXPECT_GT(result, 0);
EXPECT_EQ(parser->ignored_tracks().size(), 0u);
EXPECT_EQ(parser->text_tracks().size(), 2u);
}
TEST_F(WebMTracksParserTest, AudioVideoDefaultDurationUnset) {
// Other audio/video decoder config fields are necessary in the test
// audio/video TrackEntry configurations. This test does only very minimal
// verification of their inclusion and parsing; the goal is to confirm that
// TrackEntry DefaultDuration defaults to -1 if it is not included in the
// audio or video TrackEntry.
TracksBuilder tb;
tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", -1, 2, 8000);
tb.AddVideoTrack(2, 2, "V_VP8", "video", "", -1, 320, 240);
const std::vector<uint8> buf = tb.Finish();
scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
int result = parser->Parse(&buf[0], buf.size());
EXPECT_LE(0, result);
EXPECT_EQ(static_cast<int>(buf.size()), result);
EXPECT_EQ(kNoTimestamp(),
parser->GetAudioDefaultDuration(kDefaultTimecodeScaleInUs));
EXPECT_EQ(kNoTimestamp(),
parser->GetVideoDefaultDuration(kDefaultTimecodeScaleInUs));
const VideoDecoderConfig& video_config = parser->video_decoder_config();
EXPECT_TRUE(video_config.IsValidConfig());
EXPECT_EQ(320, video_config.coded_size().width());
EXPECT_EQ(240, video_config.coded_size().height());
const AudioDecoderConfig& audio_config = parser->audio_decoder_config();
EXPECT_TRUE(audio_config.IsValidConfig());
EXPECT_EQ(CHANNEL_LAYOUT_STEREO, audio_config.channel_layout());
EXPECT_EQ(8000, audio_config.samples_per_second());
}
TEST_F(WebMTracksParserTest, AudioVideoDefaultDurationSet) {
// Confirm audio or video TrackEntry DefaultDuration values are parsed, if
// present.
TracksBuilder tb;
tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", 12345678, 2, 8000);
tb.AddVideoTrack(2, 2, "V_VP8", "video", "", 987654321, 320, 240);
const std::vector<uint8> buf = tb.Finish();
scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
int result = parser->Parse(&buf[0], buf.size());
EXPECT_LE(0, result);
EXPECT_EQ(static_cast<int>(buf.size()), result);
EXPECT_EQ(base::TimeDelta::FromMicroseconds(12000),
parser->GetAudioDefaultDuration(kDefaultTimecodeScaleInUs));
EXPECT_EQ(base::TimeDelta::FromMicroseconds(985000),
parser->GetVideoDefaultDuration(5000.0)); // 5 ms resolution
EXPECT_EQ(kNoTimestamp(), parser->GetAudioDefaultDuration(12346.0));
EXPECT_EQ(base::TimeDelta::FromMicroseconds(12345),
parser->GetAudioDefaultDuration(12345.0));
EXPECT_EQ(base::TimeDelta::FromMicroseconds(12003),
parser->GetAudioDefaultDuration(1000.3)); // 1.0003 ms resolution
}
TEST_F(WebMTracksParserTest, InvalidZeroDefaultDurationSet) {
// Confirm parse error if TrackEntry DefaultDuration is present, but is 0ns.
TracksBuilder tb(true);
tb.AddAudioTrack(1, 1, "A_VORBIS", "audio", "", 0, 2, 8000);
const std::vector<uint8> buf = tb.Finish();
scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
EXPECT_MEDIA_LOG(HasSubstr("Illegal 0ns audio TrackEntry DefaultDuration"));
EXPECT_EQ(-1, parser->Parse(&buf[0], buf.size()));
}
TEST_F(WebMTracksParserTest, HighTrackUID) {
// Confirm no parse error if TrackEntry TrackUID has MSb set
// (http://crbug.com/397067).
TracksBuilder tb(true);
tb.AddAudioTrack(1, 1ULL << 31, "A_VORBIS", "audio", "", 40, 2, 8000);
const std::vector<uint8> buf = tb.Finish();
scoped_ptr<WebMTracksParser> parser(new WebMTracksParser(media_log_, true));
EXPECT_GT(parser->Parse(&buf[0], buf.size()), 0);
}
} // namespace media

View File

@ -0,0 +1,163 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_video_client.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/webm/webm_constants.h"
namespace media {
WebMVideoClient::WebMVideoClient(const scoped_refptr<MediaLog>& media_log)
: media_log_(media_log) {
Reset();
}
WebMVideoClient::~WebMVideoClient() {
}
void WebMVideoClient::Reset() {
pixel_width_ = -1;
pixel_height_ = -1;
crop_bottom_ = -1;
crop_top_ = -1;
crop_left_ = -1;
crop_right_ = -1;
display_width_ = -1;
display_height_ = -1;
display_unit_ = -1;
alpha_mode_ = -1;
}
bool WebMVideoClient::InitializeConfig(
const std::string& codec_id, const std::vector<uint8>& codec_private,
bool is_encrypted, VideoDecoderConfig* config) {
DCHECK(config);
VideoCodec video_codec = kUnknownVideoCodec;
VideoCodecProfile profile = VIDEO_CODEC_PROFILE_UNKNOWN;
if (codec_id == "V_VP8") {
video_codec = kCodecVP8;
profile = VP8PROFILE_ANY;
} else if (codec_id == "V_VP9") {
video_codec = kCodecVP9;
profile = VP9PROFILE_ANY;
} else {
MEDIA_LOG(ERROR, media_log_) << "Unsupported video codec_id " << codec_id;
return false;
}
VideoPixelFormat format =
(alpha_mode_ == 1) ? PIXEL_FORMAT_YV12A : PIXEL_FORMAT_YV12;
if (pixel_width_ <= 0 || pixel_height_ <= 0)
return false;
// Set crop and display unit defaults if these elements are not present.
if (crop_bottom_ == -1)
crop_bottom_ = 0;
if (crop_top_ == -1)
crop_top_ = 0;
if (crop_left_ == -1)
crop_left_ = 0;
if (crop_right_ == -1)
crop_right_ = 0;
if (display_unit_ == -1)
display_unit_ = 0;
gfx::Size coded_size(pixel_width_, pixel_height_);
gfx::Rect visible_rect(crop_top_, crop_left_,
pixel_width_ - (crop_left_ + crop_right_),
pixel_height_ - (crop_top_ + crop_bottom_));
if (display_unit_ == 0) {
if (display_width_ <= 0)
display_width_ = visible_rect.width();
if (display_height_ <= 0)
display_height_ = visible_rect.height();
} else if (display_unit_ == 3) {
if (display_width_ <= 0 || display_height_ <= 0)
return false;
} else {
MEDIA_LOG(ERROR, media_log_) << "Unsupported display unit type "
<< display_unit_;
return false;
}
gfx::Size natural_size = gfx::Size(display_width_, display_height_);
const uint8* extra_data = NULL;
size_t extra_data_size = 0;
if (codec_private.size() > 0) {
extra_data = &codec_private[0];
extra_data_size = codec_private.size();
}
config->Initialize(video_codec, profile, format, COLOR_SPACE_HD_REC709,
coded_size, visible_rect, natural_size, extra_data,
extra_data_size, is_encrypted);
return config->IsValidConfig();
}
bool WebMVideoClient::OnUInt(int id, int64 val) {
int64* dst = NULL;
switch (id) {
case kWebMIdPixelWidth:
dst = &pixel_width_;
break;
case kWebMIdPixelHeight:
dst = &pixel_height_;
break;
case kWebMIdPixelCropTop:
dst = &crop_top_;
break;
case kWebMIdPixelCropBottom:
dst = &crop_bottom_;
break;
case kWebMIdPixelCropLeft:
dst = &crop_left_;
break;
case kWebMIdPixelCropRight:
dst = &crop_right_;
break;
case kWebMIdDisplayWidth:
dst = &display_width_;
break;
case kWebMIdDisplayHeight:
dst = &display_height_;
break;
case kWebMIdDisplayUnit:
dst = &display_unit_;
break;
case kWebMIdAlphaMode:
dst = &alpha_mode_;
break;
default:
return true;
}
if (*dst != -1) {
MEDIA_LOG(ERROR, media_log_) << "Multiple values for id " << std::hex << id
<< " specified (" << *dst << " and " << val
<< ")";
return false;
}
*dst = val;
return true;
}
bool WebMVideoClient::OnBinary(int id, const uint8* data, int size) {
// Accept binary fields we don't care about for now.
return true;
}
bool WebMVideoClient::OnFloat(int id, double val) {
// Accept float fields we don't care about for now.
return true;
}
} // namespace media
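To make the geometry defaults above concrete, a hedged worked example (values are illustrative, not taken from any test):
// PixelWidth=1920, PixelHeight=1080, PixelCropLeft=PixelCropRight=10,
// no other crop elements, DisplayUnit/DisplayWidth/DisplayHeight absent.
int64 pixel_width = 1920, pixel_height = 1080;
int64 crop_left = 10, crop_right = 10, crop_top = 0, crop_bottom = 0;
gfx::Size coded_size(pixel_width, pixel_height);               // 1920x1080
gfx::Rect visible_rect(crop_top, crop_left,
                       pixel_width - (crop_left + crop_right),    // width 1900
                       pixel_height - (crop_top + crop_bottom));  // height 1080
// DisplayUnit defaults to 0 (pixels); with no DisplayWidth/DisplayHeight the
// natural size falls back to the visible size, so natural_size is 1900x1080.
gfx::Size natural_size(visible_rect.width(), visible_rect.height());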

View File

@ -0,0 +1,61 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_VIDEO_CLIENT_H_
#define MEDIA_FORMATS_WEBM_WEBM_VIDEO_CLIENT_H_
#include <string>
#include <vector>
#include "media/base/media_log.h"
#include "media/formats/webm/webm_parser.h"
namespace media {
class VideoDecoderConfig;
// Helper class used to parse a Video element inside a TrackEntry element.
class WebMVideoClient : public WebMParserClient {
public:
explicit WebMVideoClient(const scoped_refptr<MediaLog>& media_log);
~WebMVideoClient() override;
// Reset this object's state so it can process a new video track element.
void Reset();
// Initialize |config| with the data in |codec_id|, |codec_private|,
// |is_encrypted| and the fields parsed from the last video track element this
// object was used to parse.
// Returns true if |config| was successfully initialized.
// Returns false if there were unexpected values in the provided parameters or
// video track element fields. The contents of |config| are undefined in this
// case and should not be relied upon.
bool InitializeConfig(const std::string& codec_id,
const std::vector<uint8>& codec_private,
bool is_encrypted,
VideoDecoderConfig* config);
private:
// WebMParserClient implementation.
bool OnUInt(int id, int64 val) override;
bool OnBinary(int id, const uint8* data, int size) override;
bool OnFloat(int id, double val) override;
scoped_refptr<MediaLog> media_log_;
int64 pixel_width_;
int64 pixel_height_;
int64 crop_bottom_;
int64 crop_top_;
int64 crop_left_;
int64 crop_right_;
int64 display_width_;
int64 display_height_;
int64 display_unit_;
int64 alpha_mode_;
DISALLOW_COPY_AND_ASSIGN(WebMVideoClient);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_VIDEO_CLIENT_H_

View File

@ -0,0 +1,78 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_webvtt_parser.h"
namespace media {
void WebMWebVTTParser::Parse(const uint8* payload, int payload_size,
std::string* id,
std::string* settings,
std::string* content) {
WebMWebVTTParser parser(payload, payload_size);
parser.Parse(id, settings, content);
}
WebMWebVTTParser::WebMWebVTTParser(const uint8* payload, int payload_size)
: ptr_(payload),
ptr_end_(payload + payload_size) {
}
void WebMWebVTTParser::Parse(std::string* id,
std::string* settings,
std::string* content) {
ParseLine(id);
ParseLine(settings);
content->assign(ptr_, ptr_end_);
}
bool WebMWebVTTParser::GetByte(uint8* byte) {
if (ptr_ >= ptr_end_)
return false; // indicates end-of-stream
*byte = *ptr_++;
return true;
}
void WebMWebVTTParser::UngetByte() {
--ptr_;
}
void WebMWebVTTParser::ParseLine(std::string* line) {
line->clear();
// Consume characters from the stream, until we reach end-of-line.
// The WebVTT spec states that lines may be terminated in any of the following
// three ways:
// LF
// CR
// CR LF
// The spec is here:
// http://wiki.webmproject.org/webm-metadata/temporal-metadata/webvtt-in-webm
enum {
kLF = '\x0A',
kCR = '\x0D'
};
for (;;) {
uint8 byte;
if (!GetByte(&byte) || byte == kLF)
return;
if (byte == kCR) {
if (GetByte(&byte) && byte != kLF)
UngetByte();
return;
}
line->push_back(byte);
}
}
} // namespace media
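A block payload is just the cue identifier line, the settings line, and everything after as cue content (lines may end in LF, CR, or CR LF). A small sketch with a hypothetical payload:
const char kPayload[] = "cue-1\nposition:10%\nHello,\nworld!";
std::string id, settings, content;
WebMWebVTTParser::Parse(reinterpret_cast<const uint8*>(kPayload),
                        sizeof(kPayload) - 1, &id, &settings, &content);
// id       == "cue-1"
// settings == "position:10%"
// content  == "Hello,\nworld!"  (remaining bytes, line breaks preserved)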

View File

@ -0,0 +1,49 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_WEBM_WEBM_WEBVTT_PARSER_H_
#define MEDIA_FORMATS_WEBM_WEBM_WEBVTT_PARSER_H_
#include <string>
#include "base/basictypes.h"
#include "media/base/media_export.h"
namespace media {
class MEDIA_EXPORT WebMWebVTTParser {
public:
// Utility function to parse the WebVTT cue from a byte stream.
static void Parse(const uint8* payload, int payload_size,
std::string* id,
std::string* settings,
std::string* content);
private:
// The payload is the embedded WebVTT cue, stored in a WebM block.
// The parser treats this as a UTF-8 byte stream.
WebMWebVTTParser(const uint8* payload, int payload_size);
// Parse the cue identifier, settings, and content from the stream.
void Parse(std::string* id, std::string* settings, std::string* content);
// Remove a byte from the stream, advancing the stream pointer.
// Returns true if a character was returned; false means "end of stream".
bool GetByte(uint8* byte);
// Back up the stream pointer.
void UngetByte();
// Parse a line of text from the stream.
void ParseLine(std::string* line);
// Represents the portion of the stream that has not been consumed yet.
const uint8* ptr_;
const uint8* const ptr_end_;
DISALLOW_COPY_AND_ASSIGN(WebMWebVTTParser);
};
} // namespace media
#endif // MEDIA_FORMATS_WEBM_WEBM_WEBVTT_PARSER_H_

View File

@ -0,0 +1,105 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/webm/webm_webvtt_parser.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
using ::testing::InSequence;
namespace media {
typedef std::vector<uint8> Cue;
static Cue EncodeCue(const std::string& id,
const std::string& settings,
const std::string& content) {
const std::string result = id + '\n' + settings + '\n' + content;
const uint8* const buf = reinterpret_cast<const uint8*>(result.data());
return Cue(buf, buf + result.length());
}
static void DecodeCue(const Cue& cue,
std::string* id,
std::string* settings,
std::string* content) {
WebMWebVTTParser::Parse(&cue[0], static_cast<int>(cue.size()),
id, settings, content);
}
class WebMWebVTTParserTest : public testing::Test {
public:
WebMWebVTTParserTest() {}
};
TEST_F(WebMWebVTTParserTest, Blank) {
InSequence s;
const Cue cue = EncodeCue("", "", "Subtitle");
std::string id, settings, content;
DecodeCue(cue, &id, &settings, &content);
EXPECT_EQ(id, "");
EXPECT_EQ(settings, "");
EXPECT_EQ(content, "Subtitle");
}
TEST_F(WebMWebVTTParserTest, Id) {
InSequence s;
for (int i = 1; i <= 9; ++i) {
const std::string idsrc(1, '0' + i);
const Cue cue = EncodeCue(idsrc, "", "Subtitle");
std::string id, settings, content;
DecodeCue(cue, &id, &settings, &content);
EXPECT_EQ(id, idsrc);
EXPECT_EQ(settings, "");
EXPECT_EQ(content, "Subtitle");
}
}
TEST_F(WebMWebVTTParserTest, Settings) {
InSequence s;
enum { kSettingsCount = 4 };
const char* const settings_str[kSettingsCount] = {
"vertical:lr",
"line:50%",
"position:42%",
"vertical:rl line:42% position:100%" };
for (int i = 0; i < kSettingsCount; ++i) {
const Cue cue = EncodeCue("", settings_str[i], "Subtitle");
std::string id, settings, content;
DecodeCue(cue, &id, &settings, &content);
EXPECT_EQ(id, "");
EXPECT_EQ(settings, settings_str[i]);
EXPECT_EQ(content, "Subtitle");
}
}
TEST_F(WebMWebVTTParserTest, Content) {
InSequence s;
enum { kContentCount = 4 };
const char* const content_str[kContentCount] = {
"Subtitle",
"Another Subtitle",
"Yet Another Subtitle",
"Another Subtitle\nSplit Across Two Lines" };
for (int i = 0; i < kContentCount; ++i) {
const Cue cue = EncodeCue("", "", content_str[i]);
std::string id, settings, content;
DecodeCue(cue, &id, &settings, &content);
EXPECT_EQ(id, "");
EXPECT_EQ(settings, "");
EXPECT_EQ(content, content_str[i]);
}
}
} // namespace media