Shaka Packager SDK
webm_cluster_parser.cc
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "packager/media/formats/webm/webm_cluster_parser.h"
6 
7 #include <algorithm>
8 #include <vector>
9 
10 #include "packager/base/logging.h"
11 #include "packager/base/sys_byteorder.h"
12 #include "packager/media/base/decrypt_config.h"
13 #include "packager/media/base/timestamp.h"
14 #include "packager/media/codecs/vp8_parser.h"
15 #include "packager/media/codecs/vp9_parser.h"
16 #include "packager/media/codecs/webvtt_util.h"
17 #include "packager/media/formats/webm/webm_constants.h"
18 #include "packager/media/formats/webm/webm_crypto_helpers.h"
19 #include "packager/media/formats/webm/webm_webvtt_parser.h"
20 
21 namespace shaka {
22 namespace media {
23 namespace {
24 
25 const int64_t kMicrosecondsPerMillisecond = 1000;
26 
27 } // namespace
28 
29 WebMClusterParser::WebMClusterParser(
30  int64_t timecode_scale,
31  std::shared_ptr<AudioStreamInfo> audio_stream_info,
32  std::shared_ptr<VideoStreamInfo> video_stream_info,
33  const VPCodecConfigurationRecord& vp_config,
34  int64_t audio_default_duration,
35  int64_t video_default_duration,
36  const WebMTracksParser::TextTracks& text_tracks,
37  const std::set<int64_t>& ignored_tracks,
38  const std::string& audio_encryption_key_id,
39  const std::string& video_encryption_key_id,
40  const MediaParser::NewSampleCB& new_sample_cb,
41  const MediaParser::InitCB& init_cb,
42  KeySource* decryption_key_source)
43  : timecode_multiplier_(timecode_scale /
44  static_cast<double>(kMicrosecondsPerMillisecond)),
45  audio_stream_info_(audio_stream_info),
46  video_stream_info_(video_stream_info),
47  vp_config_(vp_config),
48  ignored_tracks_(ignored_tracks),
49  audio_encryption_key_id_(audio_encryption_key_id),
50  video_encryption_key_id_(video_encryption_key_id),
51  parser_(kWebMIdCluster, this),
52  initialized_(false),
53  init_cb_(init_cb),
54  cluster_start_time_(kNoTimestamp),
55  audio_(audio_stream_info ? audio_stream_info->track_id() : -1,
56  false,
57  audio_default_duration,
58  new_sample_cb),
59  video_(video_stream_info ? video_stream_info->track_id() : -1,
60  true,
61  video_default_duration,
62  new_sample_cb) {
63  if (decryption_key_source) {
64  decryptor_source_.reset(new DecryptorSource(decryption_key_source));
65  if (audio_stream_info_)
66  audio_stream_info_->set_is_encrypted(false);
67  if (video_stream_info_)
68  video_stream_info_->set_is_encrypted(false);
69  }
70  for (WebMTracksParser::TextTracks::const_iterator it = text_tracks.begin();
71  it != text_tracks.end();
72  ++it) {
73  text_track_map_.insert(std::make_pair(
74  it->first, Track(it->first, false, kNoTimestamp, new_sample_cb)));
75  }
76 }
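A worked example of the timecode arithmetic set up above, assuming the common WebM default TimecodeScale of 1,000,000 nanoseconds per timecode tick (an assumption; the actual value is whatever the Segment Info element carried):

    timecode_multiplier_ = timecode_scale / kMicrosecondsPerMillisecond
                         = 1,000,000 / 1,000 = 1,000 microseconds per tick

so a block whose cluster timecode plus relative timecode totals 40 ticks is stamped at 40 * 1,000 = 40,000 microseconds (40 ms).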
77 
78 WebMClusterParser::~WebMClusterParser() {}
79 
80 void WebMClusterParser::Reset() {
81  last_block_timecode_ = -1;
82  cluster_timecode_ = -1;
83  cluster_start_time_ = kNoTimestamp;
84  cluster_ended_ = false;
85  parser_.Reset();
86  audio_.Reset();
87  video_.Reset();
88  ResetTextTracks();
89 }
90 
91 bool WebMClusterParser::Flush() {
92  // Estimate the duration of the last frame if necessary.
93  bool audio_result = audio_.ApplyDurationEstimateIfNeeded();
94  bool video_result = video_.ApplyDurationEstimateIfNeeded();
95  Reset();
96  return audio_result && video_result;
97 }
98 
99 int WebMClusterParser::Parse(const uint8_t* buf, int size) {
100  int result = parser_.Parse(buf, size);
101 
102  if (result < 0) {
103  cluster_ended_ = false;
104  return result;
105  }
106 
107  cluster_ended_ = parser_.IsParsingComplete();
108  if (cluster_ended_) {
109  // If there were no buffers in this cluster, set the cluster start time to
110  // be the |cluster_timecode_|.
111  if (cluster_start_time_ == kNoTimestamp) {
112  // If the cluster did not even have a |cluster_timecode_|, signal parse
113  // error.
114  if (cluster_timecode_ < 0)
115  return -1;
116 
117  cluster_start_time_ = cluster_timecode_ * timecode_multiplier_;
118  }
119 
120  // Reset the parser if we're done parsing so that
121  // it is ready to accept another cluster on the next
122  // call.
123  parser_.Reset();
124 
125  last_block_timecode_ = -1;
126  cluster_timecode_ = -1;
127  }
128 
129  return result;
130 }
131 
132 WebMParserClient* WebMClusterParser::OnListStart(int id) {
133  if (id == kWebMIdCluster) {
134  cluster_timecode_ = -1;
135  cluster_start_time_ = kNoTimestamp;
136  } else if (id == kWebMIdBlockGroup) {
137  block_data_.reset();
138  block_data_size_ = -1;
139  block_duration_ = -1;
140  discard_padding_ = -1;
141  discard_padding_set_ = false;
142  reference_block_set_ = false;
143  } else if (id == kWebMIdBlockAdditions) {
144  block_add_id_ = -1;
145  block_additional_data_.reset();
146  block_additional_data_size_ = 0;
147  }
148 
149  return this;
150 }
151 
152 bool WebMClusterParser::OnListEnd(int id) {
153  if (id != kWebMIdBlockGroup)
154  return true;
155 
156  // Make sure the BlockGroup actually had a Block.
157  if (block_data_size_ == -1) {
158  LOG(ERROR) << "Block missing from BlockGroup.";
159  return false;
160  }
161 
162  bool result = ParseBlock(
163  false, block_data_.get(), block_data_size_, block_additional_data_.get(),
164  block_additional_data_size_, block_duration_,
165  discard_padding_set_ ? discard_padding_ : 0, reference_block_set_);
166  block_data_.reset();
167  block_data_size_ = -1;
168  block_duration_ = -1;
169  block_add_id_ = -1;
170  block_additional_data_.reset();
171  block_additional_data_size_ = 0;
172  discard_padding_ = -1;
173  discard_padding_set_ = false;
174  reference_block_set_ = false;
175  return result;
176 }
177 
178 bool WebMClusterParser::OnUInt(int id, int64_t val) {
179  int64_t* dst;
180  switch (id) {
181  case kWebMIdTimecode:
182  dst = &cluster_timecode_;
183  break;
184  case kWebMIdBlockDuration:
185  dst = &block_duration_;
186  break;
187  case kWebMIdBlockAddID:
188  dst = &block_add_id_;
189  break;
190  default:
191  return true;
192  }
193  if (*dst != -1)
194  return false;
195  *dst = val;
196  return true;
197 }
198 
199 bool WebMClusterParser::ParseBlock(bool is_simple_block,
200  const uint8_t* buf,
201  int size,
202  const uint8_t* additional,
203  int additional_size,
204  int duration,
205  int64_t discard_padding,
206  bool reference_block_set) {
207  if (size < 4)
208  return false;
209 
210  // Return an error if the trackNum > 127. We just aren't
211  // going to support large track numbers right now.
212  if (!(buf[0] & 0x80)) {
213  LOG(ERROR) << "TrackNumber over 127 not supported";
214  return false;
215  }
216 
217  int track_num = buf[0] & 0x7f;
218  int timecode = buf[1] << 8 | buf[2];
219  int flags = buf[3] & 0xff;
220  int lacing = (flags >> 1) & 0x3;
221 
222  if (lacing) {
223  LOG(ERROR) << "Lacing " << lacing << " is not supported yet.";
224  return false;
225  }
226 
227  // Sign extend negative timecode offsets.
228  if (timecode & 0x8000)
229  timecode |= ~0xffff;
230 
231  // The first bit of the flags is set when a SimpleBlock contains only
232  // keyframes. If this is a Block, then keyframe is inferred by the absence of
233  // the ReferenceBlock Element.
234  // http://www.matroska.org/technical/specs/index.html
235  bool is_key_frame =
236  is_simple_block ? (flags & 0x80) != 0 : !reference_block_set;
237 
238  const uint8_t* frame_data = buf + 4;
239  int frame_size = size - (frame_data - buf);
240  return OnBlock(is_simple_block, track_num, timecode, duration, frame_data,
241  frame_size, additional, additional_size, discard_padding,
242  is_key_frame);
243 }
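To make the 4-byte block header decoding above concrete, a small worked example with illustrative bytes:

    // buf = { 0x81, 0xFF, 0xFE, 0x80 }
    // buf[0] = 0x81 : high bit set, so track_num = 0x81 & 0x7f = 1
    // buf[1..2]     : timecode = 0xFF << 8 | 0xFE = 0xFFFE; bit 0x8000 is set,
    //                 so sign extension (timecode |= ~0xffff) gives -2 ticks
    // buf[3] = 0x80 : flags; (flags >> 1) & 0x3 == 0 means no lacing, and for
    //                 a SimpleBlock the 0x80 bit marks the frame as a keyframe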
244 
245 bool WebMClusterParser::OnBinary(int id, const uint8_t* data, int size) {
246  switch (id) {
247  case kWebMIdSimpleBlock:
248  return ParseBlock(true, data, size, NULL, 0, -1, 0, false);
249 
250  case kWebMIdBlock:
251  if (block_data_) {
252  LOG(ERROR) << "More than 1 Block in a BlockGroup is not "
253  "supported.";
254  return false;
255  }
256  block_data_.reset(new uint8_t[size]);
257  memcpy(block_data_.get(), data, size);
258  block_data_size_ = size;
259  return true;
260 
261  case kWebMIdBlockAdditional: {
262  uint64_t block_add_id = base::HostToNet64(block_add_id_);
263  if (block_additional_data_) {
264  // TODO: Technically, more than 1 BlockAdditional is allowed as per
265  // matroska spec. But for now we don't have a use case to support
266  // parsing of such files. Take a look at this again when such a case
267  // arises.
268  LOG(ERROR) << "More than 1 BlockAdditional in a "
269  "BlockGroup is not supported.";
270  return false;
271  }
272  // First 8 bytes of side_data in DecoderBuffer is the BlockAddID
273  // element's value in Big Endian format. This is done to mimic ffmpeg
274  // demuxer's behavior.
275  block_additional_data_size_ = size + sizeof(block_add_id);
276  block_additional_data_.reset(new uint8_t[block_additional_data_size_]);
277  memcpy(block_additional_data_.get(), &block_add_id,
278  sizeof(block_add_id));
279  memcpy(block_additional_data_.get() + 8, data, size);
280  return true;
281  }
282  case kWebMIdDiscardPadding: {
283  if (discard_padding_set_ || size <= 0 || size > 8)
284  return false;
285  discard_padding_set_ = true;
286 
287  // Read in the big-endian integer.
288  discard_padding_ = static_cast<int8_t>(data[0]);
289  for (int i = 1; i < size; ++i)
290  discard_padding_ = (discard_padding_ << 8) | data[i];
291 
292  return true;
293  }
294  case kWebMIdReferenceBlock:
295  // We use ReferenceBlock to determine whether the current Block contains a
296  // keyframe or not. Other than that, we don't care about the value of the
297  // ReferenceBlock element itself.
298  reference_block_set_ = true;
299  return true;
300  default:
301  return true;
302  }
303 }
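For reference, the kWebMIdDiscardPadding case above accumulates a big-endian signed integer one byte at a time; an illustrative example:

    // data = { 0x03, 0xE8 }, size = 2
    // start:  discard_padding_ = static_cast<int8_t>(0x03) = 3
    // next:   discard_padding_ = (3 << 8) | 0xE8 = 0x3E8 = 1000
    // Per the Matroska spec the value is in nanoseconds, so this would request
    // 1000 ns of padding to be discarded from the block.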
304 
305 bool WebMClusterParser::OnBlock(bool is_simple_block,
306  int track_num,
307  int timecode,
308  int block_duration,
309  const uint8_t* data,
310  int size,
311  const uint8_t* additional,
312  int additional_size,
313  int64_t discard_padding,
314  bool is_key_frame) {
315  DCHECK_GE(size, 0);
316  if (cluster_timecode_ == -1) {
317  LOG(ERROR) << "Got a block before cluster timecode.";
318  return false;
319  }
320 
321  // TODO: Should relative negative timecode offsets be rejected? Or only when
322  // the absolute timecode is negative? See http://crbug.com/271794
323  if (timecode < 0) {
324  LOG(ERROR) << "Got a block with negative timecode offset " << timecode;
325  return false;
326  }
327 
328  if (last_block_timecode_ != -1 && timecode < last_block_timecode_) {
329  LOG(ERROR) << "Got a block with a timecode before the previous block.";
330  return false;
331  }
332 
333  Track* track = NULL;
334  StreamType stream_type = kStreamUnknown;
335  std::string encryption_key_id;
336  if (track_num == audio_.track_num()) {
337  track = &audio_;
338  encryption_key_id = audio_encryption_key_id_;
339  stream_type = kStreamAudio;
340  } else if (track_num == video_.track_num()) {
341  track = &video_;
342  encryption_key_id = video_encryption_key_id_;
343  stream_type = kStreamVideo;
344  } else if (ignored_tracks_.find(track_num) != ignored_tracks_.end()) {
345  return true;
346  } else if (Track* const text_track = FindTextTrack(track_num)) {
347  if (is_simple_block) // BlockGroup is required for WebVTT cues
348  return false;
349  if (block_duration < 0) // not specified
350  return false;
351  track = text_track;
352  stream_type = kStreamText;
353  } else {
354  LOG(ERROR) << "Unexpected track number " << track_num;
355  return false;
356  }
357  DCHECK_NE(stream_type, kStreamUnknown);
358 
359  last_block_timecode_ = timecode;
360 
361  int64_t timestamp = (cluster_timecode_ + timecode) * timecode_multiplier_;
362 
363  std::shared_ptr<MediaSample> buffer;
364  if (stream_type != kStreamText) {
365  // Every encrypted Block has a signal byte and IV prepended to it. Current
366  // encrypted WebM request for comments specification is here
367  // http://wiki.webmproject.org/encryption/webm-encryption-rfc
368  std::unique_ptr<DecryptConfig> decrypt_config;
369  int data_offset = 0;
370  if (!encryption_key_id.empty() &&
371  !WebMCreateDecryptConfig(
372  data, size,
373  reinterpret_cast<const uint8_t*>(encryption_key_id.data()),
374  encryption_key_id.size(),
375  &decrypt_config, &data_offset)) {
376  return false;
377  }
378 
379  const uint8_t* media_data = data + data_offset;
380  const size_t media_data_size = size - data_offset;
381  // Use a dummy data size of 0 to avoid copying overhead.
382  // Actual media data is set later.
383  const size_t kDummyDataSize = 0;
384  buffer = MediaSample::CopyFrom(media_data, kDummyDataSize, additional,
385  additional_size, is_key_frame);
386 
387  if (decrypt_config) {
388  if (!decryptor_source_) {
389  buffer->SetData(media_data, media_data_size);
390  // If the demuxer does not have the decryptor_source_, store
391  // decrypt_config so that the demuxed sample can be decrypted later.
392  buffer->set_decrypt_config(std::move(decrypt_config));
393  buffer->set_is_encrypted(true);
394  } else {
395  std::shared_ptr<uint8_t> decrypted_media_data(
396  new uint8_t[media_data_size], std::default_delete<uint8_t[]>());
397  if (!decryptor_source_->DecryptSampleBuffer(
398  decrypt_config.get(), media_data, media_data_size,
399  decrypted_media_data.get())) {
400  LOG(ERROR) << "Cannot decrypt samples";
401  return false;
402  }
403  buffer->TransferData(std::move(decrypted_media_data), media_data_size);
404  }
405  } else {
406  buffer->SetData(media_data, media_data_size);
407  }
408  } else {
409  std::string id, settings, content;
410  WebMWebVTTParser::Parse(data, size, &id, &settings, &content);
411 
412  std::vector<uint8_t> side_data;
413  MakeSideData(id.begin(), id.end(),
414  settings.begin(), settings.end(),
415  &side_data);
416 
417  buffer = MediaSample::CopyFrom(
418  reinterpret_cast<const uint8_t*>(content.data()), content.length(),
419  &side_data[0], side_data.size(), true);
420  }
421 
422  buffer->set_dts(timestamp);
423  buffer->set_pts(timestamp);
424  if (cluster_start_time_ == kNoTimestamp)
425  cluster_start_time_ = timestamp;
426  buffer->set_duration(block_duration > 0
427  ? (block_duration * timecode_multiplier_)
428  : kNoTimestamp);
429 
430  if (!init_cb_.is_null() && !initialized_) {
431  std::vector<std::shared_ptr<StreamInfo>> streams;
432  if (audio_stream_info_)
433  streams.push_back(audio_stream_info_);
434  if (video_stream_info_) {
435  if (stream_type == kStreamVideo) {
436  std::unique_ptr<VPxParser> vpx_parser;
437  switch (video_stream_info_->codec()) {
438  case kCodecVP8:
439  vpx_parser.reset(new VP8Parser);
440  break;
441  case kCodecVP9:
442  vpx_parser.reset(new VP9Parser);
443  break;
444  default:
445  NOTIMPLEMENTED() << "Unsupported codec "
446  << video_stream_info_->codec();
447  return false;
448  }
449  std::vector<VPxFrameInfo> vpx_frames;
450  if (!vpx_parser->Parse(buffer->data(), buffer->data_size(),
451  &vpx_frames)) {
452  LOG(ERROR) << "Failed to parse vpx frame.";
453  return false;
454  }
455  if (vpx_frames.size() != 1u || !vpx_frames[0].is_keyframe) {
456  LOG(ERROR) << "The first frame should be a key frame.";
457  return false;
458  }
459 
460  vp_config_.MergeFrom(vpx_parser->codec_config());
461  video_stream_info_->set_codec_string(
462  vp_config_.GetCodecString(video_stream_info_->codec()));
463  std::vector<uint8_t> config_serialized;
464  vp_config_.WriteMP4(&config_serialized);
465  video_stream_info_->set_codec_config(config_serialized);
466  streams.push_back(video_stream_info_);
467  init_cb_.Run(streams);
468  initialized_ = true;
469  }
470  } else {
471  init_cb_.Run(streams);
472  initialized_ = true;
473  }
474  }
475 
476  return track->EmitBuffer(buffer);
477 }
478 
479 WebMClusterParser::Track::Track(int track_num,
480  bool is_video,
481  int64_t default_duration,
482  const MediaParser::NewSampleCB& new_sample_cb)
483  : track_num_(track_num),
484  is_video_(is_video),
485  default_duration_(default_duration),
486  estimated_next_frame_duration_(kNoTimestamp),
487  new_sample_cb_(new_sample_cb) {
488  DCHECK(default_duration_ == kNoTimestamp || default_duration_ > 0);
489 }
490 
491 WebMClusterParser::Track::~Track() {}
492 
493 bool WebMClusterParser::Track::EmitBuffer(
494  const std::shared_ptr<MediaSample>& buffer) {
495  DVLOG(2) << "EmitBuffer() : " << track_num_
496  << " ts " << buffer->pts()
497  << " dur " << buffer->duration()
498  << " kf " << buffer->is_key_frame()
499  << " size " << buffer->data_size();
500 
501  if (last_added_buffer_missing_duration_.get()) {
502  int64_t derived_duration =
503  buffer->pts() - last_added_buffer_missing_duration_->pts();
504  last_added_buffer_missing_duration_->set_duration(derived_duration);
505 
506  DVLOG(2) << "EmitBuffer() : applied derived duration to held-back buffer : "
507  << " ts "
508  << last_added_buffer_missing_duration_->pts()
509  << " dur "
510  << last_added_buffer_missing_duration_->duration()
511  << " kf " << last_added_buffer_missing_duration_->is_key_frame()
512  << " size " << last_added_buffer_missing_duration_->data_size();
513  std::shared_ptr<MediaSample> updated_buffer =
514  last_added_buffer_missing_duration_;
515  last_added_buffer_missing_duration_ = NULL;
516  if (!EmitBufferHelp(updated_buffer))
517  return false;
518  }
519 
520  if (buffer->duration() == kNoTimestamp) {
521  last_added_buffer_missing_duration_ = buffer;
522  DVLOG(2) << "EmitBuffer() : holding back buffer that is missing duration";
523  return true;
524  }
525 
526  return EmitBufferHelp(buffer);
527 }
528 
529 bool WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
530  if (!last_added_buffer_missing_duration_.get())
531  return true;
532 
533  int64_t estimated_duration = GetDurationEstimate();
534  last_added_buffer_missing_duration_->set_duration(estimated_duration);
535 
536  VLOG(1) << "Track " << track_num_ << ": Estimating WebM block duration to be "
537  << estimated_duration / 1000
538  << "ms for the last (Simple)Block in the Cluster for this Track. Use "
539  "BlockGroups with BlockDurations at the end of each Track in a "
540  "Cluster to avoid estimation.";
541 
542  DVLOG(2) << " new dur : ts " << last_added_buffer_missing_duration_->pts()
543  << " dur " << last_added_buffer_missing_duration_->duration()
544  << " kf " << last_added_buffer_missing_duration_->is_key_frame()
545  << " size " << last_added_buffer_missing_duration_->data_size();
546 
547  // Don't use the applied duration as a future estimation (don't use
548  // EmitBufferHelp() here.)
549  if (!new_sample_cb_.Run(track_num_, last_added_buffer_missing_duration_))
550  return false;
551  last_added_buffer_missing_duration_ = NULL;
552  return true;
553 }
554 
555 void WebMClusterParser::Track::Reset() {
556  last_added_buffer_missing_duration_ = NULL;
557 }
558 
559 bool WebMClusterParser::Track::EmitBufferHelp(
560  const std::shared_ptr<MediaSample>& buffer) {
561  DCHECK(!last_added_buffer_missing_duration_.get());
562 
563  int64_t duration = buffer->duration();
564  if (duration < 0 || duration == kNoTimestamp) {
565  LOG(ERROR) << "Invalid buffer duration: " << duration;
566  return false;
567  }
568 
569  // The estimated frame duration is the maximum non-zero duration since the
570  // last initialization segment.
571  if (duration > 0) {
572  int64_t orig_duration_estimate = estimated_next_frame_duration_;
573  if (estimated_next_frame_duration_ == kNoTimestamp) {
574  estimated_next_frame_duration_ = duration;
575  } else {
576  estimated_next_frame_duration_ =
577  std::max(duration, estimated_next_frame_duration_);
578  }
579 
580  if (orig_duration_estimate != estimated_next_frame_duration_) {
581  DVLOG(3) << "Updated duration estimate:"
582  << orig_duration_estimate
583  << " -> "
584  << estimated_next_frame_duration_
585  << " at timestamp: "
586  << buffer->dts();
587  }
588  }
589 
590  return new_sample_cb_.Run(track_num_, buffer);
591 }
592 
593 int64_t WebMClusterParser::Track::GetDurationEstimate() {
594  int64_t duration = kNoTimestamp;
595  if (default_duration_ != kNoTimestamp) {
596  duration = default_duration_;
597  DVLOG(3) << __FUNCTION__ << " : using track default duration " << duration;
598  } else if (estimated_next_frame_duration_ != kNoTimestamp) {
599  duration = estimated_next_frame_duration_;
600  DVLOG(3) << __FUNCTION__ << " : using estimated duration " << duration;
601  } else {
602  if (is_video_) {
603  duration = kDefaultVideoBufferDurationInMs * kMicrosecondsPerMillisecond;
604  } else {
605  duration = kDefaultAudioBufferDurationInMs * kMicrosecondsPerMillisecond;
606  }
607  DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration "
608  << duration;
609  }
610 
611  DCHECK_GT(duration, 0);
612  DCHECK_NE(duration, kNoTimestamp);
613  return duration;
614 }
615 
616 void WebMClusterParser::ResetTextTracks() {
617  for (TextTrackMap::iterator it = text_track_map_.begin();
618  it != text_track_map_.end();
619  ++it) {
620  it->second.Reset();
621  }
622 }
623 
624 WebMClusterParser::Track*
625 WebMClusterParser::FindTextTrack(int track_num) {
626  const TextTrackMap::iterator it = text_track_map_.find(track_num);
627 
628  if (it == text_track_map_.end())
629  return NULL;
630 
631  return &it->second;
632 }
633 
634 } // namespace media
635 } // namespace shaka
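For orientation, a minimal sketch of how a caller might drive this parser once a WebMClusterParser has been constructed (typically with track and stream information produced by WebMTracksParser). The helper function and the way the cluster bytes are obtained are illustrative, not part of this file:

    #include <cstdint>
    #include <vector>

    #include "packager/media/formats/webm/webm_cluster_parser.h"

    // Illustrative driver: feed raw Cluster bytes to Parse() in chunks and
    // call Flush() at end of stream so the last block's duration can be
    // estimated.
    bool DemuxClusters(shaka::media::WebMClusterParser* cluster_parser,
                       const std::vector<uint8_t>& cluster_bytes) {
      int offset = 0;
      while (offset < static_cast<int>(cluster_bytes.size())) {
        // Parse() is incremental; a negative return signals a parse error.
        int result = cluster_parser->Parse(
            cluster_bytes.data() + offset,
            static_cast<int>(cluster_bytes.size()) - offset);
        if (result < 0)
          return false;
        if (result == 0)
          break;  // No further progress is possible with the bytes given here.
        offset += result;
      }
      // Emits any held-back final sample with an estimated duration and
      // resets the parser so it is ready for another cluster.
      return cluster_parser->Flush();
    }

Parse() returns the number of bytes it consumed, so a real caller would carry unconsumed bytes over to the next read; Flush() applies ApplyDurationEstimateIfNeeded() on both tracks and then calls Reset().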