Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- diff --git a/media/base/audio_buffer_converter.cc b/media/base/audio_buffer_converter.cc
- index 59c6681..5d231db 100644
- --- a/media/base/audio_buffer_converter.cc
- +++ b/media/base/audio_buffer_converter.cc
- @@ -17,6 +17,10 @@
- namespace media {
- +
- +static base::TimeDelta duration_in;
- +static base::TimeDelta duration_out;
- +
- // Is the config presented by |buffer| a config change from |params|?
- static bool IsConfigChange(const AudioParameters& params,
- const scoped_refptr<AudioBuffer>& buffer) {
- @@ -45,6 +49,8 @@ void AudioBufferConverter::AddInput(const scoped_refptr<AudioBuffer>& buffer) {
- return;
- }
- + duration_in += buffer->duration();
- +
- // We'll need a new |audio_converter_| if there was a config change.
- if (IsConfigChange(input_params_, buffer))
- ResetConverter(buffer);
- @@ -70,10 +76,14 @@ scoped_refptr<AudioBuffer> AudioBufferConverter::GetNextBuffer() {
- DCHECK(!queued_outputs_.empty());
- scoped_refptr<AudioBuffer> out = queued_outputs_.front();
- queued_outputs_.pop_front();
- + duration_out += out->duration();
- return out;
- }
- void AudioBufferConverter::Reset() {
- + LOG(ERROR) << "in: " << duration_in.InMicroseconds()
- + << ", out: " << duration_out.InMicroseconds();
- + duration_out = duration_in = base::TimeDelta();
- audio_converter_.reset();
- queued_inputs_.clear();
- queued_outputs_.clear();
- diff --git a/media/base/audio_converter.cc b/media/base/audio_converter.cc
- index aa0be4f..b51d274 100644
- --- a/media/base/audio_converter.cc
- +++ b/media/base/audio_converter.cc
- @@ -34,7 +34,7 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
- // Handle different input and output channel layouts.
- if (input_params.channel_layout() != output_params.channel_layout()) {
- - DVLOG(1) << "Remixing channel layout from " << input_params.channel_layout()
- + LOG(ERROR) << "Remixing channel layout from " << input_params.channel_layout()
- << " to " << output_params.channel_layout() << "; from "
- << input_params.channels() << " channels to "
- << output_params.channels() << " channels.";
- @@ -46,7 +46,7 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
- // Only resample if necessary since it's expensive.
- if (input_params.sample_rate() != output_params.sample_rate()) {
- - DVLOG(1) << "Resampling from " << input_params.sample_rate() << " to "
- + LOG(ERROR) << "Resampling from " << input_params.sample_rate() << " to "
- << output_params.sample_rate();
- const int request_size = disable_fifo ? SincResampler::kDefaultRequestSize :
- input_params.frames_per_buffer();
- @@ -76,7 +76,7 @@ AudioConverter::AudioConverter(const AudioParameters& input_params,
- // asked for, we need to use a FIFO to ensure that both sides read in chunk
- // sizes they're configured for.
- if (input_params.frames_per_buffer() != output_params.frames_per_buffer()) {
- - DVLOG(1) << "Rebuffering from " << input_params.frames_per_buffer()
- + LOG(ERROR) << "Rebuffering from " << input_params.frames_per_buffer()
- << " to " << output_params.frames_per_buffer();
- chunk_size_ = input_params.frames_per_buffer();
- audio_fifo_.reset(new AudioPullFifo(
- diff --git a/media/base/audio_discard_helper.cc b/media/base/audio_discard_helper.cc
- index 303ee79..fccfa9b 100644
- --- a/media/base/audio_discard_helper.cc
- +++ b/media/base/audio_discard_helper.cc
- @@ -64,6 +64,7 @@ bool AudioDiscardHelper::ProcessBuffers(
- // If this is the first buffer seen, setup the timestamp helper.
- const bool first_buffer = !initialized();
- if (first_buffer) {
- +    LOG(ERROR) << "Resetting time state to " << encoded_buffer->timestamp().InMicroseconds();
- // Clamp the base timestamp to zero.
- timestamp_helper_.SetBaseTimestamp(
- std::max(base::TimeDelta(), encoded_buffer->timestamp()));
- @@ -191,8 +192,19 @@ bool AudioDiscardHelper::ProcessBuffers(
- }
- // Assign timestamp to the buffer.
- + if (decoded_buffer->duration() != encoded_buffer->duration()) {
- + LOG(ERROR) << "Duration mismatch: "
- + << encoded_buffer->duration().InMicroseconds() << " vs "
- + << decoded_buffer->duration().InMicroseconds() << ". Delta: "
- + << std::abs((decoded_buffer->duration() -
- + encoded_buffer->duration()).InMicroseconds());
- + }
- +
- decoded_buffer->set_timestamp(timestamp_helper_.GetTimestamp());
- timestamp_helper_.AddFrames(decoded_buffer->frame_count());
- +
- + LOG(ERROR) << encoded_buffer->timestamp().InMicroseconds() << " -> "
- + << decoded_buffer->timestamp().InMicroseconds();
- return true;
- }
- diff --git a/media/base/audio_splicer.cc b/media/base/audio_splicer.cc
- index 7fafc8b..4bb53b1 100644
- --- a/media/base/audio_splicer.cc
- +++ b/media/base/audio_splicer.cc
- @@ -128,7 +128,7 @@ bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- output_timestamp_helper_.SetBaseTimestamp(input->timestamp());
- if (output_timestamp_helper_.base_timestamp() > input->timestamp()) {
- - DVLOG(1) << "Input timestamp is before the base timestamp.";
- + LOG(ERROR) << "Input timestamp is before the base timestamp.";
- return false;
- }
- @@ -139,7 +139,7 @@ bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- if (std::abs(delta.InMilliseconds()) >
- AudioSplicer::kMaxTimeDeltaInMilliseconds) {
- - DVLOG(1) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
- + LOG(ERROR) << "Timestamp delta too large: " << delta.InMicroseconds() << "us";
- return false;
- }
- @@ -153,7 +153,7 @@ bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- }
- if (frames_to_fill > 0) {
- - DVLOG(1) << "Gap detected @ " << expected_timestamp.InMicroseconds()
- + LOG(ERROR) << "Gap detected @ " << expected_timestamp.InMicroseconds()
- << " us: " << delta.InMicroseconds() << " us";
- // Create a buffer with enough silence samples to fill the gap and
- @@ -177,12 +177,12 @@ bool AudioStreamSanitizer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- //
- // A crossfade can't be done here because only the current buffer is available
- // at this point, not previous buffers.
- - DVLOG(1) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
- + LOG(ERROR) << "Overlap detected @ " << expected_timestamp.InMicroseconds()
- << " us: " << -delta.InMicroseconds() << " us";
- const int frames_to_skip = -frames_to_fill;
- if (input->frame_count() <= frames_to_skip) {
- - DVLOG(1) << "Dropping whole buffer";
- + LOG(ERROR) << "Dropping whole buffer";
- return true;
- }
- @@ -267,6 +267,10 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- // added to the output queue.
- if (input->timestamp() + input->duration() < splice_timestamp_) {
- DCHECK(!pre_splice_sanitizer_->HasNextBuffer());
- + LOG(ERROR) << "Skipping pre splice: [" << input->timestamp().InMicroseconds()
- + << ", "
- + << (input->timestamp() + input->duration()).InMicroseconds()
- + << "]";
- return output_sanitizer_->AddInput(input);
- }
- @@ -279,6 +283,10 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- output_ts_helper.frame_count(), output_ts_helper.base_timestamp());
- }
- + LOG(ERROR) << "Add pre splice: [" << input->timestamp().InMicroseconds()
- + << ", "
- + << (input->timestamp() + input->duration()).InMicroseconds()
- + << "]";
- return pre_splice_sanitizer_->AddInput(input);
- }
- @@ -289,8 +297,14 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- // At this point we have all the fade out preroll buffers from the decoder.
- // We now need to wait until we have enough data to perform the crossfade (or
- // we receive an end of stream).
- - if (!post_splice_sanitizer_->AddInput(input))
- + if (!post_splice_sanitizer_->AddInput(input)) {
- return false;
- + } else {
- + LOG(ERROR) << "Add post splice: [" << input->timestamp().InMicroseconds()
- + << ", "
- + << (input->timestamp() + input->duration()).InMicroseconds()
- + << "]";
- + }
- // Ensure |output_sanitizer_| has a valid base timestamp so we can use it for
- // timestamp calculations.
- @@ -304,6 +318,18 @@ bool AudioSplicer::AddInput(const scoped_refptr<AudioBuffer>& input) {
- // the splice. In this case, just transfer all data to the output sanitizer.
- if (pre_splice_sanitizer_->GetFrameCount() <=
- output_ts_helper.GetFramesToTarget(splice_timestamp_)) {
- + LOG(ERROR) << "Aborting false splice @ "
- + << splice_timestamp_.InMicroseconds() << ", NextPre: "
- + << (pre_splice_sanitizer_->HasNextBuffer()
- + ? pre_splice_sanitizer_->timestamp_helper()
- + .GetTimestamp()
- + .InMicroseconds()
- + : -1) << ", FirstPost: "
- + << (post_splice_sanitizer_->HasNextBuffer()
- + ? post_splice_sanitizer_->timestamp_helper()
- + .base_timestamp()
- + .InMicroseconds()
- + : -1);
- CHECK(pre_splice_sanitizer_->DrainInto(output_sanitizer_.get()));
- // If the file contains incorrectly muxed timestamps, there may be huge gaps
- diff --git a/media/filters/audio_renderer_impl.cc b/media/filters/audio_renderer_impl.cc
- index 144e054..f18ae03 100644
- --- a/media/filters/audio_renderer_impl.cc
- +++ b/media/filters/audio_renderer_impl.cc
- @@ -682,6 +682,7 @@ void AudioRendererImpl::OnConfigChange() {
- // only appear after config changes, AddInput() should never fail here.
- while (buffer_converter_->HasNextBuffer())
- CHECK(splicer_->AddInput(buffer_converter_->GetNextBuffer()));
- + buffer_converter_->Reset();
- }
- void AudioRendererImpl::SetBufferingState_Locked(
- diff --git a/media/filters/source_buffer_stream.cc b/media/filters/source_buffer_stream.cc
- index 8bc65854..707af04 100644
- --- a/media/filters/source_buffer_stream.cc
- +++ b/media/filters/source_buffer_stream.cc
- @@ -1697,6 +1697,14 @@ void SourceBufferStream::GenerateSpliceFrame(const BufferQueue& new_buffers) {
- return;
- }
- + const base::TimeDelta end_ts = pre_splice_buffers.front()->timestamp() +
- + pre_splice_buffers.front()->duration();
- + LOG(ERROR) << "Generating splice frame @ "
- +             << splice_timestamp.InMicroseconds() << "us. Overlaps range ["
- +             << pre_splice_buffers.front()->timestamp().InMicroseconds()
- +             << ", " << end_ts.InMicroseconds() << "] by "
- +             << (end_ts - splice_timestamp).InMicroseconds() << "us";
- +
- new_buffers.front()->ConvertToSpliceBuffer(pre_splice_buffers);
- }
- diff --git a/media/formats/mp4/mp4_stream_parser.cc b/media/formats/mp4/mp4_stream_parser.cc
- index 8ec925c..4f993eb 100644
- --- a/media/formats/mp4/mp4_stream_parser.cc
- +++ b/media/formats/mp4/mp4_stream_parser.cc
- @@ -270,6 +270,7 @@ bool MP4StreamParser::ParseMoov(BoxReader* reader) {
- is_audio_track_encrypted_, false, base::TimeDelta(),
- 0);
- has_audio_ = true;
- + LOG(ERROR) << "MP4 Parsing Audio";
- audio_track_id_ = track->header.track_id;
- }
- if (track->media.handler.type == kVideo && !video_config.IsValidConfig()) {
- diff --git a/media/formats/webm/webm_stream_parser.cc b/media/formats/webm/webm_stream_parser.cc
- index dd200d2..ea59454 100644
- --- a/media/formats/webm/webm_stream_parser.cc
- +++ b/media/formats/webm/webm_stream_parser.cc
- @@ -220,6 +220,10 @@ int WebMStreamParser::ParseInfoAndTracks(const uint8* data, int size) {
- if (video_config.is_encrypted())
- FireNeedKey(tracks_parser.video_encryption_key_id());
- + if (audio_config.IsValidConfig()) {
- + LOG(ERROR) << "WebM parsing audio...";
- + }
- +
- if (!config_cb_.Run(audio_config,
- video_config,
- tracks_parser.text_tracks())) {
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement