ffmpeg: recognize planar audio and request packed instead

* Our media kit is designed to work with packed audio, meaning that the
  samples from different channels are interleaved in a single stream (see
  the sketch after this message).
* Old ffmpeg versions used this layout as well, but newer ones have
  switched to the planar format, where each channel is stored separately.
* Fortunately, we can ask ffmpeg to use the packed format. We were already
  trying to do that, but the API for requesting a sample format has also
  changed.
* Finally, we didn't recognize the planar formats reported by the codecs,
  which in some cases could lead to 16/32 bit mismatches on top of the
  planar/packed mixup.

Fixes audio with ffmpeg 2.8 (ticket #12460)
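
For illustration only (not part of the commit): a minimal sketch of the two
memory layouts the message describes, using hypothetical stereo 16-bit
buffers and sample values.

#include <stdint.h>

// Hypothetical stereo 16-bit buffers holding the same 4 frames.

// Packed (interleaved), what the Media Kit expects: one buffer, frame after frame.
static int16_t sPacked[8] = {
	100, -100,	// frame 0: left, right
	200, -200,	// frame 1
	300, -300,	// frame 2
	400, -400	// frame 3
};

// Planar, what newer ffmpeg decoders produce: one separate buffer per channel.
static int16_t sLeft[4] = { 100, 200, 300, 400 };
static int16_t sRight[4] = { -100, -200, -300, -400 };
static int16_t* sPlanar[2] = { sLeft, sRight };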
Adrien Destugues, 2015-11-14 19:15:03 +01:00
commit 894640da2d (parent 6b56f7d282)
2 changed files with 22 additions and 2 deletions

AVCodecDecoder.cpp

@@ -766,6 +766,10 @@ AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
 		= static_cast<int>(containerProperties.frame_size);
 	ConvertRawAudioFormatToAVSampleFormat(
 		containerProperties.output.format, fContext->sample_fmt);
+#if LIBAVCODEC_VERSION_INT > ((52 << 16) | (114 << 8))
+	ConvertRawAudioFormatToAVSampleFormat(
+		containerProperties.output.format, fContext->request_sample_fmt);
+#endif
 	fContext->sample_rate
 		= static_cast<int>(containerProperties.output.frame_rate);
 	fContext->channels
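
A hedged, stand-alone sketch of what the #if block above relies on: on
libavcodec versions newer than 52.114, AVCodecContext has a
request_sample_fmt field that can be set before opening the codec to ask
for a packed output format. It is only a request, and decoders may still
emit their native (planar) format, which is why the planar formats also
have to be recognized elsewhere. The helper name and error handling below
are illustrative, not the decoder's actual code.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Illustrative only: ask a decoder for packed signed 16-bit output.
static AVCodecContext*
open_with_packed_request(const AVCodec* codec)
{
	AVCodecContext* context = avcodec_alloc_context3(codec);
	if (context == NULL)
		return NULL;

#if LIBAVCODEC_VERSION_INT > ((52 << 16) | (114 << 8))
	// A request only; the decoder may still use its native (planar) format.
	context->request_sample_fmt = AV_SAMPLE_FMT_S16;
#endif

	if (avcodec_open2(context, codec, NULL) < 0) {
		avcodec_free_context(&context);
		return NULL;
	}
	return context;
}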
@@ -904,8 +908,18 @@ AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
 		debugger("fDecodedDataBufferSize not multiple of frame size!");
 
 	size_t remainingSize = frames * fOutputFrameSize;
+
+#if 0
+	// Some decoders do not support format conversion on themselves, or use
+	// "planar" audio (each channel separated instead of interleaved samples).
+	// If this is a problem we will need to use swresample to convert the data
+	// here, instead of directly copying it.
+	swr_convert(fResampleContext, fRawDecodedAudio->data,
+		fDecodedDataBuffer->data + fDecodedDataBufferOffset, frames);
+#else
 	memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
 		+ fDecodedDataBufferOffset, remainingSize);
+#endif
 
 	bool firstAudioFramesCopiedToRawDecodedAudio
 		= fRawDecodedAudio->data[0] != fDecodedData;
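
The disabled branch above is only a note to future readers; it is never
compiled. If the swresample route were enabled, it would presumably look
closer to the hedged sketch below: the context has to be configured and
initialized first, and the real swr_convert() takes separate output and
input frame counts. Channel layout, sample rates, formats and the helper
name are placeholders, not values from the decoder.

extern "C" {
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
}

// Illustrative only: convert planar float input to packed 16-bit output.
static int
convert_planar_to_packed(uint8_t** out, const uint8_t** in, int frames)
{
	// The decoder's real channel layout, rates and formats would go here.
	SwrContext* context = swr_alloc_set_opts(NULL,
		AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
		AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 44100, 0, NULL);
	if (context == NULL)
		return -1;
	if (swr_init(context) < 0) {
		swr_free(&context);
		return -1;
	}

	// Unlike the disabled call above, swr_convert() takes both frame counts.
	int converted = swr_convert(context, out, frames, in, frames);

	swr_free(&context);
	return converted;
}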

Utilities.h

@@ -235,7 +235,7 @@ inline void
 ConvertRawAudioFormatToAVSampleFormat(uint32 rawAudioFormatIn,
 	AVSampleFormat& sampleFormatOut)
 {
-	switch(rawAudioFormatIn) {
+	switch (rawAudioFormatIn) {
 		case media_raw_audio_format::B_AUDIO_FLOAT:
 			sampleFormatOut = AV_SAMPLE_FMT_FLT;
 			return;
@@ -281,24 +281,29 @@ inline void
 ConvertAVSampleFormatToRawAudioFormat(AVSampleFormat sampleFormatIn,
 	uint32& rawAudioFormatOut)
 {
-	switch(sampleFormatIn) {
+	switch (sampleFormatIn) {
 		case AV_SAMPLE_FMT_FLT:
+		case AV_SAMPLE_FMT_FLTP:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_FLOAT;
 			return;
 
 		case AV_SAMPLE_FMT_DBL:
+		case AV_SAMPLE_FMT_DBLP:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_DOUBLE;
 			return;
 
 		case AV_SAMPLE_FMT_S32:
+		case AV_SAMPLE_FMT_S32P:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_INT;
 			return;
 
 		case AV_SAMPLE_FMT_S16:
+		case AV_SAMPLE_FMT_S16P:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_SHORT;
 			return;
 
 		case AV_SAMPLE_FMT_U8:
+		case AV_SAMPLE_FMT_U8P:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_UCHAR;
 			return;
@@ -311,4 +316,5 @@ ConvertAVSampleFormatToRawAudioFormat(AVSampleFormat sampleFormatIn,
 	rawAudioFormatOut = kBAudioNone;
 }
 
 #endif // UTILITIES_H
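
A small usage sketch (not part of the commit) of the mapping above: with
the new cases, a codec reporting a planar format now resolves to the
matching Media Kit constant instead of falling through to the "unknown"
default, which is one place the 16/32 bit mismatches mentioned in the
message could come from. The function name below is hypothetical; it only
assumes the Utilities.h shown above is on the include path.

#include "Utilities.h"

static void
MapExampleFormats()
{
	uint32 format = 0;

	// Planar float resolves to the same Media Kit constant as packed float...
	ConvertAVSampleFormatToRawAudioFormat(AV_SAMPLE_FMT_FLTP, format);
		// format == media_raw_audio_format::B_AUDIO_FLOAT

	// ...and planar 16-bit no longer falls through to the default case.
	ConvertAVSampleFormatToRawAudioFormat(AV_SAMPLE_FMT_S16P, format);
		// format == media_raw_audio_format::B_AUDIO_SHORT
}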