ffmpeg: recognize planar audio and request packed instead
* Our media kit is designed to work with packed audio, which means the
  samples from different channels are interleaved in a single stream.
* Old ffmpeg versions also used this layout, but newer ones switched to the
  planar format, where each channel is stored separately.
* Fortunately, we can request that ffmpeg use the packed format. We actually
  already tried to do that, but the API for requesting a sample format has
  also changed.
* Finally, we didn't recognize the planar formats reported by the codecs,
  which in some cases could lead to 16/32 bit mismatches on top of the
  planar/packed mixup.

Fixes audio with ffmpeg 2.8 (ticket #12460).
parent 6b56f7d282
commit 894640da2d
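Before the diff, a minimal sketch (not part of the commit) of the idea described above: when the decoder advertises a planar sample format, ask for its packed twin through request_sample_fmt. The function name RequestPackedSamples and the context parameter are invented for this illustration; the FFmpeg calls themselves (av_sample_fmt_is_planar(), av_get_packed_sample_fmt()) are real libavutil API.

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>
}

static void
RequestPackedSamples(AVCodecContext* context)
{
	if (av_sample_fmt_is_planar(context->sample_fmt)) {
		// AV_SAMPLE_FMT_FLTP -> AV_SAMPLE_FMT_FLT, S16P -> S16, and so on.
		context->request_sample_fmt
			= av_get_packed_sample_fmt(context->sample_fmt);
	}
}

Decoders that can only produce planar output simply ignore the request, which is why the format reported back by the codec still has to be recognized, as the Utilities.h changes below do.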
@@ -766,6 +766,10 @@ AVCodecDecoder::_ApplyEssentialAudioContainerPropertiesToContext()
 		= static_cast<int>(containerProperties.frame_size);
 	ConvertRawAudioFormatToAVSampleFormat(
 		containerProperties.output.format, fContext->sample_fmt);
+#if LIBAVCODEC_VERSION_INT > ((52 << 16) | (114 << 8))
+	ConvertRawAudioFormatToAVSampleFormat(
+		containerProperties.output.format, fContext->request_sample_fmt);
+#endif
 	fContext->sample_rate
 		= static_cast<int>(containerProperties.output.frame_rate);
 	fContext->channels
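The guard added here compares LIBAVCODEC_VERSION_INT against a hand-built constant. Since the version int packs major<<16 | minor<<8 | micro, the check simply means "libavcodec newer than 52.114.0", presumably the point where request_sample_fmt became available. A sketch of the same check written with FFmpeg's own helper macro, assuming headers recent enough to ship libavcodec/version.h:

extern "C" {
#include <libavutil/version.h>   // AV_VERSION_INT
#include <libavcodec/version.h>  // LIBAVCODEC_VERSION_INT
}

// Equivalent to the hand-shifted ((52 << 16) | (114 << 8)) comparison.
#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(52, 114, 0)
	// AVCodecContext::request_sample_fmt is available here.
#endif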
@@ -904,8 +908,18 @@ AVCodecDecoder::_MoveAudioFramesToRawDecodedAudioAndUpdateStartTimes()
 		debugger("fDecodedDataBufferSize not multiple of frame size!");
 
 	size_t remainingSize = frames * fOutputFrameSize;
+
+#if 0
+	// Some decoders do not support format conversion on themselves, or use
+	// "planar" audio (each channel separated instead of interleaved samples).
+	// If this is a problem we will need to use swresample to convert the data
+	// here, instead of directly copying it.
+	swr_convert(fResampleContext, fRawDecodedAudio->data,
+		fDecodedDataBuffer->data + fDecodedDataBufferOffset, frames);
+#else
 	memcpy(fRawDecodedAudio->data[0], fDecodedDataBuffer->data[0]
 		+ fDecodedDataBufferOffset, remainingSize);
+#endif
 
 	bool firstAudioFramesCopiedToRawDecodedAudio
 		= fRawDecodedAudio->data[0] != fDecodedData;
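The swr_convert() call left under #if 0 is only a placeholder: the real function also takes the output sample count, and the resampler context has to be configured first. Purely as an illustration of what such a fallback could look like, here is a sketch of converting one decoded (possibly planar) frame to packed samples. The names frame, packedBuffer and maxOutSamples are invented, and the pre-5.x swr_alloc_set_opts() API of that FFmpeg era is assumed.

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}

static int
ConvertFrameToPacked(const AVFrame* frame, uint8_t* packedBuffer,
	int maxOutSamples)
{
	// Same channel layout and rate on both sides; only the sample format
	// changes, from the frame's (planar) format to its packed twin.
	SwrContext* swr = swr_alloc_set_opts(NULL,
		frame->channel_layout,
		av_get_packed_sample_fmt((AVSampleFormat)frame->format),
		frame->sample_rate,
		frame->channel_layout,
		(AVSampleFormat)frame->format,
		frame->sample_rate,
		0, NULL);
	if (swr == NULL || swr_init(swr) < 0)
		return -1;

	// swr_convert() takes arrays of plane pointers on both sides; packed
	// output uses a single plane.
	uint8_t* outPlanes[1] = { packedBuffer };
	int converted = swr_convert(swr, outPlanes, maxOutSamples,
		(const uint8_t**)frame->data, frame->nb_samples);

	swr_free(&swr);
	return converted;  // samples per channel written, or negative on error
}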
@@ -235,7 +235,7 @@ inline void
 ConvertRawAudioFormatToAVSampleFormat(uint32 rawAudioFormatIn,
 	AVSampleFormat& sampleFormatOut)
 {
-	switch(rawAudioFormatIn) {
+	switch (rawAudioFormatIn) {
 		case media_raw_audio_format::B_AUDIO_FLOAT:
 			sampleFormatOut = AV_SAMPLE_FMT_FLT;
 			return;
@@ -281,24 +281,29 @@ inline void
 ConvertAVSampleFormatToRawAudioFormat(AVSampleFormat sampleFormatIn,
 	uint32& rawAudioFormatOut)
 {
-	switch(sampleFormatIn) {
+	switch (sampleFormatIn) {
 		case AV_SAMPLE_FMT_FLT:
+		case AV_SAMPLE_FMT_FLTP:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_FLOAT;
 			return;
 
 		case AV_SAMPLE_FMT_DBL:
+		case AV_SAMPLE_FMT_DBLP:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_DOUBLE;
 			return;
 
 		case AV_SAMPLE_FMT_S32:
+		case AV_SAMPLE_FMT_S32P:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_INT;
 			return;
 
 		case AV_SAMPLE_FMT_S16:
+		case AV_SAMPLE_FMT_S16P:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_SHORT;
 			return;
 
 		case AV_SAMPLE_FMT_U8:
+		case AV_SAMPLE_FMT_U8P:
 			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_UCHAR;
 			return;
 
@@ -311,4 +316,5 @@ ConvertAVSampleFormatToRawAudioFormat(AVSampleFormat sampleFormatIn,
 	rawAudioFormatOut = kBAudioNone;
 }
 
+
 #endif // UTILITIES_H
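The new case labels work because each planar variant has exactly the same sample width as its packed twin (AV_SAMPLE_FMT_S16P is still 16-bit signed, and so on), so the Media Kit format constant is the same; only the in-memory layout differs. As an aside, an alternative to spelling out every *P label would be to collapse the format first, roughly as in the sketch below. This is not what the commit does; the function name carries an Alt suffix to make that clear, and 0 stands in for the header's own kBAudioNone constant.

extern "C" {
#include <libavutil/samplefmt.h>
}
#include <MediaDefs.h>
#include <SupportDefs.h>

// Sketch: normalize to the packed representative before the switch, so one
// label covers both AV_SAMPLE_FMT_S16 and AV_SAMPLE_FMT_S16P, etc.
inline void
ConvertAVSampleFormatToRawAudioFormatAlt(AVSampleFormat sampleFormatIn,
	uint32& rawAudioFormatOut)
{
	switch (av_get_packed_sample_fmt(sampleFormatIn)) {
		case AV_SAMPLE_FMT_FLT:
			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_FLOAT;
			return;
		case AV_SAMPLE_FMT_S16:
			rawAudioFormatOut = media_raw_audio_format::B_AUDIO_SHORT;
			return;
		// ... remaining packed formats as in the real function.
		default:
			rawAudioFormatOut = 0;	// the real header uses kBAudioNone
			return;
	}
}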