author     wolfbeast <mcwerewolf@wolfbeast.com>    2019-05-25 17:44:44 +0200
committer  wolfbeast <mcwerewolf@wolfbeast.com>    2019-05-25 17:45:05 +0200
commit     c4e345b6d499393132c0cd57d10c98a7a4db370b (patch)
tree       8e58ae34ba4e5b69453e4b2d07fdc6a0ae464d20
parent     764e2a6247dd2690b302af3677e544a411d70c7e (diff)
[media] Rewrite AudioConverter::DownmixAudio

- Structure the code better
- Directly downmix to mono from multichannel
-rw-r--r--    dom/media/AudioConverter.cpp    108
1 file changed, 62 insertions(+), 46 deletions(-)
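
The second bullet is the substantive change: instead of downmixing multichannel input to stereo and then re-reading the output buffer in a second pass to fold stereo down to mono, the rewritten loop applies the downmix matrix per frame and averages the left/right sums immediately when the target is mono. A minimal sketch of that shape for the float path (function and parameter names here are illustrative, not the actual AudioConverter API):

#include <cstddef>
#include <cstdint>

// Illustrative only: a standalone sketch of the rewritten float path.
// The coefficient table and names are placeholders, not the dmatrix
// defined in AudioConverter.cpp.
static void DownmixFramesFlt(const float* aIn, float* aOut, size_t aFrames,
                             uint32_t aInChannels, uint32_t aOutChannels,
                             const float (*aMatrix)[2])
{
  for (size_t i = 0; i < aFrames; i++) {
    float sampL = 0.0f;
    float sampR = 0.0f;
    // Fold every input channel into a left and a right sum.
    for (uint32_t j = 0; j < aInChannels; j++) {
      sampL += aIn[i * aInChannels + j] * aMatrix[j][0];
      sampR += aIn[i * aInChannels + j] * aMatrix[j][1];
    }
    if (aOutChannels == 2) {
      *aOut++ = sampL;
      *aOut++ = sampR;
    } else {
      // Mono target: average the two sums in the same pass, which is
      // what removes the old second stereo-to-mono loop.
      *aOut++ = (sampL + sampR) * 0.5f;
    }
  }
}

With this shape the stereo and mono targets share one loop, so the multichannel branch can return early instead of falling through via the old "aIn = aOut; channels = 2;" trick.
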
diff --git a/dom/media/AudioConverter.cpp b/dom/media/AudioConverter.cpp
index 25b981f43..002e79108 100644
--- a/dom/media/AudioConverter.cpp
+++ b/dom/media/AudioConverter.cpp
@@ -5,8 +5,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AudioConverter.h"
-#include <string.h>
#include <speex/speex_resampler.h>
+#include <string.h>
#include <cmath>
/*
@@ -140,24 +140,28 @@ static inline int16_t clipTo15(int32_t aX)
size_t
AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
{
- MOZ_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
- mIn.Format() == AudioConfig::FORMAT_FLT);
- MOZ_ASSERT(mIn.Channels() >= mOut.Channels());
- MOZ_ASSERT(mIn.Layout() == AudioConfig::ChannelLayout(mIn.Channels()),
- "Can only downmix input data in SMPTE layout");
- MOZ_ASSERT(mOut.Layout() == AudioConfig::ChannelLayout(2) ||
- mOut.Layout() == AudioConfig::ChannelLayout(1));
+ MOZ_DIAGNOSTIC_ASSERT(mIn.Format() == AudioConfig::FORMAT_S16 ||
+ mIn.Format() == AudioConfig::FORMAT_FLT);
+ MOZ_DIAGNOSTIC_ASSERT(mIn.Channels() >= mOut.Channels());
+ MOZ_DIAGNOSTIC_ASSERT(
+ mIn.Layout() == AudioConfig::ChannelLayout(mIn.Channels()),
+ "Can only downmix input data in SMPTE layout");
+ MOZ_DIAGNOSTIC_ASSERT(mOut.Layout() == AudioConfig::ChannelLayout(2) ||
+ mOut.Layout() == AudioConfig::ChannelLayout(1),
+ "Can only downmix to stereo or mono");
- uint32_t channels = mIn.Channels();
+ uint32_t inChannels = mIn.Channels();
+ uint32_t outChannels = mOut.Channels();
- if (channels == 1 && mOut.Channels() == 1) {
+ if (inChannels == outChannels) {
+ // Number of channels is equal; no processing needed, just move data.
if (aOut != aIn) {
memmove(aOut, aIn, FramesOutToBytes(aFrames));
}
return aFrames;
}
- if (channels > 2) {
+ if (inChannels > 2) {
if (mIn.Format() == AudioConfig::FORMAT_FLT) {
// Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
static const float dmatrix[6][8][2]= {
@@ -174,12 +178,18 @@ AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
for (uint32_t i = 0; i < aFrames; i++) {
float sampL = 0.0;
float sampR = 0.0;
- for (uint32_t j = 0; j < channels; j++) {
- sampL += in[i*mIn.Channels()+j]*dmatrix[mIn.Channels()-3][j][0];
- sampR += in[i*mIn.Channels()+j]*dmatrix[mIn.Channels()-3][j][1];
+ for (uint32_t j = 0; j < inChannels; j++) {
+ sampL += in[i * inChannels + j] * dmatrix[inChannels - 3][j][0];
+ sampR += in[i * inChannels + j] * dmatrix[inChannels - 3][j][1];
+ }
+ if (outChannels == 2) {
+ // Stereo
+ *out++ = sampL;
+ *out++ = sampR;
+ } else {
+ // Mono
+ *out++ = (sampL + sampR) * 0.5;
}
- *out++ = sampL;
- *out++ = sampR;
}
} else if (mIn.Format() == AudioConfig::FORMAT_S16) {
// Downmix matrix. Per-row normalization 1 for rows 3,4 and 2 for rows 5-8.
@@ -198,45 +208,51 @@ AudioConverter::DownmixAudio(void* aOut, const void* aIn, size_t aFrames) const
for (uint32_t i = 0; i < aFrames; i++) {
int32_t sampL = 0;
int32_t sampR = 0;
- for (uint32_t j = 0; j < channels; j++) {
- sampL+=in[i*channels+j]*dmatrix[channels-3][j][0];
- sampR+=in[i*channels+j]*dmatrix[channels-3][j][1];
+ for (uint32_t j = 0; j < inChannels; j++) {
+ sampL += in[i * inChannels + j] * dmatrix[inChannels - 3][j][0];
+ sampR += in[i * inChannels + j] * dmatrix[inChannels - 3][j][1];
+ }
+ sampL = clipTo15((sampL + 8192) >> 14);
+ sampR = clipTo15((sampR + 8192) >> 14);
+ if (outChannels == 2) {
+ // Stereo
+ *out++ = sampL;
+ *out++ = sampR;
+ } else {
+ // Mono
+ *out++ = (sampL + sampR) * 0.5;
}
- *out++ = clipTo15((sampL + 8192)>>14);
- *out++ = clipTo15((sampR + 8192)>>14);
}
} else {
MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
}
- // If we are to continue downmixing to mono, start working on the output
- // buffer.
- aIn = aOut;
- channels = 2;
+ return aFrames;
}
- if (mOut.Channels() == 1) {
- if (mIn.Format() == AudioConfig::FORMAT_FLT) {
- const float* in = static_cast<const float*>(aIn);
- float* out = static_cast<float*>(aOut);
- for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
- float sample = 0.0;
- // The sample of the buffer would be interleaved.
- sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
- *out++ = sample;
- }
- } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
- const int16_t* in = static_cast<const int16_t*>(aIn);
- int16_t* out = static_cast<int16_t*>(aOut);
- for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
- int32_t sample = 0.0;
- // The sample of the buffer would be interleaved.
- sample = (in[fIdx*channels] + in[fIdx*channels + 1]) * 0.5;
- *out++ = sample;
- }
- } else {
- MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
+ // If we get here, we're doing a stereo -> mono conversion.
+ MOZ_DIAGNOSTIC_ASSERT(inChannels == 2 && outChannels == 1);
+
+ if (mIn.Format() == AudioConfig::FORMAT_FLT) {
+ const float* in = static_cast<const float*>(aIn);
+ float* out = static_cast<float*>(aOut);
+ for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
+ float sample = 0.0;
+      // Samples in the buffer are interleaved.
+ sample = (in[fIdx * inChannels] + in[fIdx * inChannels + 1]) * 0.5;
+ *out++ = sample;
}
+ } else if (mIn.Format() == AudioConfig::FORMAT_S16) {
+ const int16_t* in = static_cast<const int16_t*>(aIn);
+ int16_t* out = static_cast<int16_t*>(aOut);
+ for (size_t fIdx = 0; fIdx < aFrames; ++fIdx) {
+ int32_t sample = 0.0;
+      // Samples in the buffer are interleaved.
+ sample = (in[fIdx * inChannels] + in[fIdx * inChannels + 1]) * 0.5;
+ *out++ = sample;
+ }
+ } else {
+ MOZ_DIAGNOSTIC_ASSERT(false, "Unsupported data type");
}
return aFrames;
}
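
For the S16 branch, the coefficients are applied in integer arithmetic and the accumulated sums are scaled back before being stored; the (sum + 8192) >> 14 in the new code looks like a round-to-nearest rescale of a Q14 fixed-point sum followed by saturation. A self-contained sketch of that step (ClipTo15 here is written out for illustration; the real clipTo15 is declared in AudioConverter.cpp and may differ in detail):

#include <cstdint>

// Saturating clamp to the int16_t range, mirroring what a clipTo15-style
// helper needs to do (illustrative reimplementation).
static inline int16_t ClipTo15(int32_t aX)
{
  if (aX < -32768) {
    return -32768;
  }
  if (aX > 32767) {
    return 32767;
  }
  return static_cast<int16_t>(aX);
}

// Round-to-nearest rescale of a Q14 fixed-point sum: +8192 is half of
// 2^14, so the shift rounds instead of truncating toward negative infinity.
static inline int16_t ScaleQ14(int32_t aSum)
{
  return ClipTo15((aSum + 8192) >> 14);
}
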