diff options
author | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
---|---|---|
committer | Matt A. Tobin <mattatobin@localhost.localdomain> | 2018-02-02 04:16:08 -0500 |
commit | 5f8de423f190bbb79a62f804151bc24824fa32d8 (patch) | |
tree | 10027f336435511475e392454359edea8e25895d /dom/media/webaudio | |
parent | 49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff) | |
download | UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip |
Add m-esr52 at 52.6.0
Diffstat (limited to 'dom/media/webaudio')
342 files changed, 40757 insertions, 0 deletions
diff --git a/dom/media/webaudio/AlignedTArray.h b/dom/media/webaudio/AlignedTArray.h new file mode 100644 index 000000000..afd2f1f48 --- /dev/null +++ b/dom/media/webaudio/AlignedTArray.h @@ -0,0 +1,121 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef AlignedTArray_h__ +#define AlignedTArray_h__ + +#include "mozilla/Alignment.h" +#include "nsTArray.h" + +/** + * E: element type, must be a POD type. + * N: N bytes alignment for the first element, defaults to 32 + * S: S bytes of inline storage + */ +template <typename E, int S, int N = 32> +class AlignedAutoTArray : private AutoTArray<E, S + N> +{ + static_assert((N & (N-1)) == 0, "N must be power of 2"); + typedef AutoTArray<E, S + N> base_type; +public: + typedef E elem_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::index_type index_type; + + AlignedAutoTArray() {} + explicit AlignedAutoTArray(size_type capacity) : base_type(capacity + sExtra) {} + elem_type* Elements() { return getAligned(base_type::Elements()); } + const elem_type* Elements() const { return getAligned(base_type::Elements()); } + elem_type& operator[](index_type i) { return Elements()[i];} + const elem_type& operator[](index_type i) const { return Elements()[i]; } + + void SetLength(size_type newLen) + { + base_type::SetLength(newLen + sExtra); + } + + MOZ_MUST_USE + bool SetLength(size_type newLen, const mozilla::fallible_t&) + { + return base_type::SetLength(newLen + sExtra, mozilla::fallible); + } + + size_type Length() const { + return base_type::Length() <= sExtra ? 
0 : base_type::Length() - sExtra; + } + + using base_type::ShallowSizeOfExcludingThis; + using base_type::ShallowSizeOfIncludingThis; + +private: + AlignedAutoTArray(const AlignedAutoTArray& other) = delete; + void operator=(const AlignedAutoTArray& other) = delete; + + static const size_type sPadding = N <= MOZ_ALIGNOF(E) ? 0 : N - MOZ_ALIGNOF(E); + static const size_type sExtra = (sPadding + sizeof(E) - 1) / sizeof(E); + + template <typename U> + static U* getAligned(U* p) + { + return reinterpret_cast<U*>(((uintptr_t)p + N - 1) & ~(N-1)); + } +}; + +/** + * E: element type, must be a POD type. + * N: N bytes alignment for the first element, defaults to 32 + */ +template <typename E, int N = 32> +class AlignedTArray : private nsTArray_Impl<E, nsTArrayInfallibleAllocator> +{ + static_assert((N & (N-1)) == 0, "N must be power of 2"); + typedef nsTArray_Impl<E, nsTArrayInfallibleAllocator> base_type; +public: + typedef E elem_type; + typedef typename base_type::size_type size_type; + typedef typename base_type::index_type index_type; + + AlignedTArray() {} + explicit AlignedTArray(size_type capacity) : base_type(capacity + sExtra) {} + elem_type* Elements() { return getAligned(base_type::Elements()); } + const elem_type* Elements() const { return getAligned(base_type::Elements()); } + elem_type& operator[](index_type i) { return Elements()[i];} + const elem_type& operator[](index_type i) const { return Elements()[i]; } + + void SetLength(size_type newLen) + { + base_type::SetLength(newLen + sExtra); + } + + MOZ_MUST_USE + bool SetLength(size_type newLen, const mozilla::fallible_t&) + { + return base_type::SetLength(newLen + sExtra, mozilla::fallible); + } + + size_type Length() const { + return base_type::Length() <= sExtra ? 
0 : base_type::Length() - sExtra; + } + + using base_type::ShallowSizeOfExcludingThis; + using base_type::ShallowSizeOfIncludingThis; + +private: + AlignedTArray(const AlignedTArray& other) = delete; + void operator=(const AlignedTArray& other) = delete; + + static const size_type sPadding = N <= MOZ_ALIGNOF(E) ? 0 : N - MOZ_ALIGNOF(E); + static const size_type sExtra = (sPadding + sizeof(E) - 1) / sizeof(E); + + template <typename U> + static U* getAligned(U* p) + { + return reinterpret_cast<U*>(((uintptr_t)p + N - 1) & ~(N-1)); + } +}; + + +#endif // AlignedTArray_h__ diff --git a/dom/media/webaudio/AlignmentUtils.h b/dom/media/webaudio/AlignmentUtils.h new file mode 100644 index 000000000..6b145a8ca --- /dev/null +++ b/dom/media/webaudio/AlignmentUtils.h @@ -0,0 +1,29 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef AlignmentUtils_h__ +#define AlignmentUtils_h__ + +#define IS_ALIGNED16(ptr) ((((uintptr_t)ptr + 15) & ~0x0F) == (uintptr_t)ptr) + +#ifdef DEBUG + #define ASSERT_ALIGNED16(ptr) \ + MOZ_ASSERT(IS_ALIGNED16(ptr), \ + #ptr " has to be aligned to a 16 byte boundary"); +#else + #define ASSERT_ALIGNED16(ptr) +#endif + +#ifdef DEBUG + #define ASSERT_MULTIPLE16(v) \ + MOZ_ASSERT(v % 16 == 0, #v " has to be a a multiple of 16"); +#else + #define ASSERT_MULTIPLE16(v) +#endif + +#define ALIGNED16(ptr) (float*)(((uintptr_t)ptr + 15) & ~0x0F); + +#endif diff --git a/dom/media/webaudio/AnalyserNode.cpp b/dom/media/webaudio/AnalyserNode.cpp new file mode 100644 index 000000000..64c3cf4da --- /dev/null +++ b/dom/media/webaudio/AnalyserNode.cpp @@ -0,0 +1,387 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "mozilla/dom/AnalyserNode.h" +#include "mozilla/dom/AnalyserNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "mozilla/Mutex.h" +#include "mozilla/PodOperations.h" + +namespace mozilla { + +static const uint32_t MAX_FFT_SIZE = 32768; +static const size_t CHUNK_COUNT = MAX_FFT_SIZE >> WEBAUDIO_BLOCK_SIZE_BITS; +static_assert(MAX_FFT_SIZE == CHUNK_COUNT * WEBAUDIO_BLOCK_SIZE, + "MAX_FFT_SIZE must be a multiple of WEBAUDIO_BLOCK_SIZE"); +static_assert((CHUNK_COUNT & (CHUNK_COUNT - 1)) == 0, + "CHUNK_COUNT must be power of 2 for remainder behavior"); + +namespace dom { + +NS_IMPL_ISUPPORTS_INHERITED0(AnalyserNode, AudioNode) + +class AnalyserNodeEngine final : public AudioNodeEngine +{ + class TransferBuffer final : public Runnable + { + public: + TransferBuffer(AudioNodeStream* aStream, + const AudioChunk& aChunk) + : mStream(aStream) + , mChunk(aChunk) + { + } + + NS_IMETHOD Run() override + { + RefPtr<AnalyserNode> node = + static_cast<AnalyserNode*>(mStream->Engine()->NodeMainThread()); + if (node) { + node->AppendChunk(mChunk); + } + return NS_OK; + } + + private: + RefPtr<AudioNodeStream> mStream; + AudioChunk mChunk; + }; + +public: + explicit AnalyserNodeEngine(AnalyserNode* aNode) + : AudioNodeEngine(aNode) + { + MOZ_ASSERT(NS_IsMainThread()); + } + + virtual void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + *aOutput = aInput; + + if (aInput.IsNull()) { + // If AnalyserNode::mChunks has only null chunks, then there is no need + // to send further null chunks. + if (mChunksToProcess == 0) { + return; + } + + --mChunksToProcess; + if (mChunksToProcess == 0) { + aStream->ScheduleCheckForInactive(); + } + + } else { + // This many null chunks will be required to empty AnalyserNode::mChunks. 
+ mChunksToProcess = CHUNK_COUNT; + } + + RefPtr<TransferBuffer> transfer = + new TransferBuffer(aStream, aInput.AsAudioChunk()); + NS_DispatchToMainThread(transfer); + } + + virtual bool IsActive() const override + { + return mChunksToProcess != 0; + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + uint32_t mChunksToProcess = 0; +}; + +AnalyserNode::AnalyserNode(AudioContext* aContext) + : AudioNode(aContext, + 1, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mAnalysisBlock(2048) + , mMinDecibels(-100.) + , mMaxDecibels(-30.) + , mSmoothingTimeConstant(.8) +{ + mStream = AudioNodeStream::Create(aContext, + new AnalyserNodeEngine(this), + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); + + // Enough chunks must be recorded to handle the case of fftSize being + // increased to maximum immediately before getFloatTimeDomainData() is + // called, for example. 
+ Unused << mChunks.SetLength(CHUNK_COUNT, fallible); + + AllocateBuffer(); +} + +size_t +AnalyserNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mAnalysisBlock.SizeOfExcludingThis(aMallocSizeOf); + amount += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf); + amount += mOutputBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +size_t +AnalyserNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +AnalyserNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return AnalyserNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +AnalyserNode::SetFftSize(uint32_t aValue, ErrorResult& aRv) +{ + // Disallow values that are not a power of 2 and outside the [32,32768] range + if (aValue < 32 || + aValue > MAX_FFT_SIZE || + (aValue & (aValue - 1)) != 0) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + if (FftSize() != aValue) { + mAnalysisBlock.SetFFTSize(aValue); + AllocateBuffer(); + } +} + +void +AnalyserNode::SetMinDecibels(double aValue, ErrorResult& aRv) +{ + if (aValue >= mMaxDecibels) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + mMinDecibels = aValue; +} + +void +AnalyserNode::SetMaxDecibels(double aValue, ErrorResult& aRv) +{ + if (aValue <= mMinDecibels) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + mMaxDecibels = aValue; +} + +void +AnalyserNode::SetSmoothingTimeConstant(double aValue, ErrorResult& aRv) +{ + if (aValue < 0 || aValue > 1) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + mSmoothingTimeConstant = aValue; +} + +void +AnalyserNode::GetFloatFrequencyData(const Float32Array& aArray) +{ + if (!FFTAnalysis()) { + // Might fail to allocate memory + return; + } + + aArray.ComputeLengthAndData(); + + float* buffer = aArray.Data(); + size_t length = std::min(size_t(aArray.Length()), 
mOutputBuffer.Length()); + + for (size_t i = 0; i < length; ++i) { + buffer[i] = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels); + } +} + +void +AnalyserNode::GetByteFrequencyData(const Uint8Array& aArray) +{ + if (!FFTAnalysis()) { + // Might fail to allocate memory + return; + } + + const double rangeScaleFactor = 1.0 / (mMaxDecibels - mMinDecibels); + + aArray.ComputeLengthAndData(); + + unsigned char* buffer = aArray.Data(); + size_t length = std::min(size_t(aArray.Length()), mOutputBuffer.Length()); + + for (size_t i = 0; i < length; ++i) { + const double decibels = WebAudioUtils::ConvertLinearToDecibels(mOutputBuffer[i], mMinDecibels); + // scale down the value to the range of [0, UCHAR_MAX] + const double scaled = std::max(0.0, std::min(double(UCHAR_MAX), + UCHAR_MAX * (decibels - mMinDecibels) * rangeScaleFactor)); + buffer[i] = static_cast<unsigned char>(scaled); + } +} + +void +AnalyserNode::GetFloatTimeDomainData(const Float32Array& aArray) +{ + aArray.ComputeLengthAndData(); + + float* buffer = aArray.Data(); + size_t length = std::min(aArray.Length(), FftSize()); + + GetTimeDomainData(buffer, length); +} + +void +AnalyserNode::GetByteTimeDomainData(const Uint8Array& aArray) +{ + aArray.ComputeLengthAndData(); + + size_t length = std::min(aArray.Length(), FftSize()); + + AlignedTArray<float> tmpBuffer; + if (!tmpBuffer.SetLength(length, fallible)) { + return; + } + + GetTimeDomainData(tmpBuffer.Elements(), length); + + unsigned char* buffer = aArray.Data(); + for (size_t i = 0; i < length; ++i) { + const float value = tmpBuffer[i]; + // scale the value to the range of [0, UCHAR_MAX] + const float scaled = std::max(0.0f, std::min(float(UCHAR_MAX), + 128.0f * (value + 1.0f))); + buffer[i] = static_cast<unsigned char>(scaled); + } +} + +bool +AnalyserNode::FFTAnalysis() +{ + AlignedTArray<float> tmpBuffer; + size_t fftSize = FftSize(); + if (!tmpBuffer.SetLength(fftSize, fallible)) { + return false; + } + + float* inputBuffer = 
tmpBuffer.Elements(); + GetTimeDomainData(inputBuffer, fftSize); + ApplyBlackmanWindow(inputBuffer, fftSize); + mAnalysisBlock.PerformFFT(inputBuffer); + + // Normalize so than an input sine wave at 0dBfs registers as 0dBfs (undo FFT scaling factor). + const double magnitudeScale = 1.0 / fftSize; + + for (uint32_t i = 0; i < mOutputBuffer.Length(); ++i) { + double scalarMagnitude = NS_hypot(mAnalysisBlock.RealData(i), + mAnalysisBlock.ImagData(i)) * + magnitudeScale; + mOutputBuffer[i] = mSmoothingTimeConstant * mOutputBuffer[i] + + (1.0 - mSmoothingTimeConstant) * scalarMagnitude; + } + + return true; +} + +void +AnalyserNode::ApplyBlackmanWindow(float* aBuffer, uint32_t aSize) +{ + double alpha = 0.16; + double a0 = 0.5 * (1.0 - alpha); + double a1 = 0.5; + double a2 = 0.5 * alpha; + + for (uint32_t i = 0; i < aSize; ++i) { + double x = double(i) / aSize; + double window = a0 - a1 * cos(2 * M_PI * x) + a2 * cos(4 * M_PI * x); + aBuffer[i] *= window; + } +} + +bool +AnalyserNode::AllocateBuffer() +{ + bool result = true; + if (mOutputBuffer.Length() != FrequencyBinCount()) { + if (!mOutputBuffer.SetLength(FrequencyBinCount(), fallible)) { + return false; + } + memset(mOutputBuffer.Elements(), 0, sizeof(float) * FrequencyBinCount()); + } + return result; +} + +void +AnalyserNode::AppendChunk(const AudioChunk& aChunk) +{ + if (mChunks.Length() == 0) { + return; + } + + ++mCurrentChunk; + mChunks[mCurrentChunk & (CHUNK_COUNT - 1)] = aChunk; +} + +// Reads into aData the oldest aLength samples of the fftSize most recent +// samples. 
+void +AnalyserNode::GetTimeDomainData(float* aData, size_t aLength) +{ + size_t fftSize = FftSize(); + MOZ_ASSERT(aLength <= fftSize); + + if (mChunks.Length() == 0) { + PodZero(aData, aLength); + return; + } + + size_t readChunk = + mCurrentChunk - ((fftSize - 1) >> WEBAUDIO_BLOCK_SIZE_BITS); + size_t readIndex = (0 - fftSize) & (WEBAUDIO_BLOCK_SIZE - 1); + MOZ_ASSERT(readIndex == 0 || readIndex + fftSize == WEBAUDIO_BLOCK_SIZE); + + for (size_t writeIndex = 0; writeIndex < aLength; ) { + const AudioChunk& chunk = mChunks[readChunk & (CHUNK_COUNT - 1)]; + const size_t channelCount = chunk.ChannelCount(); + size_t copyLength = + std::min<size_t>(aLength - writeIndex, WEBAUDIO_BLOCK_SIZE); + float* dataOut = &aData[writeIndex]; + + if (channelCount == 0) { + PodZero(dataOut, copyLength); + } else { + float scale = chunk.mVolume / channelCount; + { // channel 0 + auto channelData = + static_cast<const float*>(chunk.mChannelData[0]) + readIndex; + AudioBufferCopyWithScale(channelData, scale, dataOut, copyLength); + } + for (uint32_t i = 1; i < channelCount; ++i) { + auto channelData = + static_cast<const float*>(chunk.mChannelData[i]) + readIndex; + AudioBufferAddWithScale(channelData, scale, dataOut, copyLength); + } + } + + readChunk++; + writeIndex += copyLength; + } +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/AnalyserNode.h b/dom/media/webaudio/AnalyserNode.h new file mode 100644 index 000000000..7fca5df6f --- /dev/null +++ b/dom/media/webaudio/AnalyserNode.h @@ -0,0 +1,90 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef AnalyserNode_h_ +#define AnalyserNode_h_ + +#include "AudioNode.h" +#include "FFTBlock.h" +#include "AlignedTArray.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class AnalyserNode final : public AudioNode +{ +public: + explicit AnalyserNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + + virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + void GetFloatFrequencyData(const Float32Array& aArray); + void GetByteFrequencyData(const Uint8Array& aArray); + void GetFloatTimeDomainData(const Float32Array& aArray); + void GetByteTimeDomainData(const Uint8Array& aArray); + uint32_t FftSize() const + { + return mAnalysisBlock.FFTSize(); + } + void SetFftSize(uint32_t aValue, ErrorResult& aRv); + uint32_t FrequencyBinCount() const + { + return FftSize() / 2; + } + double MinDecibels() const + { + return mMinDecibels; + } + void SetMinDecibels(double aValue, ErrorResult& aRv); + double MaxDecibels() const + { + return mMaxDecibels; + } + void SetMaxDecibels(double aValue, ErrorResult& aRv); + double SmoothingTimeConstant() const + { + return mSmoothingTimeConstant; + } + void SetSmoothingTimeConstant(double aValue, ErrorResult& aRv); + + virtual const char* NodeType() const override + { + return "AnalyserNode"; + } + + virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + ~AnalyserNode() {} + +private: + friend class AnalyserNodeEngine; + void AppendChunk(const AudioChunk& aChunk); + bool AllocateBuffer(); + bool FFTAnalysis(); + void ApplyBlackmanWindow(float* aBuffer, uint32_t aSize); + void GetTimeDomainData(float* aData, size_t aLength); + +private: + FFTBlock mAnalysisBlock; + nsTArray<AudioChunk> mChunks; + double mMinDecibels; + double mMaxDecibels; + double mSmoothingTimeConstant; + size_t mCurrentChunk = 0; + AlignedTArray<float> mOutputBuffer; +}; + +} // 
namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/AudioBlock.cpp b/dom/media/webaudio/AudioBlock.cpp new file mode 100644 index 000000000..a8c714019 --- /dev/null +++ b/dom/media/webaudio/AudioBlock.cpp @@ -0,0 +1,166 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioBlock.h" +#include "AlignmentUtils.h" + +namespace mozilla { + +/** + * Heap-allocated buffer of channels of 128-sample float arrays, with + * threadsafe refcounting. Typically you would allocate one of these, fill it + * in, and then treat it as immutable while it's shared. + * + * Downstream references are accounted specially so that the creator of the + * buffer can reuse and modify its contents next iteration if other references + * are all downstream temporary references held by AudioBlock. + * + * We guarantee 16 byte alignment of the channel data. 
+ */ +class AudioBlockBuffer final : public ThreadSharedObject { +public: + + virtual AudioBlockBuffer* AsAudioBlockBuffer() override { return this; }; + + float* ChannelData(uint32_t aChannel) + { + float* base = reinterpret_cast<float*>(((uintptr_t)(this + 1) + 15) & ~0x0F); + ASSERT_ALIGNED16(base); + return base + aChannel * WEBAUDIO_BLOCK_SIZE; + } + + static already_AddRefed<AudioBlockBuffer> Create(uint32_t aChannelCount) + { + CheckedInt<size_t> size = WEBAUDIO_BLOCK_SIZE; + size *= aChannelCount; + size *= sizeof(float); + size += sizeof(AudioBlockBuffer); + size += 15; //padding for alignment + if (!size.isValid()) { + MOZ_CRASH(); + } + + void* m = moz_xmalloc(size.value()); + RefPtr<AudioBlockBuffer> p = new (m) AudioBlockBuffer(); + NS_ASSERTION((reinterpret_cast<char*>(p.get() + 1) - reinterpret_cast<char*>(p.get())) % 4 == 0, + "AudioBlockBuffers should be at least 4-byte aligned"); + return p.forget(); + } + + // Graph thread only. + void DownstreamRefAdded() { ++mDownstreamRefCount; } + void DownstreamRefRemoved() { + MOZ_ASSERT(mDownstreamRefCount > 0); + --mDownstreamRefCount; + } + // Whether this is shared by any owners that are not downstream. + // Called only from owners with a reference that is not a downstream + // reference. Graph thread only. + bool HasLastingShares() + { + // mRefCnt is atomic and so reading its value is defined even when + // modifications may happen on other threads. mDownstreamRefCount is + // not modified on any other thread. + // + // If all other references are downstream references (managed on this, the + // graph thread), then other threads are not using this buffer and cannot + // add further references. This method can safely return false. The + // buffer contents can be modified. + // + // If there are other references that are not downstream references, then + // this method will return true. The buffer will be assumed to be still + // in use and so will not be reused. 
+ nsrefcnt count = mRefCnt; + // This test is strictly less than because the caller has a reference + // that is not a downstream reference. + MOZ_ASSERT(mDownstreamRefCount < count); + return count != mDownstreamRefCount + 1; + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + AudioBlockBuffer() {} + ~AudioBlockBuffer() override { MOZ_ASSERT(mDownstreamRefCount == 0); } + + nsAutoRefCnt mDownstreamRefCount; +}; + +AudioBlock::~AudioBlock() +{ + ClearDownstreamMark(); +} + +void +AudioBlock::SetBuffer(ThreadSharedObject* aNewBuffer) +{ + if (aNewBuffer == mBuffer) { + return; + } + + ClearDownstreamMark(); + + mBuffer = aNewBuffer; + + if (!aNewBuffer) { + return; + } + + AudioBlockBuffer* buffer = aNewBuffer->AsAudioBlockBuffer(); + if (buffer) { + buffer->DownstreamRefAdded(); + mBufferIsDownstreamRef = true; + } +} + +void +AudioBlock::ClearDownstreamMark() { + if (mBufferIsDownstreamRef) { + mBuffer->AsAudioBlockBuffer()->DownstreamRefRemoved(); + mBufferIsDownstreamRef = false; + } +} + +bool +AudioBlock::CanWrite() { + // If mBufferIsDownstreamRef is set then the buffer is not ours to use. + // It may be in use by another node which is not downstream. + return !mBufferIsDownstreamRef && + !mBuffer->AsAudioBlockBuffer()->HasLastingShares(); +} + +void +AudioBlock::AllocateChannels(uint32_t aChannelCount) +{ + MOZ_ASSERT(mDuration == WEBAUDIO_BLOCK_SIZE); + + if (mBufferIsDownstreamRef) { + // This is not our buffer to re-use. + ClearDownstreamMark(); + } else if (mBuffer && ChannelCount() == aChannelCount) { + AudioBlockBuffer* buffer = mBuffer->AsAudioBlockBuffer(); + if (buffer && !buffer->HasLastingShares()) { + MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32); + // No need to allocate again. 
+ mVolume = 1.0f; + return; + } + } + + RefPtr<AudioBlockBuffer> buffer = AudioBlockBuffer::Create(aChannelCount); + mChannelData.SetLength(aChannelCount); + for (uint32_t i = 0; i < aChannelCount; ++i) { + mChannelData[i] = buffer->ChannelData(i); + } + mBuffer = buffer.forget(); + mVolume = 1.0f; + mBufferFormat = AUDIO_FORMAT_FLOAT32; +} + +} // namespace mozilla diff --git a/dom/media/webaudio/AudioBlock.h b/dom/media/webaudio/AudioBlock.h new file mode 100644 index 000000000..c9a5bb400 --- /dev/null +++ b/dom/media/webaudio/AudioBlock.h @@ -0,0 +1,136 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIOBLOCK_H_ +#define MOZILLA_AUDIOBLOCK_H_ + +#include "AudioSegment.h" + +namespace mozilla { + +/** + * An AudioChunk whose buffer contents need to be valid only for one + * processing block iteration, after which contents can be overwritten if the + * buffer has not been passed to longer term storage or to another thread, + * which may happen though AsAudioChunk() or AsMutableChunk(). + * + * Use on graph thread only. + */ +class AudioBlock : private AudioChunk +{ +public: + AudioBlock() { + mDuration = WEBAUDIO_BLOCK_SIZE; + mBufferFormat = AUDIO_FORMAT_SILENCE; + } + // No effort is made in constructors to ensure that mBufferIsDownstreamRef + // is set because the block is expected to be a temporary and so the + // reference will be released before the next iteration. + // The custom copy constructor is required so as not to set + // mBufferIsDownstreamRef without notifying AudioBlockBuffer. 
+ AudioBlock(const AudioBlock& aBlock) : AudioChunk(aBlock.AsAudioChunk()) {} + explicit AudioBlock(const AudioChunk& aChunk) + : AudioChunk(aChunk) + { + MOZ_ASSERT(aChunk.mDuration == WEBAUDIO_BLOCK_SIZE); + } + ~AudioBlock(); + + using AudioChunk::GetDuration; + using AudioChunk::IsNull; + using AudioChunk::ChannelCount; + using AudioChunk::ChannelData; + using AudioChunk::SizeOfExcludingThisIfUnshared; + using AudioChunk::SizeOfExcludingThis; + // mDuration is not exposed. Use GetDuration(). + // mBuffer is not exposed. Use SetBuffer(). + using AudioChunk::mChannelData; + using AudioChunk::mVolume; + using AudioChunk::mBufferFormat; + + const AudioChunk& AsAudioChunk() const { return *this; } + AudioChunk* AsMutableChunk() { + ClearDownstreamMark(); + return this; + } + + /** + * Allocates, if necessary, aChannelCount buffers of WEBAUDIO_BLOCK_SIZE float + * samples for writing. + */ + void AllocateChannels(uint32_t aChannelCount); + + /** + * ChannelFloatsForWrite() should only be used when the buffers have been + * created with AllocateChannels(). + */ + float* ChannelFloatsForWrite(size_t aChannel) + { + MOZ_ASSERT(mBufferFormat == AUDIO_FORMAT_FLOAT32); + MOZ_ASSERT(CanWrite()); + return static_cast<float*>(const_cast<void*>(mChannelData[aChannel])); + } + + void SetBuffer(ThreadSharedObject* aNewBuffer); + void SetNull(StreamTime aDuration) { + MOZ_ASSERT(aDuration == WEBAUDIO_BLOCK_SIZE); + SetBuffer(nullptr); + mChannelData.Clear(); + mVolume = 1.0f; + mBufferFormat = AUDIO_FORMAT_SILENCE; + } + + AudioBlock& operator=(const AudioBlock& aBlock) { + // Instead of just copying, mBufferIsDownstreamRef must be first cleared + // if set. It is set again for the new mBuffer if possible. This happens + // in SetBuffer(). 
+ return *this = aBlock.AsAudioChunk(); + } + AudioBlock& operator=(const AudioChunk& aChunk) { + MOZ_ASSERT(aChunk.mDuration == WEBAUDIO_BLOCK_SIZE); + SetBuffer(aChunk.mBuffer); + mChannelData = aChunk.mChannelData; + mVolume = aChunk.mVolume; + mBufferFormat = aChunk.mBufferFormat; + return *this; + } + + bool IsMuted() const { return mVolume == 0.0f; } + + bool IsSilentOrSubnormal() const + { + if (!mBuffer) { + return true; + } + + for (uint32_t i = 0, length = mChannelData.Length(); i < length; ++i) { + const float* channel = static_cast<const float*>(mChannelData[i]); + for (StreamTime frame = 0; frame < mDuration; ++frame) { + if (fabs(channel[frame]) >= FLT_MIN) { + return false; + } + } + } + + return true; + } + +private: + void ClearDownstreamMark(); + bool CanWrite(); + + // mBufferIsDownstreamRef is set only when mBuffer references an + // AudioBlockBuffer created in a different AudioBlock. That can happen when + // this AudioBlock is on a node downstream from the node which created the + // buffer. When this is set, the AudioBlockBuffer is notified that this + // reference does prevent the upstream node from re-using the buffer next + // iteration and modifying its contents. The AudioBlockBuffer is also + // notified when mBuffer releases this reference. + bool mBufferIsDownstreamRef = false; +}; + +} // namespace mozilla + +#endif // MOZILLA_AUDIOBLOCK_H_ diff --git a/dom/media/webaudio/AudioBuffer.cpp b/dom/media/webaudio/AudioBuffer.cpp new file mode 100644 index 000000000..cb834f6a5 --- /dev/null +++ b/dom/media/webaudio/AudioBuffer.cpp @@ -0,0 +1,421 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioBuffer.h" +#include "mozilla/dom/AudioBufferBinding.h" +#include "jsfriendapi.h" +#include "mozilla/ErrorResult.h" +#include "AudioSegment.h" +#include "AudioChannelFormat.h" +#include "mozilla/PodOperations.h" +#include "mozilla/CheckedInt.h" +#include "mozilla/MemoryReporting.h" +#include "AudioNodeEngine.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_CLASS(AudioBuffer) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioBuffer) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mJSChannels) + NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER + tmp->ClearJSChannels(); +NS_IMPL_CYCLE_COLLECTION_UNLINK_END + +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(AudioBuffer) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(AudioBuffer) + NS_IMPL_CYCLE_COLLECTION_TRACE_PRESERVED_WRAPPER + for (uint32_t i = 0; i < tmp->mJSChannels.Length(); ++i) { + NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mJSChannels[i]) + } +NS_IMPL_CYCLE_COLLECTION_TRACE_END + +NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(AudioBuffer, AddRef) +NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(AudioBuffer, Release) + +/** + * AudioBuffers can be shared between AudioContexts, so we need a separate + * mechanism to track their memory usage. This thread-safe class keeps track of + * all the AudioBuffers, and gets called back by the memory reporting system + * when a memory report is needed, reporting how much memory is used by the + * buffers backing AudioBuffer objects. */ +class AudioBufferMemoryTracker : public nsIMemoryReporter +{ + NS_DECL_THREADSAFE_ISUPPORTS + NS_DECL_NSIMEMORYREPORTER + +private: + AudioBufferMemoryTracker(); + virtual ~AudioBufferMemoryTracker(); + +public: + /* Those methods can be called on any thread. 
*/ + static void RegisterAudioBuffer(const AudioBuffer* aAudioBuffer); + static void UnregisterAudioBuffer(const AudioBuffer* aAudioBuffer); +private: + static AudioBufferMemoryTracker* GetInstance(); + /* Those methods must be called with the lock held. */ + void RegisterAudioBufferInternal(const AudioBuffer* aAudioBuffer); + /* Returns the number of buffers still present in the hash table. */ + uint32_t UnregisterAudioBufferInternal(const AudioBuffer* aAudioBuffer); + void Init(); + + /* This protects all members of this class. */ + static StaticMutex sMutex; + static StaticRefPtr<AudioBufferMemoryTracker> sSingleton; + nsTHashtable<nsPtrHashKey<const AudioBuffer>> mBuffers; +}; + +StaticRefPtr<AudioBufferMemoryTracker> AudioBufferMemoryTracker::sSingleton; +StaticMutex AudioBufferMemoryTracker::sMutex; + +NS_IMPL_ISUPPORTS(AudioBufferMemoryTracker, nsIMemoryReporter); + +AudioBufferMemoryTracker* AudioBufferMemoryTracker::GetInstance() +{ + sMutex.AssertCurrentThreadOwns(); + if (!sSingleton) { + sSingleton = new AudioBufferMemoryTracker(); + sSingleton->Init(); + } + return sSingleton; +} + +AudioBufferMemoryTracker::AudioBufferMemoryTracker() +{ +} + +void +AudioBufferMemoryTracker::Init() +{ + RegisterWeakMemoryReporter(this); +} + +AudioBufferMemoryTracker::~AudioBufferMemoryTracker() +{ + UnregisterWeakMemoryReporter(this); +} + +void +AudioBufferMemoryTracker::RegisterAudioBuffer(const AudioBuffer* aAudioBuffer) +{ + StaticMutexAutoLock lock(sMutex); + AudioBufferMemoryTracker* tracker = AudioBufferMemoryTracker::GetInstance(); + tracker->RegisterAudioBufferInternal(aAudioBuffer); +} + +void +AudioBufferMemoryTracker::UnregisterAudioBuffer(const AudioBuffer* aAudioBuffer) +{ + StaticMutexAutoLock lock(sMutex); + AudioBufferMemoryTracker* tracker = AudioBufferMemoryTracker::GetInstance(); + uint32_t count; + count = tracker->UnregisterAudioBufferInternal(aAudioBuffer); + if (count == 0) { + sSingleton = nullptr; + } +} + +void 
+AudioBufferMemoryTracker::RegisterAudioBufferInternal(const AudioBuffer* aAudioBuffer) +{ + sMutex.AssertCurrentThreadOwns(); + mBuffers.PutEntry(aAudioBuffer); +} + +uint32_t +AudioBufferMemoryTracker::UnregisterAudioBufferInternal(const AudioBuffer* aAudioBuffer) +{ + sMutex.AssertCurrentThreadOwns(); + mBuffers.RemoveEntry(aAudioBuffer); + return mBuffers.Count(); +} + +MOZ_DEFINE_MALLOC_SIZE_OF(AudioBufferMemoryTrackerMallocSizeOf) + +NS_IMETHODIMP +AudioBufferMemoryTracker::CollectReports(nsIHandleReportCallback* aHandleReport, + nsISupports* aData, bool) +{ + size_t amount = 0; + + for (auto iter = mBuffers.Iter(); !iter.Done(); iter.Next()) { + amount += iter.Get()->GetKey()->SizeOfIncludingThis(AudioBufferMemoryTrackerMallocSizeOf); + } + + MOZ_COLLECT_REPORT( + "explicit/webaudio/audiobuffer", KIND_HEAP, UNITS_BYTES, amount, + "Memory used by AudioBuffer objects (Web Audio)."); + + return NS_OK; +} + +AudioBuffer::AudioBuffer(AudioContext* aContext, uint32_t aNumberOfChannels, + uint32_t aLength, float aSampleRate, + already_AddRefed<ThreadSharedFloatArrayBufferList> + aInitialContents) + : mOwnerWindow(do_GetWeakReference(aContext->GetOwner())), + mSharedChannels(aInitialContents), + mLength(aLength), + mSampleRate(aSampleRate) +{ + MOZ_ASSERT(!mSharedChannels || + mSharedChannels->GetChannels() == aNumberOfChannels); + mJSChannels.SetLength(aNumberOfChannels); + mozilla::HoldJSObjects(this); + AudioBufferMemoryTracker::RegisterAudioBuffer(this); +} + +AudioBuffer::~AudioBuffer() +{ + AudioBufferMemoryTracker::UnregisterAudioBuffer(this); + ClearJSChannels(); + mozilla::DropJSObjects(this); +} + +void +AudioBuffer::ClearJSChannels() +{ + mJSChannels.Clear(); +} + +/* static */ already_AddRefed<AudioBuffer> +AudioBuffer::Create(AudioContext* aContext, uint32_t aNumberOfChannels, + uint32_t aLength, float aSampleRate, + already_AddRefed<ThreadSharedFloatArrayBufferList> + aInitialContents, + ErrorResult& aRv) +{ + // Note that a buffer with zero channels 
is permitted here for the sake of + // AudioProcessingEvent, where channel counts must match parameters passed + // to createScriptProcessor(), one of which may be zero. + if (aSampleRate < WebAudioUtils::MinSampleRate || + aSampleRate > WebAudioUtils::MaxSampleRate || + aNumberOfChannels > WebAudioUtils::MaxChannelCount || + !aLength || aLength > INT32_MAX) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return nullptr; + } + + RefPtr<AudioBuffer> buffer = + new AudioBuffer(aContext, aNumberOfChannels, aLength, aSampleRate, + Move(aInitialContents)); + + return buffer.forget(); +} + +JSObject* +AudioBuffer::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return AudioBufferBinding::Wrap(aCx, this, aGivenProto); +} + +bool +AudioBuffer::RestoreJSChannelData(JSContext* aJSContext) +{ + for (uint32_t i = 0; i < mJSChannels.Length(); ++i) { + if (mJSChannels[i]) { + // Already have data in JS array. + continue; + } + + // The following code first zeroes the array and then copies our data + // into it. We could avoid this with additional JS APIs to construct + // an array (or ArrayBuffer) containing initial data. + JS::Rooted<JSObject*> array(aJSContext, + JS_NewFloat32Array(aJSContext, mLength)); + if (!array) { + return false; + } + if (mSharedChannels) { + // "4. Attach ArrayBuffers containing copies of the data to the + // AudioBuffer, to be returned by the next call to getChannelData." 
+ const float* data = mSharedChannels->GetData(i); + JS::AutoCheckCannotGC nogc; + bool isShared; + mozilla::PodCopy(JS_GetFloat32ArrayData(array, &isShared, nogc), data, mLength); + MOZ_ASSERT(!isShared); // Was created as unshared above + } + mJSChannels[i] = array; + } + + mSharedChannels = nullptr; + + return true; +} + +void +AudioBuffer::CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber, + uint32_t aStartInChannel, ErrorResult& aRv) +{ + aDestination.ComputeLengthAndData(); + + uint32_t length = aDestination.Length(); + CheckedInt<uint32_t> end = aStartInChannel; + end += length; + if (aChannelNumber >= NumberOfChannels() || + !end.isValid() || end.value() > mLength) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + JS::AutoCheckCannotGC nogc; + JSObject* channelArray = mJSChannels[aChannelNumber]; + const float* sourceData = nullptr; + if (channelArray) { + if (JS_GetTypedArrayLength(channelArray) != mLength) { + // The array's buffer was detached. + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + bool isShared = false; + sourceData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc); + // The sourceData arrays should all have originated in + // RestoreJSChannelData, where they are created unshared. 
+ MOZ_ASSERT(!isShared); + } else if (mSharedChannels) { + sourceData = mSharedChannels->GetData(aChannelNumber); + } + + if (sourceData) { + PodMove(aDestination.Data(), sourceData + aStartInChannel, length); + } else { + PodZero(aDestination.Data(), length); + } +} + +void +AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource, + uint32_t aChannelNumber, uint32_t aStartInChannel, + ErrorResult& aRv) +{ + aSource.ComputeLengthAndData(); + + uint32_t length = aSource.Length(); + CheckedInt<uint32_t> end = aStartInChannel; + end += length; + if (aChannelNumber >= NumberOfChannels() || + !end.isValid() || end.value() > mLength) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + if (!RestoreJSChannelData(aJSContext)) { + aRv.Throw(NS_ERROR_OUT_OF_MEMORY); + return; + } + + JS::AutoCheckCannotGC nogc; + JSObject* channelArray = mJSChannels[aChannelNumber]; + if (JS_GetTypedArrayLength(channelArray) != mLength) { + // The array's buffer was detached. + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + bool isShared = false; + float* channelData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc); + // The channelData arrays should all have originated in + // RestoreJSChannelData, where they are created unshared. + MOZ_ASSERT(!isShared); + PodMove(channelData + aStartInChannel, aSource.Data(), length); +} + +void +AudioBuffer::GetChannelData(JSContext* aJSContext, uint32_t aChannel, + JS::MutableHandle<JSObject*> aRetval, + ErrorResult& aRv) +{ + if (aChannel >= NumberOfChannels()) { + aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR); + return; + } + + if (!RestoreJSChannelData(aJSContext)) { + aRv.Throw(NS_ERROR_OUT_OF_MEMORY); + return; + } + + aRetval.set(mJSChannels[aChannel]); +} + +already_AddRefed<ThreadSharedFloatArrayBufferList> +AudioBuffer::StealJSArrayDataIntoSharedChannels(JSContext* aJSContext) +{ + // "1. 
If any of the AudioBuffer's ArrayBuffer have been detached, abort + // these steps, and return a zero-length channel data buffers to the + // invoker." + for (uint32_t i = 0; i < mJSChannels.Length(); ++i) { + JSObject* channelArray = mJSChannels[i]; + if (!channelArray || mLength != JS_GetTypedArrayLength(channelArray)) { + // Either empty buffer or one of the arrays' buffers was detached. + return nullptr; + } + } + + // "2. Detach all ArrayBuffers for arrays previously returned by + // getChannelData on this AudioBuffer." + // "3. Retain the underlying data buffers from those ArrayBuffers and return + // references to them to the invoker." + RefPtr<ThreadSharedFloatArrayBufferList> result = + new ThreadSharedFloatArrayBufferList(mJSChannels.Length()); + for (uint32_t i = 0; i < mJSChannels.Length(); ++i) { + JS::Rooted<JSObject*> arrayBufferView(aJSContext, mJSChannels[i]); + bool isSharedMemory; + JS::Rooted<JSObject*> arrayBuffer(aJSContext, + JS_GetArrayBufferViewBuffer(aJSContext, + arrayBufferView, + &isSharedMemory)); + // The channel data arrays should all have originated in + // RestoreJSChannelData, where they are created unshared. + MOZ_ASSERT(!isSharedMemory); + auto stolenData = arrayBuffer + ? 
static_cast<float*>(JS_StealArrayBufferContents(aJSContext, + arrayBuffer)) + : nullptr; + if (stolenData) { + result->SetData(i, stolenData, js_free, stolenData); + } else { + NS_ASSERTION(i == 0, "some channels lost when contents not acquired"); + return nullptr; + } + } + + for (uint32_t i = 0; i < mJSChannels.Length(); ++i) { + mJSChannels[i] = nullptr; + } + + return result.forget(); +} + +ThreadSharedFloatArrayBufferList* +AudioBuffer::GetThreadSharedChannelsForRate(JSContext* aJSContext) +{ + if (!mSharedChannels) { + mSharedChannels = StealJSArrayDataIntoSharedChannels(aJSContext); + } + + return mSharedChannels; +} + +size_t +AudioBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + amount += mJSChannels.ShallowSizeOfExcludingThis(aMallocSizeOf); + if (mSharedChannels) { + amount += mSharedChannels->SizeOfIncludingThis(aMallocSizeOf); + } + return amount; +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/AudioBuffer.h b/dom/media/webaudio/AudioBuffer.h new file mode 100644 index 000000000..2f2aef5fe --- /dev/null +++ b/dom/media/webaudio/AudioBuffer.h @@ -0,0 +1,137 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef AudioBuffer_h_ +#define AudioBuffer_h_ + +#include "nsWrapperCache.h" +#include "nsCycleCollectionParticipant.h" +#include "mozilla/Attributes.h" +#include "mozilla/StaticPtr.h" +#include "mozilla/StaticMutex.h" +#include "nsTArray.h" +#include "AudioContext.h" +#include "js/TypeDecls.h" +#include "mozilla/MemoryReporting.h" + +namespace mozilla { + +class ErrorResult; +class ThreadSharedFloatArrayBufferList; + +namespace dom { + +class AudioContext; + +/** + * An AudioBuffer keeps its data either in the mJSChannels objects, which + * are Float32Arrays, or in mSharedChannels if the mJSChannels objects' buffers + * are detached. + */ +class AudioBuffer final : public nsWrapperCache +{ +public: + // If non-null, aInitialContents must have number of channels equal to + // aNumberOfChannels and their lengths must be at least aLength. + static already_AddRefed<AudioBuffer> + Create(AudioContext* aContext, uint32_t aNumberOfChannels, + uint32_t aLength, float aSampleRate, + already_AddRefed<ThreadSharedFloatArrayBufferList> aInitialContents, + ErrorResult& aRv); + + static already_AddRefed<AudioBuffer> + Create(AudioContext* aContext, uint32_t aNumberOfChannels, + uint32_t aLength, float aSampleRate, + ErrorResult& aRv) + { + return Create(aContext, aNumberOfChannels, aLength, aSampleRate, + nullptr, aRv); + } + + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(AudioBuffer) + NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioBuffer) + + nsPIDOMWindowInner* GetParentObject() const + { + nsCOMPtr<nsPIDOMWindowInner> parentObject = do_QueryReferent(mOwnerWindow); + return parentObject; + } + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + float SampleRate() const + { + return mSampleRate; + } + + uint32_t Length() const + { + return mLength; + } + + double Duration() const + { + return mLength / static_cast<double> (mSampleRate); + } + + 
uint32_t NumberOfChannels() const + { + return mJSChannels.Length(); + } + + /** + * If mSharedChannels is non-null, copies its contents to + * new Float32Arrays in mJSChannels. Returns a Float32Array. + */ + void GetChannelData(JSContext* aJSContext, uint32_t aChannel, + JS::MutableHandle<JSObject*> aRetval, + ErrorResult& aRv); + + void CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber, + uint32_t aStartInChannel, ErrorResult& aRv); + void CopyToChannel(JSContext* aJSContext, const Float32Array& aSource, + uint32_t aChannelNumber, uint32_t aStartInChannel, + ErrorResult& aRv); + + /** + * Returns a ThreadSharedFloatArrayBufferList containing the sample data. + * Can return null if there is no data. + */ + ThreadSharedFloatArrayBufferList* GetThreadSharedChannelsForRate(JSContext* aContext); + +protected: + AudioBuffer(AudioContext* aContext, uint32_t aNumberOfChannels, + uint32_t aLength, float aSampleRate, + already_AddRefed<ThreadSharedFloatArrayBufferList> + aInitialContents); + ~AudioBuffer(); + + bool RestoreJSChannelData(JSContext* aJSContext); + + already_AddRefed<ThreadSharedFloatArrayBufferList> + StealJSArrayDataIntoSharedChannels(JSContext* aJSContext); + + void ClearJSChannels(); + + nsWeakPtr mOwnerWindow; + // Float32Arrays + AutoTArray<JS::Heap<JSObject*>, 2> mJSChannels; + + // mSharedChannels aggregates the data from mJSChannels. This is non-null + // if and only if the mJSChannels' buffers are detached. 
+ RefPtr<ThreadSharedFloatArrayBufferList> mSharedChannels; + + uint32_t mLength; + float mSampleRate; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/AudioBufferSourceNode.cpp b/dom/media/webaudio/AudioBufferSourceNode.cpp new file mode 100644 index 000000000..51b6bab4a --- /dev/null +++ b/dom/media/webaudio/AudioBufferSourceNode.cpp @@ -0,0 +1,853 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioBufferSourceNode.h" +#include "nsDebug.h" +#include "mozilla/dom/AudioBufferSourceNodeBinding.h" +#include "mozilla/dom/AudioParam.h" +#include "mozilla/FloatingPoint.h" +#include "nsContentUtils.h" +#include "nsMathUtils.h" +#include "AlignmentUtils.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "AudioParamTimeline.h" +#include <limits> +#include <algorithm> + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode, AudioNode, mBuffer, mPlaybackRate, mDetune) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioBufferSourceNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(AudioBufferSourceNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(AudioBufferSourceNode, AudioNode) + +/** + * Media-thread playback engine for AudioBufferSourceNode. + * Nothing is played until a non-null buffer has been set (via + * AudioNodeStream::SetBuffer) and a non-zero mBufferEnd has been set (via + * AudioNodeStream::SetInt32Parameter). 
+ */ +class AudioBufferSourceNodeEngine final : public AudioNodeEngine +{ +public: + AudioBufferSourceNodeEngine(AudioNode* aNode, + AudioDestinationNode* aDestination) : + AudioNodeEngine(aNode), + mStart(0.0), mBeginProcessing(0), + mStop(STREAM_TIME_MAX), + mResampler(nullptr), mRemainingResamplerTail(0), + mBufferEnd(0), + mLoopStart(0), mLoopEnd(0), + mBufferPosition(0), mBufferSampleRate(0), + // mResamplerOutRate is initialized in UpdateResampler(). + mChannels(0), + mDopplerShift(1.0f), + mDestination(aDestination->Stream()), + mPlaybackRateTimeline(1.0f), + mDetuneTimeline(0.0f), + mLoop(false) + {} + + ~AudioBufferSourceNodeEngine() + { + if (mResampler) { + speex_resampler_destroy(mResampler); + } + } + + void SetSourceStream(AudioNodeStream* aSource) + { + mSource = aSource; + } + + void RecvTimelineEvent(uint32_t aIndex, + dom::AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case AudioBufferSourceNode::PLAYBACKRATE: + mPlaybackRateTimeline.InsertEvent<int64_t>(aEvent); + break; + case AudioBufferSourceNode::DETUNE: + mDetuneTimeline.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad AudioBufferSourceNodeEngine TimelineParameter"); + } + } + void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override + { + switch (aIndex) { + case AudioBufferSourceNode::STOP: mStop = aParam; break; + default: + NS_ERROR("Bad AudioBufferSourceNodeEngine StreamTimeParameter"); + } + } + void SetDoubleParameter(uint32_t aIndex, double aParam) override + { + switch (aIndex) { + case AudioBufferSourceNode::START: + MOZ_ASSERT(!mStart, "Another START?"); + mStart = aParam * mDestination->SampleRate(); + // Round to nearest + mBeginProcessing = mStart + 0.5; + break; + case AudioBufferSourceNode::DOPPLERSHIFT: + mDopplerShift = (aParam <= 0 || mozilla::IsNaN(aParam)) ? 
1.0 : aParam; + break; + default: + NS_ERROR("Bad AudioBufferSourceNodeEngine double parameter."); + }; + } + void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override + { + switch (aIndex) { + case AudioBufferSourceNode::SAMPLE_RATE: + MOZ_ASSERT(aParam > 0); + mBufferSampleRate = aParam; + mSource->SetActive(); + break; + case AudioBufferSourceNode::BUFFERSTART: + MOZ_ASSERT(aParam >= 0); + if (mBufferPosition == 0) { + mBufferPosition = aParam; + } + break; + case AudioBufferSourceNode::BUFFEREND: + MOZ_ASSERT(aParam >= 0); + mBufferEnd = aParam; + break; + case AudioBufferSourceNode::LOOP: mLoop = !!aParam; break; + case AudioBufferSourceNode::LOOPSTART: + MOZ_ASSERT(aParam >= 0); + mLoopStart = aParam; + break; + case AudioBufferSourceNode::LOOPEND: + MOZ_ASSERT(aParam >= 0); + mLoopEnd = aParam; + break; + default: + NS_ERROR("Bad AudioBufferSourceNodeEngine Int32Parameter"); + } + } + void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override + { + mBuffer = aBuffer; + } + + bool BegunResampling() + { + return mBeginProcessing == -STREAM_TIME_MAX; + } + + void UpdateResampler(int32_t aOutRate, uint32_t aChannels) + { + if (mResampler && + (aChannels != mChannels || + // If the resampler has begun, then it will have moved + // mBufferPosition to after the samples it has read, but it hasn't + // output its buffered samples. Keep using the resampler, even if + // the rates now match, so that this latent segment is output. 
+ (aOutRate == mBufferSampleRate && !BegunResampling()))) { + speex_resampler_destroy(mResampler); + mResampler = nullptr; + mRemainingResamplerTail = 0; + mBeginProcessing = mStart + 0.5; + } + + if (aChannels == 0 || + (aOutRate == mBufferSampleRate && !mResampler)) { + mResamplerOutRate = aOutRate; + return; + } + + if (!mResampler) { + mChannels = aChannels; + mResampler = speex_resampler_init(mChannels, mBufferSampleRate, aOutRate, + SPEEX_RESAMPLER_QUALITY_MIN, + nullptr); + } else { + if (mResamplerOutRate == aOutRate) { + return; + } + if (speex_resampler_set_rate(mResampler, mBufferSampleRate, aOutRate) != RESAMPLER_ERR_SUCCESS) { + NS_ASSERTION(false, "speex_resampler_set_rate failed"); + return; + } + } + + mResamplerOutRate = aOutRate; + + if (!BegunResampling()) { + // Low pass filter effects from the resampler mean that samples before + // the start time are influenced by resampling the buffer. The input + // latency indicates half the filter width. + int64_t inputLatency = speex_resampler_get_input_latency(mResampler); + uint32_t ratioNum, ratioDen; + speex_resampler_get_ratio(mResampler, &ratioNum, &ratioDen); + // The output subsample resolution supported in aligning the resampler + // is ratioNum. First round the start time to the nearest subsample. + int64_t subsample = mStart * ratioNum + 0.5; + // Now include the leading effects of the filter, and round *up* to the + // next whole tick, because there is no effect on samples outside the + // filter width. + mBeginProcessing = + (subsample - inputLatency * ratioDen + ratioNum - 1) / ratioNum; + } + } + + // Borrow a full buffer of size WEBAUDIO_BLOCK_SIZE from the source buffer + // at offset aSourceOffset. This avoids copying memory. 
+ void BorrowFromInputBuffer(AudioBlock* aOutput, + uint32_t aChannels) + { + aOutput->SetBuffer(mBuffer); + aOutput->mChannelData.SetLength(aChannels); + for (uint32_t i = 0; i < aChannels; ++i) { + aOutput->mChannelData[i] = mBuffer->GetData(i) + mBufferPosition; + } + aOutput->mVolume = 1.0f; + aOutput->mBufferFormat = AUDIO_FORMAT_FLOAT32; + } + + // Copy aNumberOfFrames frames from the source buffer at offset aSourceOffset + // and put it at offset aBufferOffset in the destination buffer. + void CopyFromInputBuffer(AudioBlock* aOutput, + uint32_t aChannels, + uintptr_t aOffsetWithinBlock, + uint32_t aNumberOfFrames) { + for (uint32_t i = 0; i < aChannels; ++i) { + float* baseChannelData = aOutput->ChannelFloatsForWrite(i); + memcpy(baseChannelData + aOffsetWithinBlock, + mBuffer->GetData(i) + mBufferPosition, + aNumberOfFrames * sizeof(float)); + } + } + + // Resamples input data to an output buffer, according to |mBufferSampleRate| and + // the playbackRate/detune. + // The number of frames consumed/produced depends on the amount of space + // remaining in both the input and output buffer, and the playback rate (that + // is, the ratio between the output samplerate and the input samplerate). + void CopyFromInputBufferWithResampling(AudioBlock* aOutput, + uint32_t aChannels, + uint32_t* aOffsetWithinBlock, + uint32_t aAvailableInOutput, + StreamTime* aCurrentPosition, + uint32_t aBufferMax) + { + if (*aOffsetWithinBlock == 0) { + aOutput->AllocateChannels(aChannels); + } + SpeexResamplerState* resampler = mResampler; + MOZ_ASSERT(aChannels > 0); + + if (mBufferPosition < aBufferMax) { + uint32_t availableInInputBuffer = aBufferMax - mBufferPosition; + uint32_t ratioNum, ratioDen; + speex_resampler_get_ratio(resampler, &ratioNum, &ratioDen); + // Limit the number of input samples copied and possibly + // format-converted for resampling by estimating how many will be used. 
+ // This may be a little small if still filling the resampler with + // initial data, but we'll get called again and it will work out. + uint32_t inputLimit = aAvailableInOutput * ratioNum / ratioDen + 10; + if (!BegunResampling()) { + // First time the resampler is used. + uint32_t inputLatency = speex_resampler_get_input_latency(resampler); + inputLimit += inputLatency; + // If starting after mStart, then play from the beginning of the + // buffer, but correct for input latency. If starting before mStart, + // then align the resampler so that the time corresponding to the + // first input sample is mStart. + int64_t skipFracNum = static_cast<int64_t>(inputLatency) * ratioDen; + double leadTicks = mStart - *aCurrentPosition; + if (leadTicks > 0.0) { + // Round to nearest output subsample supported by the resampler at + // these rates. + int64_t leadSubsamples = leadTicks * ratioNum + 0.5; + MOZ_ASSERT(leadSubsamples <= skipFracNum, + "mBeginProcessing is wrong?"); + skipFracNum -= leadSubsamples; + } + speex_resampler_set_skip_frac_num(resampler, + std::min<int64_t>(skipFracNum, UINT32_MAX)); + + mBeginProcessing = -STREAM_TIME_MAX; + } + inputLimit = std::min(inputLimit, availableInInputBuffer); + + for (uint32_t i = 0; true; ) { + uint32_t inSamples = inputLimit; + const float* inputData = mBuffer->GetData(i) + mBufferPosition; + + uint32_t outSamples = aAvailableInOutput; + float* outputData = + aOutput->ChannelFloatsForWrite(i) + *aOffsetWithinBlock; + + WebAudioUtils::SpeexResamplerProcess(resampler, i, + inputData, &inSamples, + outputData, &outSamples); + if (++i == aChannels) { + mBufferPosition += inSamples; + MOZ_ASSERT(mBufferPosition <= mBufferEnd || mLoop); + *aOffsetWithinBlock += outSamples; + *aCurrentPosition += outSamples; + if (inSamples == availableInInputBuffer && !mLoop) { + // We'll feed in enough zeros to empty out the resampler's memory. 
+ // This handles the output latency as well as capturing the low + // pass effects of the resample filter. + mRemainingResamplerTail = + 2 * speex_resampler_get_input_latency(resampler) - 1; + } + return; + } + } + } else { + for (uint32_t i = 0; true; ) { + uint32_t inSamples = mRemainingResamplerTail; + uint32_t outSamples = aAvailableInOutput; + float* outputData = + aOutput->ChannelFloatsForWrite(i) + *aOffsetWithinBlock; + + // AudioDataValue* for aIn selects the function that does not try to + // copy and format-convert input data. + WebAudioUtils::SpeexResamplerProcess(resampler, i, + static_cast<AudioDataValue*>(nullptr), &inSamples, + outputData, &outSamples); + if (++i == aChannels) { + MOZ_ASSERT(inSamples <= mRemainingResamplerTail); + mRemainingResamplerTail -= inSamples; + *aOffsetWithinBlock += outSamples; + *aCurrentPosition += outSamples; + break; + } + } + } + } + + /** + * Fill aOutput with as many zero frames as we can, and advance + * aOffsetWithinBlock and aCurrentPosition based on how many frames we write. + * This will never advance aOffsetWithinBlock past WEBAUDIO_BLOCK_SIZE or + * aCurrentPosition past aMaxPos. This function knows when it needs to + * allocate the output buffer, and also optimizes the case where it can avoid + * memory allocations. 
+ */ + void FillWithZeroes(AudioBlock* aOutput, + uint32_t aChannels, + uint32_t* aOffsetWithinBlock, + StreamTime* aCurrentPosition, + StreamTime aMaxPos) + { + MOZ_ASSERT(*aCurrentPosition < aMaxPos); + uint32_t numFrames = + std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock, + aMaxPos - *aCurrentPosition); + if (numFrames == WEBAUDIO_BLOCK_SIZE || !aChannels) { + aOutput->SetNull(numFrames); + } else { + if (*aOffsetWithinBlock == 0) { + aOutput->AllocateChannels(aChannels); + } + WriteZeroesToAudioBlock(aOutput, *aOffsetWithinBlock, numFrames); + } + *aOffsetWithinBlock += numFrames; + *aCurrentPosition += numFrames; + } + + /** + * Copy as many frames as possible from the source buffer to aOutput, and + * advance aOffsetWithinBlock and aCurrentPosition based on how many frames + * we write. This will never advance aOffsetWithinBlock past + * WEBAUDIO_BLOCK_SIZE, or aCurrentPosition past mStop. It takes data from + * the buffer at aBufferOffset, and never takes more data than aBufferMax. + * This function knows when it needs to allocate the output buffer, and also + * optimizes the case where it can avoid memory allocations. + */ + void CopyFromBuffer(AudioBlock* aOutput, + uint32_t aChannels, + uint32_t* aOffsetWithinBlock, + StreamTime* aCurrentPosition, + uint32_t aBufferMax) + { + MOZ_ASSERT(*aCurrentPosition < mStop); + uint32_t availableInOutput = + std::min<StreamTime>(WEBAUDIO_BLOCK_SIZE - *aOffsetWithinBlock, + mStop - *aCurrentPosition); + if (mResampler) { + CopyFromInputBufferWithResampling(aOutput, aChannels, + aOffsetWithinBlock, availableInOutput, + aCurrentPosition, aBufferMax); + return; + } + + if (aChannels == 0) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + // There is no attempt here to limit advance so that mBufferPosition is + // limited to aBufferMax. The only observable affect of skipping the + // check would be in the precise timing of the ended event if the loop + // attribute is reset after playback has looped. 
+ *aOffsetWithinBlock += availableInOutput; + *aCurrentPosition += availableInOutput; + // Rounding at the start and end of the period means that fractional + // increments essentially accumulate if outRate remains constant. If + // outRate is varying, then accumulation happens on average but not + // precisely. + TrackTicks start = *aCurrentPosition * + mBufferSampleRate / mResamplerOutRate; + TrackTicks end = (*aCurrentPosition + availableInOutput) * + mBufferSampleRate / mResamplerOutRate; + mBufferPosition += end - start; + return; + } + + uint32_t numFrames = std::min(aBufferMax - mBufferPosition, + availableInOutput); + + bool inputBufferAligned = true; + for (uint32_t i = 0; i < aChannels; ++i) { + if (!IS_ALIGNED16(mBuffer->GetData(i) + mBufferPosition)) { + inputBufferAligned = false; + } + } + + if (numFrames == WEBAUDIO_BLOCK_SIZE && inputBufferAligned) { + MOZ_ASSERT(mBufferPosition < aBufferMax); + BorrowFromInputBuffer(aOutput, aChannels); + } else { + if (*aOffsetWithinBlock == 0) { + aOutput->AllocateChannels(aChannels); + } + MOZ_ASSERT(mBufferPosition < aBufferMax); + CopyFromInputBuffer(aOutput, aChannels, *aOffsetWithinBlock, numFrames); + } + *aOffsetWithinBlock += numFrames; + *aCurrentPosition += numFrames; + mBufferPosition += numFrames; + } + + int32_t ComputeFinalOutSampleRate(float aPlaybackRate, float aDetune) + { + float computedPlaybackRate = aPlaybackRate * pow(2, aDetune / 1200.f); + // Make sure the playback rate and the doppler shift are something + // our resampler can work with. + int32_t rate = WebAudioUtils:: + TruncateFloatToInt<int32_t>(mSource->SampleRate() / + (computedPlaybackRate * mDopplerShift)); + return rate ? 
rate : mBufferSampleRate; + } + + void UpdateSampleRateIfNeeded(uint32_t aChannels, StreamTime aStreamPosition) + { + float playbackRate; + float detune; + + if (mPlaybackRateTimeline.HasSimpleValue()) { + playbackRate = mPlaybackRateTimeline.GetValue(); + } else { + playbackRate = mPlaybackRateTimeline.GetValueAtTime(aStreamPosition); + } + if (mDetuneTimeline.HasSimpleValue()) { + detune = mDetuneTimeline.GetValue(); + } else { + detune = mDetuneTimeline.GetValueAtTime(aStreamPosition); + } + if (playbackRate <= 0 || mozilla::IsNaN(playbackRate)) { + playbackRate = 1.0f; + } + + detune = std::min(std::max(-1200.f, detune), 1200.f); + + int32_t outRate = ComputeFinalOutSampleRate(playbackRate, detune); + UpdateResampler(outRate, aChannels); + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + if (mBufferSampleRate == 0) { + // start() has not yet been called or no buffer has yet been set + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + StreamTime streamPosition = mDestination->GraphTimeToStreamTime(aFrom); + uint32_t channels = mBuffer ? mBuffer->GetChannels() : 0; + + UpdateSampleRateIfNeeded(channels, streamPosition); + + uint32_t written = 0; + while (written < WEBAUDIO_BLOCK_SIZE) { + if (mStop != STREAM_TIME_MAX && + streamPosition >= mStop) { + FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX); + continue; + } + if (streamPosition < mBeginProcessing) { + FillWithZeroes(aOutput, channels, &written, &streamPosition, + mBeginProcessing); + continue; + } + if (mLoop) { + // mLoopEnd can become less than mBufferPosition when a LOOPEND engine + // parameter is received after "loopend" is changed on the node or a + // new buffer with lower samplerate is set. 
+ if (mBufferPosition >= mLoopEnd) { + mBufferPosition = mLoopStart; + } + CopyFromBuffer(aOutput, channels, &written, &streamPosition, mLoopEnd); + } else { + if (mBufferPosition < mBufferEnd || mRemainingResamplerTail) { + CopyFromBuffer(aOutput, channels, &written, &streamPosition, mBufferEnd); + } else { + FillWithZeroes(aOutput, channels, &written, &streamPosition, STREAM_TIME_MAX); + } + } + } + + // We've finished if we've gone past mStop, or if we're past mDuration when + // looping is disabled. + if (streamPosition >= mStop || + (!mLoop && mBufferPosition >= mBufferEnd && !mRemainingResamplerTail)) { + *aFinished = true; + } + } + + bool IsActive() const override + { + // Whether buffer has been set and start() has been called. + return mBufferSampleRate != 0; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Not owned: + // - mBuffer - shared w/ AudioNode + // - mPlaybackRateTimeline - shared w/ AudioNode + // - mDetuneTimeline - shared w/ AudioNode + + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + + // NB: We need to modify speex if we want the full memory picture, internal + // fields that need measuring noted below. + // - mResampler->mem + // - mResampler->sinc_table + // - mResampler->last_sample + // - mResampler->magic_samples + // - mResampler->samp_frac_num + amount += aMallocSizeOf(mResampler); + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + double mStart; // including the fractional position between ticks + // Low pass filter effects from the resampler mean that samples before the + // start time are influenced by resampling the buffer. mBeginProcessing + // includes the extent of this filter. The special value of -STREAM_TIME_MAX + // indicates that the resampler has begun processing. 
+ StreamTime mBeginProcessing; + StreamTime mStop; + RefPtr<ThreadSharedFloatArrayBufferList> mBuffer; + SpeexResamplerState* mResampler; + // mRemainingResamplerTail, like mBufferPosition, and + // mBufferEnd, is measured in input buffer samples. + uint32_t mRemainingResamplerTail; + uint32_t mBufferEnd; + uint32_t mLoopStart; + uint32_t mLoopEnd; + uint32_t mBufferPosition; + int32_t mBufferSampleRate; + int32_t mResamplerOutRate; + uint32_t mChannels; + float mDopplerShift; + AudioNodeStream* mDestination; + AudioNodeStream* mSource; + AudioParamTimeline mPlaybackRateTimeline; + AudioParamTimeline mDetuneTimeline; + bool mLoop; +}; + +AudioBufferSourceNode::AudioBufferSourceNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mLoopStart(0.0) + , mLoopEnd(0.0) + // mOffset and mDuration are initialized in Start(). + , mPlaybackRate(new AudioParam(this, PLAYBACKRATE, 1.0f, "playbackRate")) + , mDetune(new AudioParam(this, DETUNE, 0.0f, "detune")) + , mLoop(false) + , mStartCalled(false) +{ + AudioBufferSourceNodeEngine* engine = new AudioBufferSourceNodeEngine(this, aContext->Destination()); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NEED_MAIN_THREAD_FINISHED, + aContext->Graph()); + engine->SetSourceStream(mStream); + mStream->AddMainThreadListener(this); +} + +AudioBufferSourceNode::~AudioBufferSourceNode() +{ +} + +void +AudioBufferSourceNode::DestroyMediaStream() +{ + bool hadStream = mStream; + if (hadStream) { + mStream->RemoveMainThreadListener(this); + } + AudioNode::DestroyMediaStream(); + if (hadStream && Context()) { + Context()->UnregisterAudioBufferSourceNode(this); + } +} + +size_t +AudioBufferSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + + /* mBuffer can be shared and is accounted for separately. 
*/ + + amount += mPlaybackRate->SizeOfIncludingThis(aMallocSizeOf); + amount += mDetune->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +AudioBufferSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +AudioBufferSourceNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return AudioBufferSourceNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +AudioBufferSourceNode::Start(double aWhen, double aOffset, + const Optional<double>& aDuration, ErrorResult& aRv) +{ + if (!WebAudioUtils::IsTimeValid(aWhen) || + (aDuration.WasPassed() && !WebAudioUtils::IsTimeValid(aDuration.Value()))) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + if (mStartCalled) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + mStartCalled = true; + + AudioNodeStream* ns = mStream; + if (!ns) { + // Nothing to play, or we're already dead for some reason + return; + } + + // Remember our arguments so that we can use them when we get a new buffer. + mOffset = aOffset; + mDuration = aDuration.WasPassed() ? aDuration.Value() + : std::numeric_limits<double>::min(); + + WEB_AUDIO_API_LOG("%f: %s %u Start(%f, %g, %g)", Context()->CurrentTime(), + NodeType(), Id(), aWhen, aOffset, mDuration); + + // We can't send these parameters without a buffer because we don't know the + // buffer's sample rate or length. 
+ if (mBuffer) { + SendOffsetAndDurationParametersToStream(ns); + } + + // Don't set parameter unnecessarily + if (aWhen > 0.0) { + ns->SetDoubleParameter(START, aWhen); + } +} + +void +AudioBufferSourceNode::SendBufferParameterToStream(JSContext* aCx) +{ + AudioNodeStream* ns = mStream; + if (!ns) { + return; + } + + if (mBuffer) { + RefPtr<ThreadSharedFloatArrayBufferList> data = + mBuffer->GetThreadSharedChannelsForRate(aCx); + ns->SetBuffer(data.forget()); + + if (mStartCalled) { + SendOffsetAndDurationParametersToStream(ns); + } + } else { + ns->SetInt32Parameter(BUFFEREND, 0); + ns->SetBuffer(nullptr); + + MarkInactive(); + } +} + +void +AudioBufferSourceNode::SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream) +{ + NS_ASSERTION(mBuffer && mStartCalled, + "Only call this when we have a buffer and start() has been called"); + + float rate = mBuffer->SampleRate(); + aStream->SetInt32Parameter(SAMPLE_RATE, rate); + + int32_t bufferEnd = mBuffer->Length(); + int32_t offsetSamples = std::max(0, NS_lround(mOffset * rate)); + + // Don't set parameter unnecessarily + if (offsetSamples > 0) { + aStream->SetInt32Parameter(BUFFERSTART, offsetSamples); + } + + if (mDuration != std::numeric_limits<double>::min()) { + MOZ_ASSERT(mDuration >= 0.0); // provided by Start() + MOZ_ASSERT(rate >= 0.0f); // provided by AudioBuffer::Create() + static_assert(std::numeric_limits<double>::digits >= + std::numeric_limits<decltype(bufferEnd)>::digits, + "bufferEnd should be represented exactly by double"); + // + 0.5 rounds mDuration to nearest sample when assigned to bufferEnd. 
+ bufferEnd = std::min<double>(bufferEnd, + offsetSamples + mDuration * rate + 0.5); + } + aStream->SetInt32Parameter(BUFFEREND, bufferEnd); + + MarkActive(); +} + +void +AudioBufferSourceNode::Stop(double aWhen, ErrorResult& aRv) +{ + if (!WebAudioUtils::IsTimeValid(aWhen)) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + if (!mStartCalled) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + + WEB_AUDIO_API_LOG("%f: %s %u Stop(%f)", Context()->CurrentTime(), + NodeType(), Id(), aWhen); + + AudioNodeStream* ns = mStream; + if (!ns || !Context()) { + // We've already stopped and had our stream shut down + return; + } + + ns->SetStreamTimeParameter(STOP, Context(), std::max(0.0, aWhen)); +} + +void +AudioBufferSourceNode::NotifyMainThreadStreamFinished() +{ + MOZ_ASSERT(mStream->IsFinished()); + + class EndedEventDispatcher final : public Runnable + { + public: + explicit EndedEventDispatcher(AudioBufferSourceNode* aNode) + : mNode(aNode) {} + NS_IMETHOD Run() override + { + // If it's not safe to run scripts right now, schedule this to run later + if (!nsContentUtils::IsSafeToRunScript()) { + nsContentUtils::AddScriptRunner(this); + return NS_OK; + } + + mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended")); + // Release stream resources. + mNode->DestroyMediaStream(); + return NS_OK; + } + private: + RefPtr<AudioBufferSourceNode> mNode; + }; + + NS_DispatchToMainThread(new EndedEventDispatcher(this)); + + // Drop the playing reference + // Warning: The below line might delete this. 
+ MarkInactive(); +} + +void +AudioBufferSourceNode::SendDopplerShiftToStream(double aDopplerShift) +{ + MOZ_ASSERT(mStream, "Should have disconnected panner if no stream"); + SendDoubleParameterToStream(DOPPLERSHIFT, aDopplerShift); +} + +void +AudioBufferSourceNode::SendLoopParametersToStream() +{ + if (!mStream) { + return; + } + // Don't compute and set the loop parameters unnecessarily + if (mLoop && mBuffer) { + float rate = mBuffer->SampleRate(); + double length = (double(mBuffer->Length()) / mBuffer->SampleRate()); + double actualLoopStart, actualLoopEnd; + if (mLoopStart >= 0.0 && mLoopEnd > 0.0 && + mLoopStart < mLoopEnd) { + MOZ_ASSERT(mLoopStart != 0.0 || mLoopEnd != 0.0); + actualLoopStart = (mLoopStart > length) ? 0.0 : mLoopStart; + actualLoopEnd = std::min(mLoopEnd, length); + } else { + actualLoopStart = 0.0; + actualLoopEnd = length; + } + int32_t loopStartTicks = NS_lround(actualLoopStart * rate); + int32_t loopEndTicks = NS_lround(actualLoopEnd * rate); + if (loopStartTicks < loopEndTicks) { + SendInt32ParameterToStream(LOOPSTART, loopStartTicks); + SendInt32ParameterToStream(LOOPEND, loopEndTicks); + SendInt32ParameterToStream(LOOP, 1); + } else { + // Be explicit about looping not happening if the offsets make + // looping impossible. + SendInt32ParameterToStream(LOOP, 0); + } + } else { + SendInt32ParameterToStream(LOOP, 0); + } +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/AudioBufferSourceNode.h b/dom/media/webaudio/AudioBufferSourceNode.h new file mode 100644 index 000000000..d982ec5cc --- /dev/null +++ b/dom/media/webaudio/AudioBufferSourceNode.h @@ -0,0 +1,149 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef AudioBufferSourceNode_h_ +#define AudioBufferSourceNode_h_ + +#include "AudioNode.h" +#include "AudioBuffer.h" + +namespace mozilla { +namespace dom { + +class AudioParam; + +class AudioBufferSourceNode final : public AudioNode, + public MainThreadMediaStreamListener +{ +public: + explicit AudioBufferSourceNode(AudioContext* aContext); + + void DestroyMediaStream() override; + + uint16_t NumberOfInputs() const final override + { + return 0; + } + AudioBufferSourceNode* AsAudioBufferSourceNode() override + { + return this; + } + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioBufferSourceNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + void Start(double aWhen, double aOffset, + const Optional<double>& aDuration, ErrorResult& aRv); + void Stop(double aWhen, ErrorResult& aRv); + + AudioBuffer* GetBuffer(JSContext* aCx) const + { + return mBuffer; + } + void SetBuffer(JSContext* aCx, AudioBuffer* aBuffer) + { + mBuffer = aBuffer; + SendBufferParameterToStream(aCx); + SendLoopParametersToStream(); + } + AudioParam* PlaybackRate() const + { + return mPlaybackRate; + } + AudioParam* Detune() const + { + return mDetune; + } + bool Loop() const + { + return mLoop; + } + void SetLoop(bool aLoop) + { + mLoop = aLoop; + SendLoopParametersToStream(); + } + double LoopStart() const + { + return mLoopStart; + } + void SetLoopStart(double aStart) + { + mLoopStart = aStart; + SendLoopParametersToStream(); + } + double LoopEnd() const + { + return mLoopEnd; + } + void SetLoopEnd(double aEnd) + { + mLoopEnd = aEnd; + SendLoopParametersToStream(); + } + void SendDopplerShiftToStream(double aDopplerShift); + + IMPL_EVENT_HANDLER(ended) + + void NotifyMainThreadStreamFinished() override; + + const char* NodeType() const override + { + return "AudioBufferSourceNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf 
aMallocSizeOf) const override; + +protected: + virtual ~AudioBufferSourceNode(); + +private: + friend class AudioBufferSourceNodeEngine; + // START is sent during Start(). + // STOP is sent during Stop(). + // BUFFERSTART and BUFFEREND are sent when SetBuffer() and Start() have + // been called (along with sending the buffer). + enum EngineParameters { + SAMPLE_RATE, + START, + STOP, + // BUFFERSTART is the "offset" passed to start(), multiplied by + // buffer.sampleRate. + BUFFERSTART, + // BUFFEREND is the sum of "offset" and "duration" passed to start(), + // multiplied by buffer.sampleRate, or the size of the buffer, if smaller. + BUFFEREND, + LOOP, + LOOPSTART, + LOOPEND, + PLAYBACKRATE, + DETUNE, + DOPPLERSHIFT + }; + + void SendLoopParametersToStream(); + void SendBufferParameterToStream(JSContext* aCx); + void SendOffsetAndDurationParametersToStream(AudioNodeStream* aStream); + +private: + double mLoopStart; + double mLoopEnd; + double mOffset; + double mDuration; + RefPtr<AudioBuffer> mBuffer; + RefPtr<AudioParam> mPlaybackRate; + RefPtr<AudioParam> mDetune; + bool mLoop; + bool mStartCalled; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/AudioContext.cpp b/dom/media/webaudio/AudioContext.cpp new file mode 100644 index 000000000..f61226a48 --- /dev/null +++ b/dom/media/webaudio/AudioContext.cpp @@ -0,0 +1,1247 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioContext.h" + +#include "blink/PeriodicWave.h" + +#include "mozilla/ErrorResult.h" +#include "mozilla/OwningNonNull.h" + +#include "mozilla/dom/AnalyserNode.h" +#include "mozilla/dom/AudioContextBinding.h" +#include "mozilla/dom/HTMLMediaElement.h" +#include "mozilla/dom/OfflineAudioContextBinding.h" +#include "mozilla/dom/Promise.h" + +#include "AudioBuffer.h" +#include "AudioBufferSourceNode.h" +#include "AudioChannelService.h" +#include "AudioDestinationNode.h" +#include "AudioListener.h" +#include "AudioStream.h" +#include "BiquadFilterNode.h" +#include "ChannelMergerNode.h" +#include "ChannelSplitterNode.h" +#include "ConstantSourceNode.h" +#include "ConvolverNode.h" +#include "DelayNode.h" +#include "DynamicsCompressorNode.h" +#include "GainNode.h" +#include "IIRFilterNode.h" +#include "MediaElementAudioSourceNode.h" +#include "MediaStreamAudioDestinationNode.h" +#include "MediaStreamAudioSourceNode.h" +#include "MediaStreamGraph.h" +#include "nsContentUtils.h" +#include "nsNetCID.h" +#include "nsNetUtil.h" +#include "nsPIDOMWindow.h" +#include "nsPrintfCString.h" +#include "OscillatorNode.h" +#include "PannerNode.h" +#include "PeriodicWave.h" +#include "ScriptProcessorNode.h" +#include "StereoPannerNode.h" +#include "WaveShaperNode.h" + +namespace mozilla { +namespace dom { + +// 0 is a special value that MediaStreams use to denote they are not part of a +// AudioContext. +static dom::AudioContext::AudioContextId gAudioContextId = 1; + +NS_IMPL_CYCLE_COLLECTION_CLASS(AudioContext) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioContext) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mDestination) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mListener) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mPromiseGripArray) + if (!tmp->mIsStarted) { + NS_IMPL_CYCLE_COLLECTION_UNLINK(mActiveNodes) + } + // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly. + // mAllNodes is an array of weak pointers, ignore it here. 
+ // mPannerNodes is an array of weak pointers, ignore it here. + // mBasicWaveFormCache cannot participate in cycles, ignore it here. + + // Remove weak reference on the global window as the context is not usable + // without mDestination. + tmp->DisconnectFromWindow(); +NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(DOMEventTargetHelper) + +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioContext, + DOMEventTargetHelper) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDestination) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mListener) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromiseGripArray) + if (!tmp->mIsStarted) { + MOZ_ASSERT(tmp->mIsOffline, + "Online AudioContexts should always be started"); + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mActiveNodes) + } + // mDecodeJobs owns the WebAudioDecodeJob objects whose lifetime is managed explicitly. + // mAllNodes is an array of weak pointers, ignore it here. + // mPannerNodes is an array of weak pointers, ignore it here. + // mBasicWaveFormCache cannot participate in cycles, ignore it here. 
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_IMPL_ADDREF_INHERITED(AudioContext, DOMEventTargetHelper) +NS_IMPL_RELEASE_INHERITED(AudioContext, DOMEventTargetHelper) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioContext) + NS_INTERFACE_MAP_ENTRY(nsIMemoryReporter) +NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper) + +static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) +{ + if (aIsOffline) { + return aSampleRate; + } else { + return static_cast<float>(CubebUtils::PreferredSampleRate()); + } +} + +AudioContext::AudioContext(nsPIDOMWindowInner* aWindow, + bool aIsOffline, + AudioChannel aChannel, + uint32_t aNumberOfChannels, + uint32_t aLength, + float aSampleRate) + : DOMEventTargetHelper(aWindow) + , mId(gAudioContextId++) + , mSampleRate(GetSampleRateForAudioContext(aIsOffline, aSampleRate)) + , mAudioContextState(AudioContextState::Suspended) + , mNumberOfChannels(aNumberOfChannels) + , mIsOffline(aIsOffline) + , mIsStarted(!aIsOffline) + , mIsShutDown(false) + , mCloseCalled(false) + , mSuspendCalled(false) +{ + bool mute = aWindow->AddAudioContext(this); + + // Note: AudioDestinationNode needs an AudioContext that must already be + // bound to the window. + mDestination = new AudioDestinationNode(this, aIsOffline, aChannel, + aNumberOfChannels, aLength, aSampleRate); + + // The context can't be muted until it has a destination. 
+ if (mute) { + Mute(); + } +} + +nsresult +AudioContext::Init() +{ + if (!mIsOffline) { + nsresult rv = mDestination->CreateAudioChannelAgent(); + if (NS_WARN_IF(NS_FAILED(rv))) { + return rv; + } + } + + return NS_OK; +} + +void +AudioContext::DisconnectFromWindow() +{ + nsPIDOMWindowInner* window = GetOwner(); + if (window) { + window->RemoveAudioContext(this); + } +} + +AudioContext::~AudioContext() +{ + DisconnectFromWindow(); + UnregisterWeakMemoryReporter(this); +} + +JSObject* +AudioContext::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + if (mIsOffline) { + return OfflineAudioContextBinding::Wrap(aCx, this, aGivenProto); + } else { + return AudioContextBinding::Wrap(aCx, this, aGivenProto); + } +} + +/* static */ already_AddRefed<AudioContext> +AudioContext::Constructor(const GlobalObject& aGlobal, + ErrorResult& aRv) +{ + return AudioContext::Constructor(aGlobal, + AudioChannelService::GetDefaultAudioChannel(), + aRv); +} + +/* static */ already_AddRefed<AudioContext> +AudioContext::Constructor(const GlobalObject& aGlobal, + AudioChannel aChannel, + ErrorResult& aRv) +{ + nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports()); + if (!window) { + aRv.Throw(NS_ERROR_FAILURE); + return nullptr; + } + + RefPtr<AudioContext> object = new AudioContext(window, false, aChannel); + aRv = object->Init(); + if (NS_WARN_IF(aRv.Failed())) { + return nullptr; + } + + RegisterWeakMemoryReporter(object); + + return object.forget(); +} + +/* static */ already_AddRefed<AudioContext> +AudioContext::Constructor(const GlobalObject& aGlobal, + uint32_t aNumberOfChannels, + uint32_t aLength, + float aSampleRate, + ErrorResult& aRv) +{ + nsCOMPtr<nsPIDOMWindowInner> window = do_QueryInterface(aGlobal.GetAsSupports()); + if (!window) { + aRv.Throw(NS_ERROR_FAILURE); + return nullptr; + } + + if (aNumberOfChannels == 0 || + aNumberOfChannels > WebAudioUtils::MaxChannelCount || + aLength == 0 || + aSampleRate < 
WebAudioUtils::MinSampleRate || + aSampleRate > WebAudioUtils::MaxSampleRate) { + // The DOM binding protects us against infinity and NaN + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + RefPtr<AudioContext> object = new AudioContext(window, + true, + AudioChannel::Normal, + aNumberOfChannels, + aLength, + aSampleRate); + + RegisterWeakMemoryReporter(object); + + return object.forget(); +} + +bool AudioContext::CheckClosed(ErrorResult& aRv) +{ + if (mAudioContextState == AudioContextState::Closed) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return true; + } + return false; +} + +already_AddRefed<AudioBufferSourceNode> +AudioContext::CreateBufferSource(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<AudioBufferSourceNode> bufferNode = + new AudioBufferSourceNode(this); + return bufferNode.forget(); +} + +already_AddRefed<ConstantSourceNode> +AudioContext::CreateConstantSource(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<ConstantSourceNode> constantSourceNode = + new ConstantSourceNode(this); + return constantSourceNode.forget(); +} + +already_AddRefed<AudioBuffer> +AudioContext::CreateBuffer(uint32_t aNumberOfChannels, uint32_t aLength, + float aSampleRate, + ErrorResult& aRv) +{ + if (!aNumberOfChannels) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return nullptr; + } + + return AudioBuffer::Create(this, aNumberOfChannels, aLength, + aSampleRate, aRv); +} + +namespace { + +bool IsValidBufferSize(uint32_t aBufferSize) { + switch (aBufferSize) { + case 0: // let the implementation choose the buffer size + case 256: + case 512: + case 1024: + case 2048: + case 4096: + case 8192: + case 16384: + return true; + default: + return false; + } +} + +} // namespace + +already_AddRefed<MediaStreamAudioDestinationNode> +AudioContext::CreateMediaStreamDestination(ErrorResult& aRv) +{ + if (mIsOffline) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + if 
(CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<MediaStreamAudioDestinationNode> node = + new MediaStreamAudioDestinationNode(this); + return node.forget(); +} + +already_AddRefed<ScriptProcessorNode> +AudioContext::CreateScriptProcessor(uint32_t aBufferSize, + uint32_t aNumberOfInputChannels, + uint32_t aNumberOfOutputChannels, + ErrorResult& aRv) +{ + if ((aNumberOfInputChannels == 0 && aNumberOfOutputChannels == 0) || + aNumberOfInputChannels > WebAudioUtils::MaxChannelCount || + aNumberOfOutputChannels > WebAudioUtils::MaxChannelCount || + !IsValidBufferSize(aBufferSize)) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return nullptr; + } + + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<ScriptProcessorNode> scriptProcessor = + new ScriptProcessorNode(this, aBufferSize, aNumberOfInputChannels, + aNumberOfOutputChannels); + return scriptProcessor.forget(); +} + +already_AddRefed<AnalyserNode> +AudioContext::CreateAnalyser(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<AnalyserNode> analyserNode = new AnalyserNode(this); + return analyserNode.forget(); +} + +already_AddRefed<StereoPannerNode> +AudioContext::CreateStereoPanner(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<StereoPannerNode> stereoPannerNode = new StereoPannerNode(this); + return stereoPannerNode.forget(); +} + +already_AddRefed<MediaElementAudioSourceNode> +AudioContext::CreateMediaElementSource(HTMLMediaElement& aMediaElement, + ErrorResult& aRv) +{ + if (mIsOffline) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + if (aMediaElement.ContainsRestrictedContent()) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<DOMMediaStream> stream = + aMediaElement.CaptureAudio(aRv, mDestination->Stream()->Graph()); + if (aRv.Failed()) { + return nullptr; + } + return MediaElementAudioSourceNode::Create(this, stream, aRv); +} + 
+already_AddRefed<MediaStreamAudioSourceNode> +AudioContext::CreateMediaStreamSource(DOMMediaStream& aMediaStream, + ErrorResult& aRv) +{ + if (mIsOffline) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + if (CheckClosed(aRv)) { + return nullptr; + } + + return MediaStreamAudioSourceNode::Create(this, &aMediaStream, aRv); +} + +already_AddRefed<GainNode> +AudioContext::CreateGain(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<GainNode> gainNode = new GainNode(this); + return gainNode.forget(); +} + +already_AddRefed<WaveShaperNode> +AudioContext::CreateWaveShaper(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<WaveShaperNode> waveShaperNode = new WaveShaperNode(this); + return waveShaperNode.forget(); +} + +already_AddRefed<DelayNode> +AudioContext::CreateDelay(double aMaxDelayTime, ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + if (aMaxDelayTime > 0. && aMaxDelayTime < 180.) 
{ + RefPtr<DelayNode> delayNode = new DelayNode(this, aMaxDelayTime); + return delayNode.forget(); + } + + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; +} + +already_AddRefed<PannerNode> +AudioContext::CreatePanner(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<PannerNode> pannerNode = new PannerNode(this); + mPannerNodes.PutEntry(pannerNode); + return pannerNode.forget(); +} + +already_AddRefed<ConvolverNode> +AudioContext::CreateConvolver(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<ConvolverNode> convolverNode = new ConvolverNode(this); + return convolverNode.forget(); +} + +already_AddRefed<ChannelSplitterNode> +AudioContext::CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv) +{ + if (aNumberOfOutputs == 0 || + aNumberOfOutputs > WebAudioUtils::MaxChannelCount) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return nullptr; + } + + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<ChannelSplitterNode> splitterNode = + new ChannelSplitterNode(this, aNumberOfOutputs); + return splitterNode.forget(); +} + +already_AddRefed<ChannelMergerNode> +AudioContext::CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv) +{ + if (aNumberOfInputs == 0 || + aNumberOfInputs > WebAudioUtils::MaxChannelCount) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return nullptr; + } + + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<ChannelMergerNode> mergerNode = + new ChannelMergerNode(this, aNumberOfInputs); + return mergerNode.forget(); +} + +already_AddRefed<DynamicsCompressorNode> +AudioContext::CreateDynamicsCompressor(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<DynamicsCompressorNode> compressorNode = + new DynamicsCompressorNode(this); + return compressorNode.forget(); +} + +already_AddRefed<BiquadFilterNode> +AudioContext::CreateBiquadFilter(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + 
RefPtr<BiquadFilterNode> filterNode = + new BiquadFilterNode(this); + return filterNode.forget(); +} + +already_AddRefed<IIRFilterNode> +AudioContext::CreateIIRFilter(const mozilla::dom::binding_detail::AutoSequence<double>& aFeedforward, + const mozilla::dom::binding_detail::AutoSequence<double>& aFeedback, + mozilla::ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + if (aFeedforward.Length() == 0 || aFeedforward.Length() > 20) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + if (aFeedback.Length() == 0 || aFeedback.Length() > 20) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + bool feedforwardAllZeros = true; + for (size_t i = 0; i < aFeedforward.Length(); ++i) { + if (aFeedforward.Elements()[i] != 0.0) { + feedforwardAllZeros = false; + } + } + + if (feedforwardAllZeros || aFeedback.Elements()[0] == 0.0) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return nullptr; + } + + RefPtr<IIRFilterNode> filterNode = + new IIRFilterNode(this, aFeedforward, aFeedback); + return filterNode.forget(); +} + +already_AddRefed<OscillatorNode> +AudioContext::CreateOscillator(ErrorResult& aRv) +{ + if (CheckClosed(aRv)) { + return nullptr; + } + + RefPtr<OscillatorNode> oscillatorNode = + new OscillatorNode(this); + return oscillatorNode.forget(); +} + +already_AddRefed<PeriodicWave> +AudioContext::CreatePeriodicWave(const Float32Array& aRealData, + const Float32Array& aImagData, + const PeriodicWaveConstraints& aConstraints, + ErrorResult& aRv) +{ + aRealData.ComputeLengthAndData(); + aImagData.ComputeLengthAndData(); + + if (aRealData.Length() != aImagData.Length() || + aRealData.Length() == 0) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return nullptr; + } + + RefPtr<PeriodicWave> periodicWave = + new PeriodicWave(this, aRealData.Data(), aImagData.Data(), + aImagData.Length(), aConstraints.mDisableNormalization, + aRv); + if (aRv.Failed()) { + return nullptr; + } + return periodicWave.forget(); +} + 
+AudioListener* +AudioContext::Listener() +{ + if (!mListener) { + mListener = new AudioListener(this); + } + return mListener; +} + +already_AddRefed<Promise> +AudioContext::DecodeAudioData(const ArrayBuffer& aBuffer, + const Optional<OwningNonNull<DecodeSuccessCallback> >& aSuccessCallback, + const Optional<OwningNonNull<DecodeErrorCallback> >& aFailureCallback, + ErrorResult& aRv) +{ + nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); + RefPtr<Promise> promise; + AutoJSAPI jsapi; + jsapi.Init(); + JSContext* cx = jsapi.cx(); + JSAutoCompartment ac(cx, aBuffer.Obj()); + + promise = Promise::Create(parentObject, aRv); + if (aRv.Failed()) { + return nullptr; + } + + aBuffer.ComputeLengthAndData(); + + if (aBuffer.IsShared()) { + // Throw if the object is mapping shared memory (must opt in). + aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_SHARED>(NS_LITERAL_STRING("Argument of AudioContext.decodeAudioData")); + return nullptr; + } + + // Detach the array buffer + size_t length = aBuffer.Length(); + JS::RootedObject obj(cx, aBuffer.Obj()); + + uint8_t* data = static_cast<uint8_t*>(JS_StealArrayBufferContents(cx, obj)); + + // Sniff the content of the media. + // Failed type sniffing will be handled by AsyncDecodeWebAudio. 
+ nsAutoCString contentType; + NS_SniffContent(NS_DATA_SNIFFER_CATEGORY, nullptr, data, length, contentType); + + RefPtr<DecodeErrorCallback> failureCallback; + RefPtr<DecodeSuccessCallback> successCallback; + if (aFailureCallback.WasPassed()) { + failureCallback = &aFailureCallback.Value(); + } + if (aSuccessCallback.WasPassed()) { + successCallback = &aSuccessCallback.Value(); + } + RefPtr<WebAudioDecodeJob> job( + new WebAudioDecodeJob(contentType, this, + promise, successCallback, failureCallback)); + AsyncDecodeWebAudio(contentType.get(), data, length, *job); + // Transfer the ownership to mDecodeJobs + mDecodeJobs.AppendElement(job.forget()); + + return promise.forget(); +} + +void +AudioContext::RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob) +{ + mDecodeJobs.RemoveElement(aDecodeJob); +} + +void +AudioContext::RegisterActiveNode(AudioNode* aNode) +{ + if (!mIsShutDown) { + mActiveNodes.PutEntry(aNode); + } +} + +void +AudioContext::UnregisterActiveNode(AudioNode* aNode) +{ + mActiveNodes.RemoveEntry(aNode); +} + +void +AudioContext::UnregisterAudioBufferSourceNode(AudioBufferSourceNode* aNode) +{ + UpdatePannerSource(); +} + +void +AudioContext::UnregisterPannerNode(PannerNode* aNode) +{ + mPannerNodes.RemoveEntry(aNode); + if (mListener) { + mListener->UnregisterPannerNode(aNode); + } +} + +void +AudioContext::UpdatePannerSource() +{ + for (auto iter = mPannerNodes.Iter(); !iter.Done(); iter.Next()) { + iter.Get()->GetKey()->FindConnectedSources(); + } +} + +uint32_t +AudioContext::MaxChannelCount() const +{ + return mIsOffline ? 
mNumberOfChannels : CubebUtils::MaxNumberOfChannels(); +} + +uint32_t +AudioContext::ActiveNodeCount() const +{ + return mActiveNodes.Count(); +} + +MediaStreamGraph* +AudioContext::Graph() const +{ + return Destination()->Stream()->Graph(); +} + +MediaStream* +AudioContext::DestinationStream() const +{ + if (Destination()) { + return Destination()->Stream(); + } + return nullptr; +} + +double +AudioContext::CurrentTime() const +{ + MediaStream* stream = Destination()->Stream(); + return stream->StreamTimeToSeconds(stream->GetCurrentTime()); +} + +void +AudioContext::Shutdown() +{ + mIsShutDown = true; + + if (!mIsOffline) { + ErrorResult dummy; + RefPtr<Promise> ignored = Close(dummy); + } + + for (auto p : mPromiseGripArray) { + p->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); + } + + mPromiseGripArray.Clear(); + + // Release references to active nodes. + // Active AudioNodes don't unregister in destructors, at which point the + // Node is already unregistered. + mActiveNodes.Clear(); + + // For offline contexts, we can destroy the MediaStreamGraph at this point. 
+ if (mIsOffline && mDestination) { + mDestination->OfflineShutdown(); + } +} + +StateChangeTask::StateChangeTask(AudioContext* aAudioContext, + void* aPromise, + AudioContextState aNewState) + : mAudioContext(aAudioContext) + , mPromise(aPromise) + , mAudioNodeStream(nullptr) + , mNewState(aNewState) +{ + MOZ_ASSERT(NS_IsMainThread(), + "This constructor should be used from the main thread."); +} + +StateChangeTask::StateChangeTask(AudioNodeStream* aStream, + void* aPromise, + AudioContextState aNewState) + : mAudioContext(nullptr) + , mPromise(aPromise) + , mAudioNodeStream(aStream) + , mNewState(aNewState) +{ + MOZ_ASSERT(!NS_IsMainThread(), + "This constructor should be used from the graph thread."); +} + +NS_IMETHODIMP +StateChangeTask::Run() +{ + MOZ_ASSERT(NS_IsMainThread()); + + if (!mAudioContext && !mAudioNodeStream) { + return NS_OK; + } + if (mAudioNodeStream) { + AudioNode* node = mAudioNodeStream->Engine()->NodeMainThread(); + if (!node) { + return NS_OK; + } + mAudioContext = node->Context(); + if (!mAudioContext) { + return NS_OK; + } + } + + mAudioContext->OnStateChanged(mPromise, mNewState); + // We have can't call Release() on the AudioContext on the MSG thread, so we + // unref it here, on the main thread. 
+ mAudioContext = nullptr; + + return NS_OK; +} + +/* This runnable allows to fire the "statechange" event */ +class OnStateChangeTask final : public Runnable +{ +public: + explicit OnStateChangeTask(AudioContext* aAudioContext) + : mAudioContext(aAudioContext) + {} + + NS_IMETHODIMP + Run() override + { + nsPIDOMWindowInner* parent = mAudioContext->GetParentObject(); + if (!parent) { + return NS_ERROR_FAILURE; + } + + nsIDocument* doc = parent->GetExtantDoc(); + if (!doc) { + return NS_ERROR_FAILURE; + } + + return nsContentUtils::DispatchTrustedEvent(doc, + static_cast<DOMEventTargetHelper*>(mAudioContext), + NS_LITERAL_STRING("statechange"), + false, false); + } + +private: + RefPtr<AudioContext> mAudioContext; +}; + +void +AudioContext::OnStateChanged(void* aPromise, AudioContextState aNewState) +{ + MOZ_ASSERT(NS_IsMainThread()); + + // This can happen if close() was called right after creating the + // AudioContext, before the context has switched to "running". + if (mAudioContextState == AudioContextState::Closed && + aNewState == AudioContextState::Running && + !aPromise) { + return; + } + + // This can happen if this is called in reaction to a + // MediaStreamGraph shutdown, and a AudioContext was being + // suspended at the same time, for example if a page was being + // closed. 
+ if (mAudioContextState == AudioContextState::Closed && + aNewState == AudioContextState::Suspended) { + return; + } + +#ifndef WIN32 // Bug 1170547 +#ifndef XP_MACOSX +#ifdef DEBUG + + if (!((mAudioContextState == AudioContextState::Suspended && + aNewState == AudioContextState::Running) || + (mAudioContextState == AudioContextState::Running && + aNewState == AudioContextState::Suspended) || + (mAudioContextState == AudioContextState::Running && + aNewState == AudioContextState::Closed) || + (mAudioContextState == AudioContextState::Suspended && + aNewState == AudioContextState::Closed) || + (mAudioContextState == aNewState))) { + fprintf(stderr, + "Invalid transition: mAudioContextState: %d -> aNewState %d\n", + static_cast<int>(mAudioContextState), static_cast<int>(aNewState)); + MOZ_ASSERT(false); + } + +#endif // DEBUG +#endif // XP_MACOSX +#endif // WIN32 + + MOZ_ASSERT( + mIsOffline || aPromise || aNewState == AudioContextState::Running, + "We should have a promise here if this is a real-time AudioContext." + "Or this is the first time we switch to \"running\"."); + + if (aPromise) { + Promise* promise = reinterpret_cast<Promise*>(aPromise); + // It is possible for the promise to have been removed from + // mPromiseGripArray if the cycle collector has severed our connections. DO + // NOT dereference the promise pointer in that case since it may point to + // already freed memory. 
+ if (mPromiseGripArray.Contains(promise)) { + promise->MaybeResolveWithUndefined(); + DebugOnly<bool> rv = mPromiseGripArray.RemoveElement(promise); + MOZ_ASSERT(rv, "Promise wasn't in the grip array?"); + } + } + + if (mAudioContextState != aNewState) { + RefPtr<OnStateChangeTask> onStateChangeTask = + new OnStateChangeTask(this); + NS_DispatchToMainThread(onStateChangeTask); + } + + mAudioContextState = aNewState; +} + +nsTArray<MediaStream*> +AudioContext::GetAllStreams() const +{ + nsTArray<MediaStream*> streams; + for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) { + MediaStream* s = iter.Get()->GetKey()->GetStream(); + if (s) { + streams.AppendElement(s); + } + } + return streams; +} + +already_AddRefed<Promise> +AudioContext::Suspend(ErrorResult& aRv) +{ + nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); + RefPtr<Promise> promise; + promise = Promise::Create(parentObject, aRv); + if (aRv.Failed()) { + return nullptr; + } + if (mIsOffline) { + promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return promise.forget(); + } + + if (mAudioContextState == AudioContextState::Closed || + mCloseCalled) { + promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); + return promise.forget(); + } + + Destination()->Suspend(); + + mPromiseGripArray.AppendElement(promise); + + nsTArray<MediaStream*> streams; + // If mSuspendCalled is true then we already suspended all our streams, + // so don't suspend them again (since suspend(); suspend(); resume(); should + // cancel both suspends). But we still need to do ApplyAudioContextOperation + // to ensure our new promise is resolved. 
+ if (!mSuspendCalled) { + streams = GetAllStreams(); + } + Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(), + streams, + AudioContextOperation::Suspend, promise); + + mSuspendCalled = true; + + return promise.forget(); +} + +already_AddRefed<Promise> +AudioContext::Resume(ErrorResult& aRv) +{ + nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); + RefPtr<Promise> promise; + promise = Promise::Create(parentObject, aRv); + if (aRv.Failed()) { + return nullptr; + } + + if (mIsOffline) { + promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return promise.forget(); + } + + if (mAudioContextState == AudioContextState::Closed || + mCloseCalled) { + promise->MaybeReject(NS_ERROR_DOM_INVALID_STATE_ERR); + return promise.forget(); + } + + Destination()->Resume(); + + nsTArray<MediaStream*> streams; + // If mSuspendCalled is false then we already resumed all our streams, + // so don't resume them again (since suspend(); resume(); resume(); should + // be OK). But we still need to do ApplyAudioContextOperation + // to ensure our new promise is resolved. 
+ if (mSuspendCalled) { + streams = GetAllStreams(); + } + mPromiseGripArray.AppendElement(promise); + Graph()->ApplyAudioContextOperation(DestinationStream()->AsAudioNodeStream(), + streams, + AudioContextOperation::Resume, promise); + + mSuspendCalled = false; + + return promise.forget(); +} + +already_AddRefed<Promise> +AudioContext::Close(ErrorResult& aRv) +{ + nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); + RefPtr<Promise> promise; + promise = Promise::Create(parentObject, aRv); + if (aRv.Failed()) { + return nullptr; + } + + if (mIsOffline) { + promise->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return promise.forget(); + } + + if (mAudioContextState == AudioContextState::Closed) { + promise->MaybeResolve(NS_ERROR_DOM_INVALID_STATE_ERR); + return promise.forget(); + } + + if (Destination()) { + Destination()->DestroyAudioChannelAgent(); + } + + mPromiseGripArray.AppendElement(promise); + + // This can be called when freeing a document, and the streams are dead at + // this point, so we need extra null-checks. + MediaStream* ds = DestinationStream(); + if (ds) { + nsTArray<MediaStream*> streams; + // If mSuspendCalled or mCloseCalled are true then we already suspended + // all our streams, so don't suspend them again. But we still need to do + // ApplyAudioContextOperation to ensure our new promise is resolved. 
+ if (!mSuspendCalled && !mCloseCalled) { + streams = GetAllStreams(); + } + Graph()->ApplyAudioContextOperation(ds->AsAudioNodeStream(), streams, + AudioContextOperation::Close, promise); + } + mCloseCalled = true; + + return promise.forget(); +} + +void +AudioContext::RegisterNode(AudioNode* aNode) +{ + MOZ_ASSERT(!mAllNodes.Contains(aNode)); + mAllNodes.PutEntry(aNode); +} + +void +AudioContext::UnregisterNode(AudioNode* aNode) +{ + MOZ_ASSERT(mAllNodes.Contains(aNode)); + mAllNodes.RemoveEntry(aNode); +} + +JSObject* +AudioContext::GetGlobalJSObject() const +{ + nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); + if (!parentObject) { + return nullptr; + } + + // This can also return null. + return parentObject->GetGlobalJSObject(); +} + +already_AddRefed<Promise> +AudioContext::StartRendering(ErrorResult& aRv) +{ + nsCOMPtr<nsIGlobalObject> parentObject = do_QueryInterface(GetParentObject()); + + MOZ_ASSERT(mIsOffline, "This should only be called on OfflineAudioContext"); + if (mIsStarted) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return nullptr; + } + + mIsStarted = true; + RefPtr<Promise> promise = Promise::Create(parentObject, aRv); + if (aRv.Failed()) { + return nullptr; + } + mDestination->StartRendering(promise); + + OnStateChanged(nullptr, AudioContextState::Running); + + return promise.forget(); +} + +unsigned long +AudioContext::Length() +{ + MOZ_ASSERT(mIsOffline); + return mDestination->Length(); +} + +void +AudioContext::Mute() const +{ + MOZ_ASSERT(!mIsOffline); + if (mDestination) { + mDestination->Mute(); + } +} + +void +AudioContext::Unmute() const +{ + MOZ_ASSERT(!mIsOffline); + if (mDestination) { + mDestination->Unmute(); + } +} + +AudioChannel +AudioContext::MozAudioChannelType() const +{ + return mDestination->MozAudioChannelType(); +} + +AudioChannel +AudioContext::TestAudioChannelInAudioNodeStream() +{ + MediaStream* stream = mDestination->Stream(); + MOZ_ASSERT(stream); + + return 
stream->AudioChannelType(); +} + +size_t +AudioContext::SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + // AudioNodes are tracked separately because we do not want the AudioContext + // to track all of the AudioNodes it creates, so we wouldn't be able to + // traverse them from here. + + size_t amount = aMallocSizeOf(this); + if (mListener) { + amount += mListener->SizeOfIncludingThis(aMallocSizeOf); + } + amount += mDecodeJobs.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (uint32_t i = 0; i < mDecodeJobs.Length(); ++i) { + amount += mDecodeJobs[i]->SizeOfIncludingThis(aMallocSizeOf); + } + amount += mActiveNodes.ShallowSizeOfExcludingThis(aMallocSizeOf); + amount += mPannerNodes.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +NS_IMETHODIMP +AudioContext::CollectReports(nsIHandleReportCallback* aHandleReport, + nsISupports* aData, bool aAnonymize) +{ + const nsLiteralCString + nodeDescription("Memory used by AudioNode DOM objects (Web Audio)."); + for (auto iter = mAllNodes.ConstIter(); !iter.Done(); iter.Next()) { + AudioNode* node = iter.Get()->GetKey(); + int64_t amount = node->SizeOfIncludingThis(MallocSizeOf); + nsPrintfCString domNodePath("explicit/webaudio/audio-node/%s/dom-nodes", + node->NodeType()); + aHandleReport->Callback(EmptyCString(), domNodePath, KIND_HEAP, UNITS_BYTES, + amount, nodeDescription, aData); + } + + int64_t amount = SizeOfIncludingThis(MallocSizeOf); + MOZ_COLLECT_REPORT( + "explicit/webaudio/audiocontext", KIND_HEAP, UNITS_BYTES, amount, + "Memory used by AudioContext objects (Web Audio)."); + + return NS_OK; +} + +BasicWaveFormCache* +AudioContext::GetBasicWaveFormCache() +{ + MOZ_ASSERT(NS_IsMainThread()); + if (!mBasicWaveFormCache) { + mBasicWaveFormCache = new BasicWaveFormCache(SampleRate()); + } + return mBasicWaveFormCache; +} + +BasicWaveFormCache::BasicWaveFormCache(uint32_t aSampleRate) + : mSampleRate(aSampleRate) +{ + MOZ_ASSERT(NS_IsMainThread()); +} 
+BasicWaveFormCache::~BasicWaveFormCache() +{ } + +WebCore::PeriodicWave* +BasicWaveFormCache::GetBasicWaveForm(OscillatorType aType) +{ + MOZ_ASSERT(!NS_IsMainThread()); + if (aType == OscillatorType::Sawtooth) { + if (!mSawtooth) { + mSawtooth = WebCore::PeriodicWave::createSawtooth(mSampleRate); + } + return mSawtooth; + } else if (aType == OscillatorType::Square) { + if (!mSquare) { + mSquare = WebCore::PeriodicWave::createSquare(mSampleRate); + } + return mSquare; + } else if (aType == OscillatorType::Triangle) { + if (!mTriangle) { + mTriangle = WebCore::PeriodicWave::createTriangle(mSampleRate); + } + return mTriangle; + } else { + MOZ_ASSERT(false, "Not reached"); + return nullptr; + } +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/AudioContext.h b/dom/media/webaudio/AudioContext.h new file mode 100644 index 000000000..069efa986 --- /dev/null +++ b/dom/media/webaudio/AudioContext.h @@ -0,0 +1,382 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef AudioContext_h_ +#define AudioContext_h_ + +#include "mozilla/dom/AudioChannelBinding.h" +#include "MediaBufferDecoder.h" +#include "mozilla/Attributes.h" +#include "mozilla/DOMEventTargetHelper.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/dom/TypedArray.h" +#include "nsCOMPtr.h" +#include "nsCycleCollectionParticipant.h" +#include "nsHashKeys.h" +#include "nsTHashtable.h" +#include "js/TypeDecls.h" +#include "nsIMemoryReporter.h" + +// X11 has a #define for CurrentTime. Unbelievable :-(. +// See dom/media/DOMMediaStream.h for more fun! 
+#ifdef CurrentTime +#undef CurrentTime +#endif + +namespace WebCore { + class PeriodicWave; +} // namespace WebCore + +class nsPIDOMWindowInner; + +namespace mozilla { + +class DOMMediaStream; +class ErrorResult; +class MediaStream; +class MediaStreamGraph; +class AudioNodeStream; + +namespace dom { + +enum class AudioContextState : uint32_t; +class AnalyserNode; +class AudioBuffer; +class AudioBufferSourceNode; +class AudioDestinationNode; +class AudioListener; +class AudioNode; +class BiquadFilterNode; +class ChannelMergerNode; +class ChannelSplitterNode; +class ConstantSourceNode; +class ConvolverNode; +class DelayNode; +class DynamicsCompressorNode; +class GainNode; +class GlobalObject; +class HTMLMediaElement; +class IIRFilterNode; +class MediaElementAudioSourceNode; +class MediaStreamAudioDestinationNode; +class MediaStreamAudioSourceNode; +class OscillatorNode; +class PannerNode; +class ScriptProcessorNode; +class StereoPannerNode; +class WaveShaperNode; +class PeriodicWave; +struct PeriodicWaveConstraints; +class Promise; +enum class OscillatorType : uint32_t; + +// This is addrefed by the OscillatorNodeEngine on the main thread +// and then used from the MSG thread. +// It can be released either from the graph thread or the main thread. +class BasicWaveFormCache +{ +public: + explicit BasicWaveFormCache(uint32_t aSampleRate); + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BasicWaveFormCache) + WebCore::PeriodicWave* GetBasicWaveForm(OscillatorType aType); +private: + ~BasicWaveFormCache(); + RefPtr<WebCore::PeriodicWave> mSawtooth; + RefPtr<WebCore::PeriodicWave> mSquare; + RefPtr<WebCore::PeriodicWave> mTriangle; + uint32_t mSampleRate; +}; + + +/* This runnable allows the MSG to notify the main thread when audio is actually + * flowing */ +class StateChangeTask final : public Runnable +{ +public: + /* This constructor should be used when this event is sent from the main + * thread. 
*/ + StateChangeTask(AudioContext* aAudioContext, void* aPromise, AudioContextState aNewState); + + /* This constructor should be used when this event is sent from the audio + * thread. */ + StateChangeTask(AudioNodeStream* aStream, void* aPromise, AudioContextState aNewState); + + NS_IMETHOD Run() override; + +private: + RefPtr<AudioContext> mAudioContext; + void* mPromise; + RefPtr<AudioNodeStream> mAudioNodeStream; + AudioContextState mNewState; +}; + +enum class AudioContextOperation { Suspend, Resume, Close }; + +class AudioContext final : public DOMEventTargetHelper, + public nsIMemoryReporter +{ + AudioContext(nsPIDOMWindowInner* aParentWindow, + bool aIsOffline, + AudioChannel aChannel, + uint32_t aNumberOfChannels = 0, + uint32_t aLength = 0, + float aSampleRate = 0.0f); + ~AudioContext(); + + nsresult Init(); + +public: + typedef uint64_t AudioContextId; + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioContext, + DOMEventTargetHelper) + MOZ_DEFINE_MALLOC_SIZE_OF(MallocSizeOf) + + nsPIDOMWindowInner* GetParentObject() const + { + return GetOwner(); + } + + void Shutdown(); // idempotent + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + using DOMEventTargetHelper::DispatchTrustedEvent; + + // Constructor for regular AudioContext + static already_AddRefed<AudioContext> + Constructor(const GlobalObject& aGlobal, ErrorResult& aRv); + + // Constructor for regular AudioContext. A default audio channel is needed. 
+ static already_AddRefed<AudioContext> + Constructor(const GlobalObject& aGlobal, + AudioChannel aChannel, + ErrorResult& aRv); + + // Constructor for offline AudioContext + static already_AddRefed<AudioContext> + Constructor(const GlobalObject& aGlobal, + uint32_t aNumberOfChannels, + uint32_t aLength, + float aSampleRate, + ErrorResult& aRv); + + // AudioContext methods + + AudioDestinationNode* Destination() const + { + return mDestination; + } + + float SampleRate() const + { + return mSampleRate; + } + + bool ShouldSuspendNewStream() const { return mSuspendCalled; } + + double CurrentTime() const; + + AudioListener* Listener(); + + AudioContextState State() const { return mAudioContextState; } + + // Those three methods return a promise to content, that is resolved when an + // (possibly long) operation is completed on the MSG (and possibly other) + // thread(s). To avoid having to match the calls and asychronous result when + // the operation is completed, we keep a reference to the promises on the main + // thread, and then send the promises pointers down the MSG thread, as a void* + // (to make it very clear that the pointer is to merely be treated as an ID). + // When back on the main thread, we can resolve or reject the promise, by + // casting it back to a `Promise*` while asserting we're back on the main + // thread and removing the reference we added. 
+ already_AddRefed<Promise> Suspend(ErrorResult& aRv); + already_AddRefed<Promise> Resume(ErrorResult& aRv); + already_AddRefed<Promise> Close(ErrorResult& aRv); + IMPL_EVENT_HANDLER(statechange) + + already_AddRefed<AudioBufferSourceNode> CreateBufferSource(ErrorResult& aRv); + + already_AddRefed<ConstantSourceNode> CreateConstantSource(ErrorResult& aRv); + + already_AddRefed<AudioBuffer> + CreateBuffer(uint32_t aNumberOfChannels, uint32_t aLength, float aSampleRate, + ErrorResult& aRv); + + already_AddRefed<MediaStreamAudioDestinationNode> + CreateMediaStreamDestination(ErrorResult& aRv); + + already_AddRefed<ScriptProcessorNode> + CreateScriptProcessor(uint32_t aBufferSize, + uint32_t aNumberOfInputChannels, + uint32_t aNumberOfOutputChannels, + ErrorResult& aRv); + + already_AddRefed<StereoPannerNode> + CreateStereoPanner(ErrorResult& aRv); + + already_AddRefed<AnalyserNode> + CreateAnalyser(ErrorResult& aRv); + + already_AddRefed<GainNode> + CreateGain(ErrorResult& aRv); + + already_AddRefed<WaveShaperNode> + CreateWaveShaper(ErrorResult& aRv); + + already_AddRefed<MediaElementAudioSourceNode> + CreateMediaElementSource(HTMLMediaElement& aMediaElement, ErrorResult& aRv); + already_AddRefed<MediaStreamAudioSourceNode> + CreateMediaStreamSource(DOMMediaStream& aMediaStream, ErrorResult& aRv); + + already_AddRefed<DelayNode> + CreateDelay(double aMaxDelayTime, ErrorResult& aRv); + + already_AddRefed<PannerNode> + CreatePanner(ErrorResult& aRv); + + already_AddRefed<ConvolverNode> + CreateConvolver(ErrorResult& aRv); + + already_AddRefed<ChannelSplitterNode> + CreateChannelSplitter(uint32_t aNumberOfOutputs, ErrorResult& aRv); + + already_AddRefed<ChannelMergerNode> + CreateChannelMerger(uint32_t aNumberOfInputs, ErrorResult& aRv); + + already_AddRefed<DynamicsCompressorNode> + CreateDynamicsCompressor(ErrorResult& aRv); + + already_AddRefed<BiquadFilterNode> + CreateBiquadFilter(ErrorResult& aRv); + + already_AddRefed<IIRFilterNode> + CreateIIRFilter(const 
mozilla::dom::binding_detail::AutoSequence<double>& aFeedforward, + const mozilla::dom::binding_detail::AutoSequence<double>& aFeedback, + mozilla::ErrorResult& aRv); + + already_AddRefed<OscillatorNode> + CreateOscillator(ErrorResult& aRv); + + already_AddRefed<PeriodicWave> + CreatePeriodicWave(const Float32Array& aRealData, const Float32Array& aImagData, + const PeriodicWaveConstraints& aConstraints, + ErrorResult& aRv); + + already_AddRefed<Promise> + DecodeAudioData(const ArrayBuffer& aBuffer, + const Optional<OwningNonNull<DecodeSuccessCallback> >& aSuccessCallback, + const Optional<OwningNonNull<DecodeErrorCallback> >& aFailureCallback, + ErrorResult& aRv); + + // OfflineAudioContext methods + already_AddRefed<Promise> StartRendering(ErrorResult& aRv); + IMPL_EVENT_HANDLER(complete) + unsigned long Length(); + + bool IsOffline() const { return mIsOffline; } + + MediaStreamGraph* Graph() const; + MediaStream* DestinationStream() const; + + // Nodes register here if they will produce sound even if they have silent + // or no input connections. The AudioContext will keep registered nodes + // alive until the context is collected. This takes care of "playing" + // references and "tail-time" references. + void RegisterActiveNode(AudioNode* aNode); + // Nodes unregister when they have finished producing sound for the + // foreseeable future. + // Do NOT call UnregisterActiveNode from an AudioNode destructor. + // If the destructor is called, then the Node has already been unregistered. + // The destructor may be called during hashtable enumeration, during which + // unregistering would not be safe. 
+ void UnregisterActiveNode(AudioNode* aNode); + + void UnregisterAudioBufferSourceNode(AudioBufferSourceNode* aNode); + void UnregisterPannerNode(PannerNode* aNode); + void UpdatePannerSource(); + + uint32_t MaxChannelCount() const; + + uint32_t ActiveNodeCount() const; + + void Mute() const; + void Unmute() const; + + JSObject* GetGlobalJSObject() const; + + AudioChannel MozAudioChannelType() const; + + AudioChannel TestAudioChannelInAudioNodeStream(); + + void RegisterNode(AudioNode* aNode); + void UnregisterNode(AudioNode* aNode); + + void OnStateChanged(void* aPromise, AudioContextState aNewState); + + BasicWaveFormCache* GetBasicWaveFormCache(); + + IMPL_EVENT_HANDLER(mozinterruptbegin) + IMPL_EVENT_HANDLER(mozinterruptend) + +private: + void DisconnectFromWindow(); + void RemoveFromDecodeQueue(WebAudioDecodeJob* aDecodeJob); + void ShutdownDecoder(); + + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + NS_DECL_NSIMEMORYREPORTER + + friend struct ::mozilla::WebAudioDecodeJob; + + bool CheckClosed(ErrorResult& aRv); + + nsTArray<MediaStream*> GetAllStreams() const; + +private: + // Each AudioContext has an id, that is passed down the MediaStreams that + // back the AudioNodes, so we can easily compute the set of all the + // MediaStreams for a given context, on the MediasStreamGraph side. + const AudioContextId mId; + // Note that it's important for mSampleRate to be initialized before + // mDestination, as mDestination's constructor needs to access it! + const float mSampleRate; + AudioContextState mAudioContextState; + RefPtr<AudioDestinationNode> mDestination; + RefPtr<AudioListener> mListener; + nsTArray<RefPtr<WebAudioDecodeJob> > mDecodeJobs; + // This array is used to keep the suspend/resume/close promises alive until + // they are resolved, so we can safely pass them accross threads. + nsTArray<RefPtr<Promise>> mPromiseGripArray; + // See RegisterActiveNode. 
These will keep the AudioContext alive while it + // is rendering and the window remains alive. + nsTHashtable<nsRefPtrHashKey<AudioNode> > mActiveNodes; + // Raw (non-owning) references to all AudioNodes for this AudioContext. + nsTHashtable<nsPtrHashKey<AudioNode> > mAllNodes; + // Hashsets containing all the PannerNodes, to compute the doppler shift. + // These are weak pointers. + nsTHashtable<nsPtrHashKey<PannerNode> > mPannerNodes; + // Cache to avoid recomputing basic waveforms all the time. + RefPtr<BasicWaveFormCache> mBasicWaveFormCache; + // Number of channels passed in the OfflineAudioContext ctor. + uint32_t mNumberOfChannels; + bool mIsOffline; + bool mIsStarted; + bool mIsShutDown; + // Close has been called, reject suspend and resume call. + bool mCloseCalled; + // Suspend has been called with no following resume. + bool mSuspendCalled; +}; + +static const dom::AudioContext::AudioContextId NO_AUDIO_CONTEXT = 0; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/AudioDestinationNode.cpp b/dom/media/webaudio/AudioDestinationNode.cpp new file mode 100644 index 000000000..29a9de736 --- /dev/null +++ b/dom/media/webaudio/AudioDestinationNode.cpp @@ -0,0 +1,680 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioDestinationNode.h" +#include "AlignmentUtils.h" +#include "AudioContext.h" +#include "mozilla/dom/AudioDestinationNodeBinding.h" +#include "mozilla/dom/ScriptSettings.h" +#include "mozilla/Services.h" +#include "AudioChannelAgent.h" +#include "AudioChannelService.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "MediaStreamGraph.h" +#include "OfflineAudioCompletionEvent.h" +#include "nsContentUtils.h" +#include "nsIInterfaceRequestorUtils.h" +#include "nsIDocShell.h" +#include "nsIPermissionManager.h" +#include "nsIScriptObjectPrincipal.h" +#include "nsServiceManagerUtils.h" +#include "mozilla/dom/Promise.h" + +namespace mozilla { +namespace dom { + +static uint8_t gWebAudioOutputKey; + +class OfflineDestinationNodeEngine final : public AudioNodeEngine +{ +public: + OfflineDestinationNodeEngine(AudioDestinationNode* aNode, + uint32_t aNumberOfChannels, + uint32_t aLength, + float aSampleRate) + : AudioNodeEngine(aNode) + , mWriteIndex(0) + , mNumberOfChannels(aNumberOfChannels) + , mLength(aLength) + , mSampleRate(aSampleRate) + , mBufferAllocated(false) + { + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + // Do this just for the sake of political correctness; this output + // will not go anywhere. + *aOutput = aInput; + + // The output buffer is allocated lazily, on the rendering thread, when + // non-null input is received. + if (!mBufferAllocated && !aInput.IsNull()) { + // These allocations might fail if content provides a huge number of + // channels or size, but it's OK since we'll deal with the failure + // gracefully. + mBuffer = ThreadSharedFloatArrayBufferList:: + Create(mNumberOfChannels, mLength, fallible); + if (mBuffer && mWriteIndex) { + // Zero leading for any null chunks that were skipped. 
+ for (uint32_t i = 0; i < mNumberOfChannels; ++i) { + float* channelData = mBuffer->GetDataForWrite(i); + PodZero(channelData, mWriteIndex); + } + } + + mBufferAllocated = true; + } + + // Skip copying if there is no buffer. + uint32_t outputChannelCount = mBuffer ? mNumberOfChannels : 0; + + // Record our input buffer + MOZ_ASSERT(mWriteIndex < mLength, "How did this happen?"); + const uint32_t duration = std::min(WEBAUDIO_BLOCK_SIZE, mLength - mWriteIndex); + const uint32_t inputChannelCount = aInput.ChannelCount(); + for (uint32_t i = 0; i < outputChannelCount; ++i) { + float* outputData = mBuffer->GetDataForWrite(i) + mWriteIndex; + if (aInput.IsNull() || i >= inputChannelCount) { + PodZero(outputData, duration); + } else { + const float* inputBuffer = static_cast<const float*>(aInput.mChannelData[i]); + if (duration == WEBAUDIO_BLOCK_SIZE && IS_ALIGNED16(inputBuffer)) { + // Use the optimized version of the copy with scale operation + AudioBlockCopyChannelWithScale(inputBuffer, aInput.mVolume, + outputData); + } else { + if (aInput.mVolume == 1.0f) { + PodCopy(outputData, inputBuffer, duration); + } else { + for (uint32_t j = 0; j < duration; ++j) { + outputData[j] = aInput.mVolume * inputBuffer[j]; + } + } + } + } + } + mWriteIndex += duration; + + if (mWriteIndex >= mLength) { + NS_ASSERTION(mWriteIndex == mLength, "Overshot length"); + // Go to finished state. When the graph's current time eventually reaches + // the end of the stream, then the main thread will be notified and we'll + // shut down the AudioContext. + *aFinished = true; + } + } + + bool IsActive() const override + { + // Keep processing to track stream time, which is used for all timelines + // associated with the same AudioContext. 
+ return true; + } + + + class OnCompleteTask final : public Runnable + { + public: + OnCompleteTask(AudioContext* aAudioContext, AudioBuffer* aRenderedBuffer) + : mAudioContext(aAudioContext) + , mRenderedBuffer(aRenderedBuffer) + {} + + NS_IMETHOD Run() override + { + RefPtr<OfflineAudioCompletionEvent> event = + new OfflineAudioCompletionEvent(mAudioContext, nullptr, nullptr); + event->InitEvent(mRenderedBuffer); + mAudioContext->DispatchTrustedEvent(event); + + return NS_OK; + } + private: + RefPtr<AudioContext> mAudioContext; + RefPtr<AudioBuffer> mRenderedBuffer; + }; + + void FireOfflineCompletionEvent(AudioDestinationNode* aNode) + { + AudioContext* context = aNode->Context(); + context->Shutdown(); + // Shutdown drops self reference, but the context is still referenced by aNode, + // which is strongly referenced by the runnable that called + // AudioDestinationNode::FireOfflineCompletionEvent. + + // Create the input buffer + ErrorResult rv; + RefPtr<AudioBuffer> renderedBuffer = + AudioBuffer::Create(context, mNumberOfChannels, mLength, mSampleRate, + mBuffer.forget(), rv); + if (rv.Failed()) { + rv.SuppressException(); + return; + } + + aNode->ResolvePromise(renderedBuffer); + + RefPtr<OnCompleteTask> onCompleteTask = + new OnCompleteTask(context, renderedBuffer); + NS_DispatchToMainThread(onCompleteTask); + + context->OnStateChanged(nullptr, AudioContextState::Closed); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + if (mBuffer) { + amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + // The input to the destination node is recorded in mBuffer. 
  // When this buffer fills up with mLength frames, the buffered input is sent
  // to the main thread in order to dispatch OfflineAudioCompletionEvent.
  RefPtr<ThreadSharedFloatArrayBufferList> mBuffer;
  // An index representing the next offset in mBuffer to be written to.
  uint32_t mWriteIndex;
  uint32_t mNumberOfChannels;
  // How many frames the OfflineAudioContext intends to produce.
  uint32_t mLength;
  float mSampleRate;
  bool mBufferAllocated;
};

// Runnable dispatched from the graph thread to the main thread when the
// destination's input transitions between silent and audible; it forwards
// the new state to AudioDestinationNode::InputMuted().
class InputMutedRunnable final : public Runnable
{
public:
  InputMutedRunnable(AudioNodeStream* aStream,
                     bool aInputMuted)
    : mStream(aStream)
    , mInputMuted(aInputMuted)
  {
  }

  NS_IMETHOD Run() override
  {
    MOZ_ASSERT(NS_IsMainThread());
    RefPtr<AudioNode> node = mStream->Engine()->NodeMainThread();

    // The node may already have been unlinked from the stream on the main
    // thread; only notify if it is still alive.
    if (node) {
      RefPtr<AudioDestinationNode> destinationNode =
        static_cast<AudioDestinationNode*>(node.get());
      destinationNode->InputMuted(mInputMuted);
    }
    return NS_OK;
  }

private:
  RefPtr<AudioNodeStream> mStream;
  bool mInputMuted;
};

// Engine for a realtime (non-offline) AudioDestinationNode: passes its input
// through scaled by the current volume, and reports muted/audible input
// transitions to the main thread (used for audio-channel bookkeeping).
class DestinationNodeEngine final : public AudioNodeEngine
{
public:
  explicit DestinationNodeEngine(AudioDestinationNode* aNode)
    : AudioNodeEngine(aNode)
    , mVolume(1.0f)
    // Start in the "muted" state so the first audible block triggers a
    // notification.
    , mLastInputMuted(true)
    , mSuspended(false)
  {
    MOZ_ASSERT(aNode);
  }

  void ProcessBlock(AudioNodeStream* aStream,
                    GraphTime aFrom,
                    const AudioBlock& aInput,
                    AudioBlock* aOutput,
                    bool* aFinished) override
  {
    // Pass the input through, applying the destination volume.
    *aOutput = aInput;
    aOutput->mVolume *= mVolume;

    // While suspended we keep copying data (to track stream time) but do not
    // report input-muted transitions.
    if (mSuspended) {
      return;
    }

    bool newInputMuted = aInput.IsNull() || aInput.IsMuted();
    if (newInputMuted != mLastInputMuted) {
      mLastInputMuted = newInputMuted;

      RefPtr<InputMutedRunnable> runnable =
        new InputMutedRunnable(aStream, newInputMuted);
      aStream->Graph()->
        DispatchToMainThreadAfterStreamStateUpdate(runnable.forget());
    }
  }

  bool IsActive() const override
  {
    // Keep processing to track stream time, which is used for all timelines
    // associated with the same AudioContext. If there are no other engines
    // for the AudioContext, then this could return false to suspend the
    // stream, but the stream is blocked anyway through
    // AudioDestinationNode::SetIsOnlyNodeForContext().
    return true;
  }

  void SetDoubleParameter(uint32_t aIndex, double aParam) override
  {
    if (aIndex == VOLUME) {
      mVolume = aParam;
    }
  }

  void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override
  {
    if (aIndex == SUSPENDED) {
      mSuspended = !!aParam;
      if (mSuspended) {
        // Reset to "muted" so resuming re-reports an audible input.
        mLastInputMuted = true;
      }
    }
  }

  enum Parameters {
    VOLUME,
    SUSPENDED,
  };

  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
  {
    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
  }

private:
  float mVolume;
  bool mLastInputMuted;
  bool mSuspended;
};

NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDestinationNode, AudioNode,
                                   mAudioChannelAgent,
                                   mOfflineRenderingPromise)

NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioDestinationNode)
  NS_INTERFACE_MAP_ENTRY(nsIAudioChannelAgentCallback)
NS_INTERFACE_MAP_END_INHERITING(AudioNode)

NS_IMPL_ADDREF_INHERITED(AudioDestinationNode, AudioNode)
NS_IMPL_RELEASE_INHERITED(AudioDestinationNode, AudioNode)

// Constructor: picks a realtime or non-realtime MediaStreamGraph depending on
// aIsOffline, and installs the matching engine (offline engines buffer output
// for OfflineAudioCompletionEvent; realtime engines just apply volume).
AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
                                           bool aIsOffline,
                                           AudioChannel aChannel,
                                           uint32_t aNumberOfChannels,
                                           uint32_t aLength, float aSampleRate)
  : AudioNode(aContext, aIsOffline ? aNumberOfChannels : 2,
              ChannelCountMode::Explicit, ChannelInterpretation::Speakers)
  , mFramesToProduce(aLength)
  , mAudioChannel(AudioChannel::Normal)
  , mIsOffline(aIsOffline)
  , mAudioChannelSuspended(false)
  , mCaptured(false)
{
  MediaStreamGraph* graph = aIsOffline ?
                            MediaStreamGraph::CreateNonRealtimeInstance(aSampleRate) :
                            MediaStreamGraph::GetInstance(MediaStreamGraph::AUDIO_THREAD_DRIVER, aChannel);
  AudioNodeEngine* engine = aIsOffline ?
                            new OfflineDestinationNodeEngine(this, aNumberOfChannels,
                                                             aLength, aSampleRate) :
                            static_cast<AudioNodeEngine*>(new DestinationNodeEngine(this));

  AudioNodeStream::Flags flags =
    AudioNodeStream::NEED_MAIN_THREAD_CURRENT_TIME |
    AudioNodeStream::NEED_MAIN_THREAD_FINISHED |
    AudioNodeStream::EXTERNAL_OUTPUT;
  mStream = AudioNodeStream::Create(aContext, engine, flags, graph);
  mStream->AddMainThreadListener(this);
  mStream->AddAudioOutput(&gWebAudioOutputKey);

  if (!aIsOffline) {
    graph->NotifyWhenGraphStarted(mStream);
  }

  if (aChannel != AudioChannel::Normal) {
    ErrorResult rv;
    SetMozAudioChannelType(aChannel, rv);
  }
}

AudioDestinationNode::~AudioDestinationNode()
{
}

size_t
AudioDestinationNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
  size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf);
  // Might be useful in the future:
  // - mAudioChannelAgent
  return amount;
}

size_t
AudioDestinationNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
{
  return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
}

// Tears down the audio-channel agent for realtime contexts; offline contexts
// never create one (see CreateAudioChannelAgent).
void
AudioDestinationNode::DestroyAudioChannelAgent()
{
  if (mAudioChannelAgent && !Context()->IsOffline()) {
    mAudioChannelAgent->NotifyStoppedPlaying();
    mAudioChannelAgent = nullptr;
  }
}

void
AudioDestinationNode::DestroyMediaStream()
{
  DestroyAudioChannelAgent();

  if (!mStream)
    return;

  mStream->RemoveMainThreadListener(this);
  MediaStreamGraph* graph = mStream->Graph();
  // A non-realtime graph was created by and belongs to this node (see the
  // constructor), so destroy it along with the stream.
  if (graph->IsNonRealtime()) {
    MediaStreamGraph::DestroyNonRealtimeInstance(graph);
  }
  AudioNode::DestroyMediaStream();
}

void
AudioDestinationNode::NotifyMainThreadStreamFinished()
{
  MOZ_ASSERT(mStream->IsFinished());

  if (mIsOffline) {
    NS_DispatchToCurrentThread(NewRunnableMethod(this,
      &AudioDestinationNode::FireOfflineCompletionEvent));
  }
}

// Only valid for offline contexts: the engine is then an
// OfflineDestinationNodeEngine (installed by the constructor).
void
AudioDestinationNode::FireOfflineCompletionEvent()
{
  OfflineDestinationNodeEngine* engine =
    static_cast<OfflineDestinationNodeEngine*>(Stream()->Engine());
  engine->FireOfflineCompletionEvent(this);
}

void
AudioDestinationNode::ResolvePromise(AudioBuffer* aRenderedBuffer)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(mIsOffline);
  mOfflineRenderingPromise->MaybeResolve(aRenderedBuffer);
}

uint32_t
AudioDestinationNode::MaxChannelCount() const
{
  return Context()->MaxChannelCount();
}

void
AudioDestinationNode::SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv)
{
  if (aChannelCount > MaxChannelCount()) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return;
  }

  AudioNode::SetChannelCount(aChannelCount, aRv);
}

// Mute/Unmute drive the realtime engine's VOLUME parameter (0 or 1); the
// user-visible window volume is handled separately in WindowVolumeChanged.
void
AudioDestinationNode::Mute()
{
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToStream(DestinationNodeEngine::VOLUME, 0.0f);
}

void
AudioDestinationNode::Unmute()
{
  MOZ_ASSERT(Context() && !Context()->IsOffline());
  SendDoubleParameterToStream(DestinationNodeEngine::VOLUME, 1.0f);
}

void
AudioDestinationNode::Suspend()
{
  DestroyAudioChannelAgent();
  SendInt32ParameterToStream(DestinationNodeEngine::SUSPENDED, 1);
}

void
AudioDestinationNode::Resume()
{
  CreateAudioChannelAgent();
  SendInt32ParameterToStream(DestinationNodeEngine::SUSPENDED, 0);
}

void
AudioDestinationNode::OfflineShutdown()
{
  MOZ_ASSERT(Context() && Context()->IsOffline(),
             "Should only be called on a valid OfflineAudioContext");

  MediaStreamGraph::DestroyNonRealtimeInstance(mStream->Graph());
  // Release the self-reference taken in StartRendering().
  mOfflineRenderingRef.Drop(this);
}

JSObject*
AudioDestinationNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
{
  return AudioDestinationNodeBinding::Wrap(aCx, this, aGivenProto);
}

// Kicks off offline rendering; the node keeps itself alive (mOfflineRenderingRef)
// until OfflineShutdown(), and resolves aPromise via ResolvePromise().
void
AudioDestinationNode::StartRendering(Promise* aPromise)
{
  mOfflineRenderingPromise = aPromise;
  mOfflineRenderingRef.Take(this);
  mStream->Graph()->StartNonRealtimeProcessing(mFramesToProduce);
}

NS_IMETHODIMP
AudioDestinationNode::WindowVolumeChanged(float
aVolume, bool aMuted)
{
  if (!mStream) {
    return NS_OK;
  }

  // A muted window forces volume 0 regardless of aVolume.
  float volume = aMuted ? 0.0 : aVolume;
  mStream->SetAudioOutputVolume(&gWebAudioOutputKey, volume);
  return NS_OK;
}

// nsIAudioChannelAgentCallback: fired when the window's suspend state changes.
// Dispatches mozinterruptbegin/mozinterruptend and silences/enables the track.
NS_IMETHODIMP
AudioDestinationNode::WindowSuspendChanged(nsSuspendedTypes aSuspend)
{
  if (!mStream) {
    return NS_OK;
  }

  bool suspended = (aSuspend != nsISuspendedTypes::NONE_SUSPENDED);
  if (mAudioChannelSuspended == suspended) {
    return NS_OK;
  }

  mAudioChannelSuspended = suspended;
  Context()->DispatchTrustedEvent(!suspended ?
    NS_LITERAL_STRING("mozinterruptend") :
    NS_LITERAL_STRING("mozinterruptbegin"));

  DisabledTrackMode disabledMode = suspended ? DisabledTrackMode::SILENCE_BLACK
                                             : DisabledTrackMode::ENABLED;
  mStream->SetTrackEnabled(AudioNodeStream::AUDIO_TRACK, disabledMode);
  return NS_OK;
}

// nsIAudioChannelAgentCallback: connects/disconnects this stream to the
// window's capture stream when tab audio capture is toggled.
NS_IMETHODIMP
AudioDestinationNode::WindowAudioCaptureChanged(bool aCapture)
{
  MOZ_ASSERT(mAudioChannelAgent);

  if (!mStream || Context()->IsOffline()) {
    return NS_OK;
  }

  nsCOMPtr<nsPIDOMWindowInner> ownerWindow = GetOwner();
  if (!ownerWindow) {
    return NS_OK;
  }

  if (aCapture != mCaptured) {
    if (aCapture) {
      nsCOMPtr<nsPIDOMWindowInner> window = Context()->GetParentObject();
      uint64_t id = window->WindowID();
      mCaptureStreamPort =
        mStream->Graph()->ConnectToCaptureStream(id, mStream);
    } else {
      mCaptureStreamPort->Destroy();
    }
    mCaptured = aCapture;
  }

  return NS_OK;
}

AudioChannel
AudioDestinationNode::MozAudioChannelType() const
{
  return mAudioChannel;
}

// Changes the b2g/moz audio channel; rejected for offline contexts and for
// channels the page lacks permission to use.
void
AudioDestinationNode::SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv)
{
  if (Context()->IsOffline()) {
    aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR);
    return;
  }

  if (aValue != mAudioChannel &&
      CheckAudioChannelPermissions(aValue)) {
    mAudioChannel = aValue;

    if (mStream) {
      mStream->SetAudioChannelType(mAudioChannel);
    }

    // Re-create the agent so it registers under the new channel type.
    if (mAudioChannelAgent) {
      CreateAudioChannelAgent();
    }
  }
}

// Returns true if the page may use audio channel aValue: the normal/default
// channels are always allowed, anything else needs an
// "audio-channel-<name>" permission on the window's principal.
bool
AudioDestinationNode::CheckAudioChannelPermissions(AudioChannel aValue)
{
  // Only normal channel doesn't need permission.
  if (aValue == AudioChannel::Normal) {
    return true;
  }

  // Maybe this audio channel is equal to the default one.
  if (aValue == AudioChannelService::GetDefaultAudioChannel()) {
    return true;
  }

  nsCOMPtr<nsIPermissionManager> permissionManager =
    services::GetPermissionManager();
  if (!permissionManager) {
    return false;
  }

  nsCOMPtr<nsIScriptObjectPrincipal> sop = do_QueryInterface(GetOwner());
  NS_ASSERTION(sop, "Window didn't QI to nsIScriptObjectPrincipal!");
  nsCOMPtr<nsIPrincipal> principal = sop->GetPrincipal();

  uint32_t perm = nsIPermissionManager::UNKNOWN_ACTION;

  nsCString channel;
  channel.AssignASCII(AudioChannelValues::strings[uint32_t(aValue)].value,
                      AudioChannelValues::strings[uint32_t(aValue)].length);
  permissionManager->TestExactPermissionFromPrincipal(principal,
    nsCString(NS_LITERAL_CSTRING("audio-channel-") + channel).get(),
    &perm);

  return perm == nsIPermissionManager::ALLOW_ACTION;
}

// (Re)creates the audio-channel agent for realtime contexts; any existing
// agent is first told that playback stopped. No-op for offline contexts.
nsresult
AudioDestinationNode::CreateAudioChannelAgent()
{
  if (mIsOffline) {
    return NS_OK;
  }

  nsresult rv = NS_OK;
  if (mAudioChannelAgent) {
    rv = mAudioChannelAgent->NotifyStoppedPlaying();
    if (NS_WARN_IF(NS_FAILED(rv))) {
      return rv;
    }
  }

  mAudioChannelAgent = new AudioChannelAgent();
  rv = mAudioChannelAgent->InitWithWeakCallback(GetOwner(),
                                                static_cast<int32_t>(mAudioChannel),
                                                this);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  return NS_OK;
}

// Called (via InputMutedRunnable) when the graph reports that the
// destination's input became silent (aMuted=true) or audible (aMuted=false);
// starts/stops audio-channel "playing" accounting accordingly.
void
AudioDestinationNode::InputMuted(bool aMuted)
{
  MOZ_ASSERT(Context() && !Context()->IsOffline());

  if (!mAudioChannelAgent) {
    if (aMuted) {
      return;
    }
    CreateAudioChannelAgent();
  }

  if (aMuted) {
    mAudioChannelAgent->NotifyStoppedPlaying();
    return;
  }

  AudioPlaybackConfig config;
  nsresult rv = mAudioChannelAgent->NotifyStartedPlaying(&config,
                                                         AudioChannelService::AudibleState::eAudible);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return;
  }

  // Apply the volume/suspend state the agent reports for this window.
  WindowVolumeChanged(config.mVolume, config.mMuted);
  WindowSuspendChanged(config.mSuspend);
}

} // namespace dom
} // namespace mozilla
diff --git a/dom/media/webaudio/AudioDestinationNode.h b/dom/media/webaudio/AudioDestinationNode.h
new file mode 100644
index 000000000..cf0db7862
--- /dev/null
+++ b/dom/media/webaudio/AudioDestinationNode.h
@@ -0,0 +1,115 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef AudioDestinationNode_h_
#define AudioDestinationNode_h_

#include "mozilla/dom/AudioChannelBinding.h"
#include "AudioNode.h"
#include "nsIAudioChannelAgent.h"

namespace mozilla {
namespace dom {

class AudioContext;

// The terminal node of a Web Audio graph. In realtime mode it feeds the audio
// output and participates in audio-channel (volume/suspend/capture) policy;
// in offline mode it accumulates rendered frames and resolves the
// OfflineAudioContext rendering promise.
class AudioDestinationNode final : public AudioNode
                                 , public nsIAudioChannelAgentCallback
                                 , public MainThreadMediaStreamListener
{
public:
  // This node type knows what MediaStreamGraph to use based on
  // whether it's in offline mode.
  AudioDestinationNode(AudioContext* aContext,
                       bool aIsOffline,
                       AudioChannel aChannel = AudioChannel::Normal,
                       uint32_t aNumberOfChannels = 0,
                       uint32_t aLength = 0,
                       float aSampleRate = 0.0f);

  void DestroyMediaStream() override;

  NS_DECL_ISUPPORTS_INHERITED
  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioDestinationNode, AudioNode)
  NS_DECL_NSIAUDIOCHANNELAGENTCALLBACK

  JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;

  // A destination has no outgoing AudioNode connections.
  uint16_t NumberOfOutputs() const final override
  {
    return 0;
  }

  uint32_t MaxChannelCount() const;
  void SetChannelCount(uint32_t aChannelCount,
                       ErrorResult& aRv) override;

  // Returns the stream or null after unlink.
  AudioNodeStream* Stream() { return mStream; }

  void Mute();
  void Unmute();

  void Suspend();
  void Resume();

  void StartRendering(Promise* aPromise);

  void OfflineShutdown();

  AudioChannel MozAudioChannelType() const;

  void NotifyMainThreadStreamFinished() override;
  void FireOfflineCompletionEvent();

  nsresult CreateAudioChannelAgent();
  void DestroyAudioChannelAgent();

  const char* NodeType() const override
  {
    return "AudioDestinationNode";
  }

  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;

  void InputMuted(bool aInputMuted);
  void ResolvePromise(AudioBuffer* aRenderedBuffer);

  // Number of frames an offline context will render (offline mode only).
  unsigned long Length()
  {
    MOZ_ASSERT(mIsOffline);
    return mFramesToProduce;
  }

protected:
  virtual ~AudioDestinationNode();

private:
  void SetMozAudioChannelType(AudioChannel aValue, ErrorResult& aRv);
  bool CheckAudioChannelPermissions(AudioChannel aValue);

  // Keeps the node alive while an offline rendering is in flight.
  SelfReference<AudioDestinationNode> mOfflineRenderingRef;
  uint32_t mFramesToProduce;

  nsCOMPtr<nsIAudioChannelAgent> mAudioChannelAgent;
  RefPtr<MediaInputPort> mCaptureStreamPort;

  RefPtr<Promise> mOfflineRenderingPromise;

  // Audio Channel Type.
  AudioChannel mAudioChannel;
  bool mIsOffline;
  bool mAudioChannelSuspended;

  bool mCaptured;
};

} // namespace dom
} // namespace mozilla

#endif

diff --git a/dom/media/webaudio/AudioEventTimeline.cpp b/dom/media/webaudio/AudioEventTimeline.cpp
new file mode 100644
index 000000000..a6a7bbf66
--- /dev/null
+++ b/dom/media/webaudio/AudioEventTimeline.cpp
@@ -0,0 +1,315 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 */

#include "AudioEventTimeline.h"

#include "mozilla/ErrorResult.h"

// Linear interpolation between (t0, v0) and (t1, v1), evaluated at t.
static float LinearInterpolate(double t0, float v0, double t1, float v1, double t)
{
  return v0 + (v1 - v0) * ((t - t0) / (t1 - t0));
}

// Exponential interpolation between (t0, v0) and (t1, v1), evaluated at t.
// Requires v0 and v1 to have the same sign and be non-zero (enforced by
// AudioEventTimeline::ValidateEvent for exponential ramps).
static float ExponentialInterpolate(double t0, float v0, double t1, float v1, double t)
{
  return v0 * powf(v1 / v0, (t - t0) / (t1 - t0));
}

// First-order exponential approach from v0 (at t0) toward target v1 with the
// given time constant; a (fuzzy) zero time constant jumps straight to v1.
static float ExponentialApproach(double t0, double v0, float v1, double timeConstant, double t)
{
  if (!mozilla::dom::WebAudioUtils::FuzzyEqual(timeConstant, 0.0)) {
    return v1 + (v0 - v1) * expf(-(t - t0) / timeConstant);
  } else {
    return v1;
  }
}

// Samples a SetValueCurve event's curve at time t: linearly interpolates
// between adjacent curve points within [startTime, startTime + duration],
// and clamps to the last curve value afterwards.
static float ExtractValueFromCurve(double startTime, float* aCurve, uint32_t aCurveLength, double duration, double t)
{
  if (t >= startTime + duration) {
    // After the duration, return the last curve value
    return aCurve[aCurveLength - 1];
  }
  double ratio = std::max((t - startTime) / duration, 0.0);
  if (ratio >= 1.0) {
    return aCurve[aCurveLength - 1];
  }
  uint32_t current = uint32_t(floor((aCurveLength - 1) * ratio));
  uint32_t next = current + 1;
  double step = duration / double(aCurveLength - 1);
  if (next < aCurveLength) {
    double t0 = current * step;
    double t1 = next * step;
    return LinearInterpolate(t0, aCurve[current], t1, aCurve[next], t - startTime);
  } else {
    return aCurve[current];
  }
}

namespace mozilla {
namespace dom {

// This method computes the AudioParam value at a given time based on the event timeline
template<class TimeType> void
AudioEventTimeline::GetValuesAtTimeHelper(TimeType aTime, float* aBuffer,
                                          const size_t aSize)
{
  MOZ_ASSERT(aBuffer);
  MOZ_ASSERT(aSize);

  auto TimeOf = [](const AudioTimelineEvent& aEvent) -> TimeType {
    return aEvent.template Time<TimeType>();
  };

  size_t eventIndex = 0;
  const AudioTimelineEvent* previous = nullptr;

  // Let's remove old events except the last one: we need it to calculate some curves.
  CleanupEventsOlderThan(aTime);

  // Fill aBuffer with one value per tick, advancing aTime by one tick per
  // output sample and walking forward through mEvents in lockstep.
  for (size_t bufferIndex = 0; bufferIndex < aSize; ++bufferIndex, ++aTime) {

    bool timeMatchesEventIndex = false;
    const AudioTimelineEvent* next;
    // Advance eventIndex until `next` is the first event at or after aTime.
    for (; ; ++eventIndex) {

      if (eventIndex >= mEvents.Length()) {
        next = nullptr;
        break;
      }

      next = &mEvents[eventIndex];
      if (aTime < TimeOf(*next)) {
        break;
      }

#ifdef DEBUG
      MOZ_ASSERT(next->mType == AudioTimelineEvent::SetValueAtTime ||
                 next->mType == AudioTimelineEvent::SetTarget ||
                 next->mType == AudioTimelineEvent::LinearRamp ||
                 next->mType == AudioTimelineEvent::ExponentialRamp ||
                 next->mType == AudioTimelineEvent::SetValueCurve);
#endif

      if (TimesEqual(aTime, TimeOf(*next))) {
        mLastComputedValue = mComputedValue;
        // Find the last event with the same time
        while (eventIndex < mEvents.Length() - 1 &&
               TimesEqual(aTime, TimeOf(mEvents[eventIndex + 1]))) {
          mLastComputedValue = GetValueAtTimeOfEvent<TimeType>(&mEvents[eventIndex]);
          ++eventIndex;
        }

        timeMatchesEventIndex = true;
        break;
      }

      previous = next;
    }

    if (timeMatchesEventIndex) {
      // The time matches one of the events exactly.
      MOZ_ASSERT(TimesEqual(aTime, TimeOf(mEvents[eventIndex])));
      mComputedValue = GetValueAtTimeOfEvent<TimeType>(&mEvents[eventIndex]);
    } else {
      mComputedValue = GetValuesAtTimeHelperInternal(aTime, previous, next);
    }

    aBuffer[bufferIndex] = mComputedValue;
  }
}
template void
AudioEventTimeline::GetValuesAtTimeHelper(double aTime, float* aBuffer,
                                          const size_t aSize);
template void
AudioEventTimeline::GetValuesAtTimeHelper(int64_t aTime, float* aBuffer,
                                          const size_t aSize);

// Value of the timeline exactly at an event's own time. For SetTarget the
// approach starts at that instant, so this evaluates to the starting value
// (mLastComputedValue); for SetValueCurve it is the first curve sample.
template<class TimeType> float
AudioEventTimeline::GetValueAtTimeOfEvent(const AudioTimelineEvent* aNext)
{
  TimeType time = aNext->template Time<TimeType>();
  switch (aNext->mType) {
    case AudioTimelineEvent::SetTarget:
      // SetTarget nodes can be handled no matter what their next node is
      // (if they have one).
      // Follow the curve, without regard to the next event, starting at
      // the last value of the last event.
      return ExponentialApproach(time,
                                 mLastComputedValue, aNext->mValue,
                                 aNext->mTimeConstant, time);
      break;
    case AudioTimelineEvent::SetValueCurve:
      // SetValueCurve events can be handled no matter what their event
      // node is (if they have one)
      return ExtractValueFromCurve(time,
                                   aNext->mCurve,
                                   aNext->mCurveLength,
                                   aNext->mDuration, time);
      break;
    default:
      // For other event types
      return aNext->mValue;
  }
}

// Value of the timeline at aTime, strictly between (or outside) event times.
// aPrevious is the latest event at or before aTime (or null), aNext the
// earliest event after aTime (or null).
template<class TimeType> float
AudioEventTimeline::GetValuesAtTimeHelperInternal(TimeType aTime,
                                                  const AudioTimelineEvent* aPrevious,
                                                  const AudioTimelineEvent* aNext)
{
  // If the requested time is before all of the existing events
  if (!aPrevious) {
    return mValue;
  }

  auto TimeOf = [](const AudioTimelineEvent* aEvent) -> TimeType {
    return aEvent->template Time<TimeType>();
  };

  // SetTarget nodes can be handled no matter what their next node is (if
  // they have one)
  if (aPrevious->mType == AudioTimelineEvent::SetTarget) {
    return ExponentialApproach(TimeOf(aPrevious),
                               mLastComputedValue, aPrevious->mValue,
                               aPrevious->mTimeConstant, aTime);
  }

  // SetValueCurve events can be handled no matter what their next node is
  // (if they have one)
  if (aPrevious->mType == AudioTimelineEvent::SetValueCurve) {
    return ExtractValueFromCurve(TimeOf(aPrevious),
                                 aPrevious->mCurve, aPrevious->mCurveLength,
                                 aPrevious->mDuration, aTime);
  }

  // If the requested time is after all of the existing events
  if (!aNext) {
    switch (aPrevious->mType) {
      case AudioTimelineEvent::SetValueAtTime:
      case AudioTimelineEvent::LinearRamp:
      case AudioTimelineEvent::ExponentialRamp:
        // The value will be constant after the last event
        return aPrevious->mValue;
      case AudioTimelineEvent::SetValueCurve:
        return ExtractValueFromCurve(TimeOf(aPrevious),
                                     aPrevious->mCurve, aPrevious->mCurveLength,
                                     aPrevious->mDuration, aTime);
      case AudioTimelineEvent::SetTarget:
        MOZ_FALLTHROUGH_ASSERT("AudioTimelineEvent::SetTarget");
      case AudioTimelineEvent::SetValue:
      case AudioTimelineEvent::Cancel:
      case AudioTimelineEvent::Stream:
        MOZ_ASSERT(false, "Should have been handled earlier.");
    }
    // NOTE(review): in release builds (where MOZ_ASSERT is a no-op) control
    // falls out of this block and dereferences the null aNext below; all
    // valid event types return above, so this path looks unreachable in
    // practice — confirm against upstream before changing.
    MOZ_ASSERT(false, "unreached");
  }

  // Finally, handle the case where we have both a previous and a next event

  // First, handle the case where our range ends up in a ramp event
  switch (aNext->mType) {
    case AudioTimelineEvent::LinearRamp:
      return LinearInterpolate(TimeOf(aPrevious),
                               aPrevious->mValue,
                               TimeOf(aNext),
                               aNext->mValue, aTime);

    case AudioTimelineEvent::ExponentialRamp:
      return ExponentialInterpolate(TimeOf(aPrevious),
                                    aPrevious->mValue,
                                    TimeOf(aNext),
                                    aNext->mValue, aTime);

    case AudioTimelineEvent::SetValueAtTime:
    case AudioTimelineEvent::SetTarget:
    case AudioTimelineEvent::SetValueCurve:
      break;
    case AudioTimelineEvent::SetValue:
    case AudioTimelineEvent::Cancel:
    case AudioTimelineEvent::Stream:
      MOZ_ASSERT(false, "Should have been handled earlier.");
  }

  // Now handle all other cases
  switch (aPrevious->mType) {
    case AudioTimelineEvent::SetValueAtTime:
    case AudioTimelineEvent::LinearRamp:
    case AudioTimelineEvent::ExponentialRamp:
      // If the next event type is neither linear or exponential ramp, the
      // value is constant.
      return aPrevious->mValue;
    case AudioTimelineEvent::SetValueCurve:
      return ExtractValueFromCurve(TimeOf(aPrevious),
                                   aPrevious->mCurve, aPrevious->mCurveLength,
                                   aPrevious->mDuration, aTime);
    case AudioTimelineEvent::SetTarget:
      MOZ_FALLTHROUGH_ASSERT("AudioTimelineEvent::SetTarget");
    case AudioTimelineEvent::SetValue:
    case AudioTimelineEvent::Cancel:
    case AudioTimelineEvent::Stream:
      MOZ_ASSERT(false, "Should have been handled earlier.");
  }

  MOZ_ASSERT(false, "unreached");
  return 0.0f;
}
template float
AudioEventTimeline::GetValuesAtTimeHelperInternal(double aTime,
                                                  const AudioTimelineEvent* aPrevious,
                                                  const AudioTimelineEvent* aNext);
template float
AudioEventTimeline::GetValuesAtTimeHelperInternal(int64_t aTime,
                                                  const AudioTimelineEvent* aPrevious,
                                                  const AudioTimelineEvent* aNext);

// Returns the latest event whose time is <= aTime (preferring the last of
// several events sharing that exact time), or null if aTime precedes all
// events. Main-thread only (uses double times).
const AudioTimelineEvent*
AudioEventTimeline::GetPreviousEvent(double aTime) const
{
  const AudioTimelineEvent* previous = nullptr;
  const AudioTimelineEvent* next = nullptr;

  auto TimeOf = [](const AudioTimelineEvent& aEvent) -> double {
    return aEvent.template Time<double>();
  };

  bool bailOut = false;
  for (unsigned i = 0; !bailOut && i < mEvents.Length(); ++i) {
    switch (mEvents[i].mType) {
      case AudioTimelineEvent::SetValueAtTime:
      case AudioTimelineEvent::SetTarget:
      case AudioTimelineEvent::LinearRamp:
      case AudioTimelineEvent::ExponentialRamp:
      case AudioTimelineEvent::SetValueCurve:
        if (aTime == TimeOf(mEvents[i])) {
          // Find the last event with the same time
          do {
            ++i;
          } while (i < mEvents.Length() &&
                   aTime == TimeOf(mEvents[i]));
          return &mEvents[i - 1];
        }
        previous = next;
        next = &mEvents[i];
        if (aTime < TimeOf(mEvents[i])) {
          bailOut = true;
        }
        break;
      default:
        MOZ_ASSERT(false, "unreached");
    }
  }
  // Handle the case where the time is past all of the events
  if (!bailOut) {
    previous = next;
  }

  return previous;
}

} // namespace dom
} // namespace mozilla

diff --git
a/dom/media/webaudio/AudioEventTimeline.h b/dom/media/webaudio/AudioEventTimeline.h
new file mode 100644
index 000000000..ae06ad4db
--- /dev/null
+++ b/dom/media/webaudio/AudioEventTimeline.h
@@ -0,0 +1,474 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim:set ts=2 sw=2 sts=2 et cindent: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef AudioEventTimeline_h_
#define AudioEventTimeline_h_

#include <algorithm>
#include "mozilla/Assertions.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/PodOperations.h"

#include "MainThreadUtils.h"
#include "nsTArray.h"
#include "math.h"
#include "WebAudioUtils.h"

namespace mozilla {

class MediaStream;

namespace dom {

// One scheduled automation event on an AudioParam timeline (setValueAtTime,
// ramps, setTargetAtTime, setValueCurveAtTime, a connected MediaStream, or a
// cancel marker).
struct AudioTimelineEvent final
{
  enum Type : uint32_t
  {
    SetValue,
    SetValueAtTime,
    LinearRamp,
    ExponentialRamp,
    SetTarget,
    SetValueCurve,
    Stream,
    Cancel
  };

  AudioTimelineEvent(Type aType, double aTime, float aValue, double aTimeConstant = 0.0,
                     double aDuration = 0.0, const float* aCurve = nullptr,
                     uint32_t aCurveLength = 0)
    : mType(aType)
    , mCurve(nullptr)
    , mTimeConstant(aTimeConstant)
    , mDuration(aDuration)
#ifdef DEBUG
    , mTimeIsInTicks(false)
#endif
  {
    mTime = aTime;
    // mValue and mCurveLength share a union: a curve event owns a copied
    // curve buffer instead of a single value.
    if (aType == AudioTimelineEvent::SetValueCurve) {
      SetCurveParams(aCurve, aCurveLength);
    } else {
      mValue = aValue;
    }
  }

  explicit AudioTimelineEvent(MediaStream* aStream)
    : mType(Stream)
    , mCurve(nullptr)
    , mStream(aStream)
    , mTimeConstant(0.0)
    , mDuration(0.0)
#ifdef DEBUG
    , mTimeIsInTicks(false)
#endif
  {
  }

  AudioTimelineEvent(const AudioTimelineEvent& rhs)
  {
    // NOTE(review): PodCopy on a type holding a RefPtr member is a raw byte
    // copy; the RefPtr is then re-constructed below for Stream events so no
    // double-release occurs, but this relies on mStream being null-bytes
    // for all other event types — confirm against upstream before touching.
    PodCopy(this, &rhs, 1);

    if (rhs.mType == AudioTimelineEvent::SetValueCurve) {
      // Deep-copy the curve so each event owns its buffer.
      SetCurveParams(rhs.mCurve, rhs.mCurveLength);
    } else if (rhs.mType == AudioTimelineEvent::Stream) {
      new (&mStream) decltype(mStream)(rhs.mStream);
    }
  }

  ~AudioTimelineEvent()
  {
    if (mType == AudioTimelineEvent::SetValueCurve) {
      delete[] mCurve;
    }
  }

  template <class TimeType>
  TimeType Time() const;

  // Irreversibly switches this event's time representation from seconds
  // (double) to graph ticks (int64_t); see the union note below.
  void SetTimeInTicks(int64_t aTimeInTicks)
  {
    mTimeInTicks = aTimeInTicks;
#ifdef DEBUG
    mTimeIsInTicks = true;
#endif
  }

  void SetCurveParams(const float* aCurve, uint32_t aCurveLength) {
    mCurveLength = aCurveLength;
    if (aCurveLength) {
      mCurve = new float[aCurveLength];
      PodCopy(mCurve, aCurve, aCurveLength);
    } else {
      mCurve = nullptr;
    }
  }

  Type mType;
  union {
    float mValue;
    uint32_t mCurveLength;
  };
  // mCurve contains a buffer of SetValueCurve samples. We sample the
  // values in the buffer depending on how far along we are in time.
  // If we're at time T and the event has started as time T0 and has a
  // duration of D, we sample the buffer at floor(mCurveLength*(T-T0)/D)
  // if T<T0+D, and just take the last sample in the buffer otherwise.
  float* mCurve;
  RefPtr<MediaStream> mStream;
  double mTimeConstant;
  double mDuration;
#ifdef DEBUG
  bool mTimeIsInTicks;
#endif

private:
  // This member is accessed using the `Time` method, for safety.
  //
  // The time for an event can either be in absolute value or in ticks.
  // Initially the time of the event is always in absolute value.
  // In order to convert it to ticks, call SetTimeInTicks. Once this
  // method has been called for an event, the time cannot be converted
  // back to absolute value.
  union {
    double mTime;
    int64_t mTimeInTicks;
  };
};

// Seconds representation: only valid before SetTimeInTicks (main thread).
template <>
inline double AudioTimelineEvent::Time<double>() const
{
  MOZ_ASSERT(!mTimeIsInTicks);
  return mTime;
}

// Ticks representation: only valid after SetTimeInTicks (graph thread).
template <>
inline int64_t AudioTimelineEvent::Time<int64_t>() const
{
  MOZ_ASSERT(!NS_IsMainThread());
  MOZ_ASSERT(mTimeIsInTicks);
  return mTimeInTicks;
}

/**
 * Some methods in this class will be instantiated with different ErrorResult
 * template arguments for testing and production code.
 *
 * ErrorResult is a type which satisfies the following:
 * - Implements a Throw() method taking an nsresult argument, representing an error code.
 */
class AudioEventTimeline
{
public:
  explicit AudioEventTimeline(float aDefaultValue)
    : mValue(aDefaultValue),
      mComputedValue(aDefaultValue),
      mLastComputedValue(aDefaultValue)
  { }

  // Validates a candidate event against the Web Audio rules: finite
  // time/value/duration, non-empty finite curves, no overlap between curve
  // events and other events, and strictly positive values around an
  // exponential ramp. Throws NS_ERROR_DOM_SYNTAX_ERR on violation.
  template <class ErrorResult>
  bool ValidateEvent(AudioTimelineEvent& aEvent, ErrorResult& aRv)
  {
    MOZ_ASSERT(NS_IsMainThread());

    auto TimeOf = [](const AudioTimelineEvent& aEvent) -> double {
      return aEvent.template Time<double>();
    };

    // Validate the event itself
    if (!WebAudioUtils::IsTimeValid(TimeOf(aEvent)) ||
        !WebAudioUtils::IsTimeValid(aEvent.mTimeConstant)) {
      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
      return false;
    }

    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
      if (!aEvent.mCurve || !aEvent.mCurveLength) {
        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
        return false;
      }
      for (uint32_t i = 0; i < aEvent.mCurveLength; ++i) {
        if (!IsValid(aEvent.mCurve[i])) {
          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
          return false;
        }
      }
    }

    bool timeAndValueValid = IsValid(aEvent.mValue) &&
                             IsValid(aEvent.mDuration);
    if (!timeAndValueValid) {
      aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
      return false;
    }

    // Make sure that non-curve events don't fall within the duration of a
    // curve event.
    for (unsigned i = 0; i < mEvents.Length(); ++i) {
      if (mEvents[i].mType == AudioTimelineEvent::SetValueCurve &&
          !(aEvent.mType == AudioTimelineEvent::SetValueCurve &&
            TimeOf(aEvent) == TimeOf(mEvents[i])) &&
          TimeOf(mEvents[i]) <= TimeOf(aEvent) &&
          TimeOf(mEvents[i]) + mEvents[i].mDuration >= TimeOf(aEvent)) {
        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
        return false;
      }
    }

    // Make sure that curve events don't fall in a range which includes other
    // events.
    if (aEvent.mType == AudioTimelineEvent::SetValueCurve) {
      for (unsigned i = 0; i < mEvents.Length(); ++i) {
        // In case we have two curve at the same time
        if (mEvents[i].mType == AudioTimelineEvent::SetValueCurve &&
            TimeOf(mEvents[i]) == TimeOf(aEvent)) {
          continue;
        }
        if (TimeOf(mEvents[i]) > TimeOf(aEvent) &&
            TimeOf(mEvents[i]) < TimeOf(aEvent) + aEvent.mDuration) {
          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
          return false;
        }
      }
    }

    // Make sure that invalid values are not used for exponential curves
    if (aEvent.mType == AudioTimelineEvent::ExponentialRamp) {
      if (aEvent.mValue <= 0.f) {
        aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
        return false;
      }
      const AudioTimelineEvent* previousEvent = GetPreviousEvent(TimeOf(aEvent));
      if (previousEvent) {
        if (previousEvent->mValue <= 0.f) {
          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
          return false;
        }
      } else {
        if (mValue <= 0.f) {
          aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR);
          return false;
        }
      }
    }
    return true;
  }

  // Inserts aEvent keeping mEvents sorted by time; an event with the same
  // time and type replaces the existing one, same time but different type is
  // placed after the existing events at that time.
  template<typename TimeType>
  void InsertEvent(const AudioTimelineEvent& aEvent)
  {
    for (unsigned i = 0; i < mEvents.Length(); ++i) {
      if (aEvent.template Time<TimeType>() == mEvents[i].template Time<TimeType>()) {
        if (aEvent.mType == mEvents[i].mType) {
          // If times and types are equal, replace the event
          mEvents.ReplaceElementAt(i, aEvent);
        } else {
          // Otherwise, place the element after the last event of another type
          do {
            ++i;
          } while (i < mEvents.Length() &&
                   aEvent.mType != mEvents[i].mType &&
                   aEvent.template Time<TimeType>() == mEvents[i].template Time<TimeType>());
          mEvents.InsertElementAt(i, aEvent);
        }
        return;
      }
      // Otherwise, place the event right after the latest existing event
      if (aEvent.template Time<TimeType>() < mEvents[i].template Time<TimeType>()) {
        mEvents.InsertElementAt(i, aEvent);
        return;
      }
    }

    // If we couldn't find a place for the event, just append it to the list
    mEvents.AppendElement(aEvent);
  }

  bool HasSimpleValue() const
  {
    return mEvents.IsEmpty();
  }

  float GetValue() const
  {
    // This method should only be called if HasSimpleValue() returns true
    MOZ_ASSERT(HasSimpleValue());
    return mValue;
  }

  float Value() const
  {
    // TODO: Return the current value based on the timeline of the AudioContext
    return mValue;
  }

  void SetValue(float aValue)
  {
    // Silently don't change anything if there are any events
    if (mEvents.IsEmpty()) {
      mLastComputedValue = mComputedValue = mValue = aValue;
    }
  }

  template <class ErrorResult>
  void SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
  {
    AudioTimelineEvent event(AudioTimelineEvent::SetValueAtTime, aStartTime, aValue);

    if (ValidateEvent(event, aRv)) {
      InsertEvent<double>(event);
    }
  }

  template <class ErrorResult>
  void LinearRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
  {
    AudioTimelineEvent event(AudioTimelineEvent::LinearRamp, aEndTime, aValue);

    if (ValidateEvent(event, aRv)) {
      InsertEvent<double>(event);
    }
  }

  template <class ErrorResult>
  void ExponentialRampToValueAtTime(float aValue, double aEndTime, ErrorResult& aRv)
  {
    AudioTimelineEvent event(AudioTimelineEvent::ExponentialRamp, aEndTime, aValue);

    if (ValidateEvent(event, aRv)) {
      InsertEvent<double>(event);
    }
  }

  template <class ErrorResult>
  void SetTargetAtTime(float aTarget, double aStartTime, double aTimeConstant, ErrorResult& aRv)
  {
    AudioTimelineEvent event(AudioTimelineEvent::SetTarget, aStartTime, aTarget, aTimeConstant);

    if (ValidateEvent(event, aRv)) {
      InsertEvent<double>(event);
    }
  }

  template <class ErrorResult>
  void SetValueCurveAtTime(const float* aValues, uint32_t aValuesLength, double aStartTime, double aDuration, ErrorResult& aRv)
  {
    AudioTimelineEvent event(AudioTimelineEvent::SetValueCurve, aStartTime, 0.0f, 0.0f, aDuration, aValues, aValuesLength);
    if (ValidateEvent(event, aRv)) {
      InsertEvent<double>(event);
    }
  }

  // Drops every event scheduled at or after aStartTime (the array is sorted,
  // so a single truncation suffices).
  template<typename TimeType>
  void CancelScheduledValues(TimeType aStartTime)
  {
    for (unsigned i = 0; i < mEvents.Length(); ++i) {
      if (mEvents[i].template Time<TimeType>() >= aStartTime) {
#ifdef DEBUG
        // Sanity check: the array should be sorted, so all of the following
        // events should have a time greater than aStartTime too.
        for (unsigned j = i + 1; j < mEvents.Length(); ++j) {
          MOZ_ASSERT(mEvents[j].template Time<TimeType>() >= aStartTime);
        }
#endif
        mEvents.TruncateLength(i);
        break;
      }
    }
  }

  void CancelAllEvents()
  {
    mEvents.Clear();
  }

  static bool TimesEqual(int64_t aLhs, int64_t aRhs)
  {
    return aLhs == aRhs;
  }

  // Since we are going to accumulate error by adding 0.01 multiple time in a
  // loop, we want to fuzz the equality check in GetValueAtTime.
  static bool TimesEqual(double aLhs, double aRhs)
  {
    const float kEpsilon = 0.0000000001f;
    return fabs(aLhs - aRhs) < kEpsilon;
  }

  template<class TimeType>
  float GetValueAtTime(TimeType aTime)
  {
    float result;
    GetValuesAtTimeHelper(aTime, &result, 1);
    return result;
  }

  template<class TimeType>
  void GetValuesAtTime(TimeType aTime, float* aBuffer, const size_t aSize)
  {
    MOZ_ASSERT(aBuffer);
    GetValuesAtTimeHelper(aTime, aBuffer, aSize);
  }

  // Return the number of events scheduled
  uint32_t GetEventCount() const
  {
    return mEvents.Length();
  }

  // Discards events strictly in the past of aTime, always keeping at least
  // one (the most recent) because SetTarget/curve evaluation needs it; a
  // dropped SetTarget records its final value in mLastComputedValue.
  template<class TimeType>
  void CleanupEventsOlderThan(TimeType aTime)
  {
    while (mEvents.Length() > 1 &&
           aTime > mEvents[1].template Time<TimeType>()) {

      if (mEvents[1].mType == AudioTimelineEvent::SetTarget) {
        mLastComputedValue = GetValuesAtTimeHelperInternal(
          mEvents[1].template Time<TimeType>(),
          &mEvents[0], nullptr);
      }

      mEvents.RemoveElementAt(0);
    }
  }

private:
  template<class TimeType>
  void GetValuesAtTimeHelper(TimeType aTime, float* aBuffer, const size_t aSize);

  template<class TimeType>
  float GetValueAtTimeOfEvent(const AudioTimelineEvent* aNext);

  template<class TimeType>
  float GetValuesAtTimeHelperInternal(TimeType aTime,
                                      const AudioTimelineEvent* aPrevious,
                                      const AudioTimelineEvent* aNext);

  const AudioTimelineEvent* GetPreviousEvent(double aTime) const;

  static bool IsValid(double value)
  {
    return mozilla::IsFinite(value);
  }

  // This is a sorted array of the events in the timeline. Queries of this
  // data structure should probably be more frequent than modifications to it,
  // and that is the reason why we're using a simple array as the data structure.
  // We can optimize this in the future if the performance of the array ends up
  // being a bottleneck.
  nsTArray<AudioTimelineEvent> mEvents;
  float mValue;
  // This is the value of this AudioParam we computed at the last tick.
+ float mComputedValue; + // This is the value of this AudioParam at the last tick of the previous event. + float mLastComputedValue; +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/AudioListener.cpp b/dom/media/webaudio/AudioListener.cpp new file mode 100644 index 000000000..0bd11156a --- /dev/null +++ b/dom/media/webaudio/AudioListener.cpp @@ -0,0 +1,131 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioListener.h" +#include "AudioContext.h" +#include "mozilla/dom/AudioListenerBinding.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(AudioListener, mContext) + +NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(AudioListener, AddRef) +NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(AudioListener, Release) + +AudioListener::AudioListener(AudioContext* aContext) + : mContext(aContext) + , mPosition() + , mFrontVector(0., 0., -1.) + , mRightVector(1., 0., 0.) + , mVelocity() + , mDopplerFactor(1.) + , mSpeedOfSound(343.3) // meters/second +{ + MOZ_ASSERT(aContext); +} + +JSObject* +AudioListener::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return AudioListenerBinding::Wrap(aCx, this, aGivenProto); +} + +void +AudioListener::SetOrientation(double aX, double aY, double aZ, + double aXUp, double aYUp, double aZUp) +{ + ThreeDPoint front(aX, aY, aZ); + // The panning effect and the azimuth and elevation calculation in the Web + // Audio spec becomes undefined with linearly dependent vectors, so keep + // existing state in these situations. + if (front.IsZero()) { + return; + } + // Normalize before using CrossProduct() to avoid overflow. 
+ front.Normalize(); + ThreeDPoint up(aXUp, aYUp, aZUp); + if (up.IsZero()) { + return; + } + up.Normalize(); + ThreeDPoint right = front.CrossProduct(up); + if (right.IsZero()) { + return; + } + right.Normalize(); + + if (!mFrontVector.FuzzyEqual(front)) { + mFrontVector = front; + SendThreeDPointParameterToStream(PannerNode::LISTENER_FRONT_VECTOR, front); + } + if (!mRightVector.FuzzyEqual(right)) { + mRightVector = right; + SendThreeDPointParameterToStream(PannerNode::LISTENER_RIGHT_VECTOR, right); + } +} + +void +AudioListener::RegisterPannerNode(PannerNode* aPannerNode) +{ + mPanners.AppendElement(aPannerNode); + + // Let the panner node know about our parameters + aPannerNode->SendThreeDPointParameterToStream(PannerNode::LISTENER_POSITION, mPosition); + aPannerNode->SendThreeDPointParameterToStream(PannerNode::LISTENER_FRONT_VECTOR, mFrontVector); + aPannerNode->SendThreeDPointParameterToStream(PannerNode::LISTENER_RIGHT_VECTOR, mRightVector); + aPannerNode->SendThreeDPointParameterToStream(PannerNode::LISTENER_VELOCITY, mVelocity); + aPannerNode->SendDoubleParameterToStream(PannerNode::LISTENER_DOPPLER_FACTOR, mDopplerFactor); + aPannerNode->SendDoubleParameterToStream(PannerNode::LISTENER_SPEED_OF_SOUND, mSpeedOfSound); + UpdatePannersVelocity(); +} + +void AudioListener::UnregisterPannerNode(PannerNode* aPannerNode) +{ + mPanners.RemoveElement(aPannerNode); +} + +void +AudioListener::SendDoubleParameterToStream(uint32_t aIndex, double aValue) +{ + for (uint32_t i = 0; i < mPanners.Length(); ++i) { + if (mPanners[i]) { + mPanners[i]->SendDoubleParameterToStream(aIndex, aValue); + } + } +} + +void +AudioListener::SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue) +{ + for (uint32_t i = 0; i < mPanners.Length(); ++i) { + if (mPanners[i]) { + mPanners[i]->SendThreeDPointParameterToStream(aIndex, aValue); + } + } +} + +void AudioListener::UpdatePannersVelocity() +{ + for (uint32_t i = 0; i < mPanners.Length(); ++i) { + if (mPanners[i]) 
{ + mPanners[i]->SendDopplerToSourcesIfNeeded(); + } + } +} + +size_t +AudioListener::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + // AudioNodes are tracked separately + amount += mPanners.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/AudioListener.h b/dom/media/webaudio/AudioListener.h new file mode 100644 index 000000000..e3eaf1ca4 --- /dev/null +++ b/dom/media/webaudio/AudioListener.h @@ -0,0 +1,133 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef AudioListener_h_ +#define AudioListener_h_ + +#include "nsWrapperCache.h" +#include "nsCycleCollectionParticipant.h" +#include "mozilla/Attributes.h" +#include "ThreeDPoint.h" +#include "AudioContext.h" +#include "PannerNode.h" +#include "WebAudioUtils.h" +#include "js/TypeDecls.h" +#include "mozilla/MemoryReporting.h" + +namespace mozilla { + +namespace dom { + +class AudioListener final : public nsWrapperCache +{ +public: + explicit AudioListener(AudioContext* aContext); + + NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(AudioListener) + NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioListener) + + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + AudioContext* GetParentObject() const + { + return mContext; + } + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + double DopplerFactor() const + { + return mDopplerFactor; + } + void SetDopplerFactor(double aDopplerFactor) + { + if (WebAudioUtils::FuzzyEqual(mDopplerFactor, aDopplerFactor)) { + return; + } + mDopplerFactor = aDopplerFactor; + 
SendDoubleParameterToStream(PannerNode::LISTENER_DOPPLER_FACTOR, mDopplerFactor); + } + + double SpeedOfSound() const + { + return mSpeedOfSound; + } + void SetSpeedOfSound(double aSpeedOfSound) + { + if (WebAudioUtils::FuzzyEqual(mSpeedOfSound, aSpeedOfSound)) { + return; + } + mSpeedOfSound = aSpeedOfSound; + SendDoubleParameterToStream(PannerNode::LISTENER_SPEED_OF_SOUND, mSpeedOfSound); + } + + void SetPosition(double aX, double aY, double aZ) + { + if (WebAudioUtils::FuzzyEqual(mPosition.x, aX) && + WebAudioUtils::FuzzyEqual(mPosition.y, aY) && + WebAudioUtils::FuzzyEqual(mPosition.z, aZ)) { + return; + } + mPosition.x = aX; + mPosition.y = aY; + mPosition.z = aZ; + SendThreeDPointParameterToStream(PannerNode::LISTENER_POSITION, mPosition); + } + + const ThreeDPoint& Position() const + { + return mPosition; + } + + void SetOrientation(double aX, double aY, double aZ, + double aXUp, double aYUp, double aZUp); + + const ThreeDPoint& Velocity() const + { + return mVelocity; + } + + void SetVelocity(double aX, double aY, double aZ) + { + if (WebAudioUtils::FuzzyEqual(mVelocity.x, aX) && + WebAudioUtils::FuzzyEqual(mVelocity.y, aY) && + WebAudioUtils::FuzzyEqual(mVelocity.z, aZ)) { + return; + } + mVelocity.x = aX; + mVelocity.y = aY; + mVelocity.z = aZ; + SendThreeDPointParameterToStream(PannerNode::LISTENER_VELOCITY, mVelocity); + UpdatePannersVelocity(); + } + + void RegisterPannerNode(PannerNode* aPannerNode); + void UnregisterPannerNode(PannerNode* aPannerNode); + +private: + ~AudioListener() {} + + void SendDoubleParameterToStream(uint32_t aIndex, double aValue); + void SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue); + void UpdatePannersVelocity(); + +private: + friend class PannerNode; + RefPtr<AudioContext> mContext; + ThreeDPoint mPosition; + ThreeDPoint mFrontVector; + ThreeDPoint mRightVector; + ThreeDPoint mVelocity; + double mDopplerFactor; + double mSpeedOfSound; + nsTArray<WeakPtr<PannerNode> > mPanners; +}; + +} // 
namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/AudioNode.cpp b/dom/media/webaudio/AudioNode.cpp new file mode 100644 index 000000000..2b64fcf88 --- /dev/null +++ b/dom/media/webaudio/AudioNode.cpp @@ -0,0 +1,666 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioNode.h" +#include "mozilla/ErrorResult.h" +#include "AudioNodeStream.h" +#include "AudioNodeEngine.h" +#include "mozilla/dom/AudioParam.h" +#include "mozilla/Services.h" +#include "nsIObserverService.h" + +namespace mozilla { +namespace dom { + +static const uint32_t INVALID_PORT = 0xffffffff; +static uint32_t gId = 0; + +NS_IMPL_CYCLE_COLLECTION_CLASS(AudioNode) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(AudioNode, DOMEventTargetHelper) + tmp->DisconnectFromGraph(); + if (tmp->mContext) { + tmp->mContext->UnregisterNode(tmp); + } + NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputNodes) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutputParams) +NS_IMPL_CYCLE_COLLECTION_UNLINK_END +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(AudioNode, + DOMEventTargetHelper) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputNodes) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutputParams) +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_IMPL_ADDREF_INHERITED(AudioNode, DOMEventTargetHelper) +NS_IMPL_RELEASE_INHERITED(AudioNode, DOMEventTargetHelper) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioNode) + NS_INTERFACE_MAP_ENTRY(nsISupportsWeakReference) +NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper) + +AudioNode::AudioNode(AudioContext* aContext, + uint32_t aChannelCount, + ChannelCountMode aChannelCountMode, + 
ChannelInterpretation aChannelInterpretation) + : DOMEventTargetHelper(aContext->GetParentObject()) + , mContext(aContext) + , mChannelCount(aChannelCount) + , mChannelCountMode(aChannelCountMode) + , mChannelInterpretation(aChannelInterpretation) + , mId(gId++) + , mPassThrough(false) +{ + MOZ_ASSERT(aContext); + DOMEventTargetHelper::BindToOwner(aContext->GetParentObject()); + aContext->RegisterNode(this); +} + +AudioNode::~AudioNode() +{ + MOZ_ASSERT(mInputNodes.IsEmpty()); + MOZ_ASSERT(mOutputNodes.IsEmpty()); + MOZ_ASSERT(mOutputParams.IsEmpty()); + MOZ_ASSERT(!mStream, + "The webaudio-node-demise notification must have been sent"); + if (mContext) { + mContext->UnregisterNode(this); + } +} + +size_t +AudioNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + // Not owned: + // - mContext + // - mStream + size_t amount = 0; + + amount += mInputNodes.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mInputNodes.Length(); i++) { + amount += mInputNodes[i].SizeOfExcludingThis(aMallocSizeOf); + } + + // Just measure the array. The entire audio node graph is measured via the + // MediaStreamGraph's streams, so we don't want to double-count the elements. 
+ amount += mOutputNodes.ShallowSizeOfExcludingThis(aMallocSizeOf); + + amount += mOutputParams.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mOutputParams.Length(); i++) { + amount += mOutputParams[i]->SizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +size_t +AudioNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +template <class InputNode> +static size_t +FindIndexOfNode(const nsTArray<InputNode>& aInputNodes, const AudioNode* aNode) +{ + for (size_t i = 0; i < aInputNodes.Length(); ++i) { + if (aInputNodes[i].mInputNode == aNode) { + return i; + } + } + return nsTArray<InputNode>::NoIndex; +} + +template <class InputNode> +static size_t +FindIndexOfNodeWithPorts(const nsTArray<InputNode>& aInputNodes, + const AudioNode* aNode, + uint32_t aInputPort, uint32_t aOutputPort) +{ + for (size_t i = 0; i < aInputNodes.Length(); ++i) { + if (aInputNodes[i].mInputNode == aNode && + aInputNodes[i].mInputPort == aInputPort && + aInputNodes[i].mOutputPort == aOutputPort) { + return i; + } + } + return nsTArray<InputNode>::NoIndex; +} + +void +AudioNode::DisconnectFromGraph() +{ + MOZ_ASSERT(mRefCnt.get() > mInputNodes.Length(), + "Caller should be holding a reference"); + + // The idea here is that we remove connections one by one, and at each step + // the graph is in a valid state. + + // Disconnect inputs. We don't need them anymore. 
+ while (!mInputNodes.IsEmpty()) { + size_t i = mInputNodes.Length() - 1; + RefPtr<AudioNode> input = mInputNodes[i].mInputNode; + mInputNodes.RemoveElementAt(i); + input->mOutputNodes.RemoveElement(this); + } + + while (!mOutputNodes.IsEmpty()) { + size_t i = mOutputNodes.Length() - 1; + RefPtr<AudioNode> output = mOutputNodes[i].forget(); + mOutputNodes.RemoveElementAt(i); + size_t inputIndex = FindIndexOfNode(output->mInputNodes, this); + // It doesn't matter which one we remove, since we're going to remove all + // entries for this node anyway. + output->mInputNodes.RemoveElementAt(inputIndex); + // This effects of this connection will remain. + output->NotifyHasPhantomInput(); + } + + while (!mOutputParams.IsEmpty()) { + size_t i = mOutputParams.Length() - 1; + RefPtr<AudioParam> output = mOutputParams[i].forget(); + mOutputParams.RemoveElementAt(i); + size_t inputIndex = FindIndexOfNode(output->InputNodes(), this); + // It doesn't matter which one we remove, since we're going to remove all + // entries for this node anyway. + output->RemoveInputNode(inputIndex); + } + + DestroyMediaStream(); +} + +AudioNode* +AudioNode::Connect(AudioNode& aDestination, uint32_t aOutput, + uint32_t aInput, ErrorResult& aRv) +{ + if (aOutput >= NumberOfOutputs() || + aInput >= aDestination.NumberOfInputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return nullptr; + } + + if (Context() != aDestination.Context()) { + aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR); + return nullptr; + } + + if (FindIndexOfNodeWithPorts(aDestination.mInputNodes, + this, aInput, aOutput) != + nsTArray<AudioNode::InputNode>::NoIndex) { + // connection already exists. + return &aDestination; + } + + WEB_AUDIO_API_LOG("%f: %s %u Connect() to %s %u", + Context()->CurrentTime(), NodeType(), Id(), + aDestination.NodeType(), aDestination.Id()); + + // The MediaStreamGraph will handle cycle detection. We don't need to do it + // here. 
+ + mOutputNodes.AppendElement(&aDestination); + InputNode* input = aDestination.mInputNodes.AppendElement(); + input->mInputNode = this; + input->mInputPort = aInput; + input->mOutputPort = aOutput; + AudioNodeStream* destinationStream = aDestination.mStream; + if (mStream && destinationStream) { + // Connect streams in the MediaStreamGraph + MOZ_ASSERT(aInput <= UINT16_MAX, "Unexpected large input port number"); + MOZ_ASSERT(aOutput <= UINT16_MAX, "Unexpected large output port number"); + input->mStreamPort = destinationStream-> + AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK, TRACK_ANY, + static_cast<uint16_t>(aInput), + static_cast<uint16_t>(aOutput)); + } + aDestination.NotifyInputsChanged(); + + // This connection may have connected a panner and a source. + Context()->UpdatePannerSource(); + + return &aDestination; +} + +void +AudioNode::Connect(AudioParam& aDestination, uint32_t aOutput, + ErrorResult& aRv) +{ + if (aOutput >= NumberOfOutputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + if (Context() != aDestination.GetParentObject()) { + aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR); + return; + } + + if (FindIndexOfNodeWithPorts(aDestination.InputNodes(), + this, INVALID_PORT, aOutput) != + nsTArray<AudioNode::InputNode>::NoIndex) { + // connection already exists. 
+ return; + } + + mOutputParams.AppendElement(&aDestination); + InputNode* input = aDestination.AppendInputNode(); + input->mInputNode = this; + input->mInputPort = INVALID_PORT; + input->mOutputPort = aOutput; + + MediaStream* stream = aDestination.Stream(); + MOZ_ASSERT(stream->AsProcessedStream()); + ProcessedMediaStream* ps = static_cast<ProcessedMediaStream*>(stream); + if (mStream) { + // Setup our stream as an input to the AudioParam's stream + MOZ_ASSERT(aOutput <= UINT16_MAX, "Unexpected large output port number"); + input->mStreamPort = + ps->AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK, TRACK_ANY, + 0, static_cast<uint16_t>(aOutput)); + } +} + +void +AudioNode::SendDoubleParameterToStream(uint32_t aIndex, double aValue) +{ + MOZ_ASSERT(mStream, "How come we don't have a stream here?"); + mStream->SetDoubleParameter(aIndex, aValue); +} + +void +AudioNode::SendInt32ParameterToStream(uint32_t aIndex, int32_t aValue) +{ + MOZ_ASSERT(mStream, "How come we don't have a stream here?"); + mStream->SetInt32Parameter(aIndex, aValue); +} + +void +AudioNode::SendThreeDPointParameterToStream(uint32_t aIndex, + const ThreeDPoint& aValue) +{ + MOZ_ASSERT(mStream, "How come we don't have a stream here?"); + mStream->SetThreeDPointParameter(aIndex, aValue); +} + +void +AudioNode::SendChannelMixingParametersToStream() +{ + if (mStream) { + mStream->SetChannelMixingParameters(mChannelCount, mChannelCountMode, + mChannelInterpretation); + } +} + +template<> +bool +AudioNode::DisconnectFromOutputIfConnected<AudioNode>(uint32_t aOutputNodeIndex, + uint32_t aInputIndex) +{ + WEB_AUDIO_API_LOG("%f: %s %u Disconnect()", Context()->CurrentTime(), + NodeType(), Id()); + + AudioNode* destination = mOutputNodes[aOutputNodeIndex]; + + MOZ_ASSERT(aOutputNodeIndex < mOutputNodes.Length()); + MOZ_ASSERT(aInputIndex < destination->InputNodes().Length()); + + // An upstream node may be starting to play on the graph thread, and the + // engine for a downstream node may be 
sending a PlayingRefChangeHandler + // ADDREF message to this (main) thread. Wait for a round trip before + // releasing nodes, to give engines receiving sound now time to keep their + // nodes alive. + class RunnableRelease final : public Runnable + { + public: + explicit RunnableRelease(already_AddRefed<AudioNode> aNode) + : mNode(aNode) {} + + NS_IMETHOD Run() override + { + mNode = nullptr; + return NS_OK; + } + private: + RefPtr<AudioNode> mNode; + }; + + InputNode& input = destination->mInputNodes[aInputIndex]; + if (input.mInputNode != this) { + return false; + } + + // Remove one instance of 'dest' from mOutputNodes. There could be + // others, and it's not correct to remove them all since some of them + // could be for different output ports. + RefPtr<AudioNode> output = mOutputNodes[aOutputNodeIndex].forget(); + mOutputNodes.RemoveElementAt(aOutputNodeIndex); + // Destroying the InputNode here sends a message to the graph thread + // to disconnect the streams, which should be sent before the + // RunAfterPendingUpdates() call below. + destination->mInputNodes.RemoveElementAt(aInputIndex); + output->NotifyInputsChanged(); + if (mStream) { + nsCOMPtr<nsIRunnable> runnable = new RunnableRelease(output.forget()); + mStream->RunAfterPendingUpdates(runnable.forget()); + } + return true; +} + +template<> +bool +AudioNode::DisconnectFromOutputIfConnected<AudioParam>(uint32_t aOutputParamIndex, + uint32_t aInputIndex) +{ + MOZ_ASSERT(aOutputParamIndex < mOutputParams.Length()); + + AudioParam* destination = mOutputParams[aOutputParamIndex]; + + MOZ_ASSERT(aInputIndex < destination->InputNodes().Length()); + + const InputNode& input = destination->InputNodes()[aInputIndex]; + if (input.mInputNode != this) { + return false; + } + destination->RemoveInputNode(aInputIndex); + // Remove one instance of 'dest' from mOutputParams. There could be + // others, and it's not correct to remove them all since some of them + // could be for different output ports. 
+ mOutputParams.RemoveElementAt(aOutputParamIndex); + return true; +} + +template<> +const nsTArray<AudioNode::InputNode>& +AudioNode::InputsForDestination<AudioNode>(uint32_t aOutputNodeIndex) const { + return mOutputNodes[aOutputNodeIndex]->InputNodes(); +} + +template<> +const nsTArray<AudioNode::InputNode>& +AudioNode::InputsForDestination<AudioParam>(uint32_t aOutputNodeIndex) const { + return mOutputParams[aOutputNodeIndex]->InputNodes(); +} + +template<typename DestinationType, typename Predicate> +bool +AudioNode::DisconnectMatchingDestinationInputs(uint32_t aDestinationIndex, + Predicate aPredicate) +{ + bool wasConnected = false; + uint32_t inputCount = + InputsForDestination<DestinationType>(aDestinationIndex).Length(); + + for (int32_t inputIndex = inputCount - 1; inputIndex >= 0; --inputIndex) { + const InputNode& input = + InputsForDestination<DestinationType>(aDestinationIndex)[inputIndex]; + if (aPredicate(input)) { + if (DisconnectFromOutputIfConnected<DestinationType>(aDestinationIndex, + inputIndex)) { + wasConnected = true; + break; + } + } + } + return wasConnected; +} + +void +AudioNode::Disconnect(ErrorResult& aRv) +{ + for (int32_t outputIndex = mOutputNodes.Length() - 1; + outputIndex >= 0; --outputIndex) { + DisconnectMatchingDestinationInputs<AudioNode>(outputIndex, + [](const InputNode&) { + return true; + }); + } + + for (int32_t outputIndex = mOutputParams.Length() - 1; + outputIndex >= 0; --outputIndex) { + DisconnectMatchingDestinationInputs<AudioParam>(outputIndex, + [](const InputNode&) { + return true; + }); + } + + // This disconnection may have disconnected a panner and a source. 
+ Context()->UpdatePannerSource(); +} + +void +AudioNode::Disconnect(uint32_t aOutput, ErrorResult& aRv) +{ + if (aOutput >= NumberOfOutputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + for (int32_t outputIndex = mOutputNodes.Length() - 1; + outputIndex >= 0; --outputIndex) { + DisconnectMatchingDestinationInputs<AudioNode>( + outputIndex, + [aOutput](const InputNode& aInputNode) { + return aInputNode.mOutputPort == aOutput; + }); + } + + for (int32_t outputIndex = mOutputParams.Length() - 1; + outputIndex >= 0; --outputIndex) { + DisconnectMatchingDestinationInputs<AudioParam>( + outputIndex, + [aOutput](const InputNode& aInputNode) { + return aInputNode.mOutputPort == aOutput; + }); + } + + // This disconnection may have disconnected a panner and a source. + Context()->UpdatePannerSource(); +} + +void +AudioNode::Disconnect(AudioNode& aDestination, ErrorResult& aRv) +{ + bool wasConnected = false; + + for (int32_t outputIndex = mOutputNodes.Length() - 1; + outputIndex >= 0; --outputIndex) { + if (mOutputNodes[outputIndex] != &aDestination) { + continue; + } + wasConnected |= + DisconnectMatchingDestinationInputs<AudioNode>(outputIndex, + [](const InputNode&) { + return true; + }); + } + + if (!wasConnected) { + aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR); + return; + } + + // This disconnection may have disconnected a panner and a source. 
+ Context()->UpdatePannerSource(); +} + +void +AudioNode::Disconnect(AudioNode& aDestination, + uint32_t aOutput, + ErrorResult& aRv) +{ + if (aOutput >= NumberOfOutputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + bool wasConnected = false; + + for (int32_t outputIndex = mOutputNodes.Length() - 1; + outputIndex >= 0; --outputIndex) { + if (mOutputNodes[outputIndex] != &aDestination) { + continue; + } + wasConnected |= + DisconnectMatchingDestinationInputs<AudioNode>( + outputIndex, + [aOutput](const InputNode& aInputNode) { + return aInputNode.mOutputPort == aOutput; + }); + } + + if (!wasConnected) { + aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR); + return; + } + + // This disconnection may have disconnected a panner and a source. + Context()->UpdatePannerSource(); +} + +void +AudioNode::Disconnect(AudioNode& aDestination, + uint32_t aOutput, + uint32_t aInput, + ErrorResult& aRv) +{ + if (aOutput >= NumberOfOutputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + if (aInput >= aDestination.NumberOfInputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + bool wasConnected = false; + + for (int32_t outputIndex = mOutputNodes.Length() - 1; + outputIndex >= 0; --outputIndex) { + if (mOutputNodes[outputIndex] != &aDestination) { + continue; + } + wasConnected |= + DisconnectMatchingDestinationInputs<AudioNode>( + outputIndex, + [aOutput, aInput](const InputNode& aInputNode) { + return aInputNode.mOutputPort == aOutput && + aInputNode.mInputPort == aInput; + }); + } + + if (!wasConnected) { + aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR); + return; + } + + // This disconnection may have disconnected a panner and a source. 
+ Context()->UpdatePannerSource(); +} + +void +AudioNode::Disconnect(AudioParam& aDestination, ErrorResult& aRv) +{ + bool wasConnected = false; + + for (int32_t outputIndex = mOutputParams.Length() - 1; + outputIndex >= 0; --outputIndex) { + if (mOutputParams[outputIndex] != &aDestination) { + continue; + } + wasConnected |= + DisconnectMatchingDestinationInputs<AudioParam>(outputIndex, + [](const InputNode&) { + return true; + }); + } + + if (!wasConnected) { + aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR); + return; + } +} + +void +AudioNode::Disconnect(AudioParam& aDestination, + uint32_t aOutput, + ErrorResult& aRv) +{ + if (aOutput >= NumberOfOutputs()) { + aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + return; + } + + bool wasConnected = false; + + for (int32_t outputIndex = mOutputParams.Length() - 1; + outputIndex >= 0; --outputIndex) { + if (mOutputParams[outputIndex] != &aDestination) { + continue; + } + wasConnected |= + DisconnectMatchingDestinationInputs<AudioParam>( + outputIndex, + [aOutput](const InputNode& aInputNode) { + return aInputNode.mOutputPort == aOutput; + }); + } + + if (!wasConnected) { + aRv.Throw(NS_ERROR_DOM_INVALID_ACCESS_ERR); + return; + } +} + +void +AudioNode::DestroyMediaStream() +{ + if (mStream) { + // Remove the node pointer on the engine. 
+ AudioNodeStream* ns = mStream; + MOZ_ASSERT(ns, "How come we don't have a stream here?"); + MOZ_ASSERT(ns->Engine()->NodeMainThread() == this, + "Invalid node reference"); + ns->Engine()->ClearNode(); + + mStream->Destroy(); + mStream = nullptr; + + nsCOMPtr<nsIObserverService> obs = services::GetObserverService(); + if (obs) { + nsAutoString id; + id.AppendPrintf("%u", mId); + obs->NotifyObservers(nullptr, "webaudio-node-demise", id.get()); + } + } +} + +void +AudioNode::RemoveOutputParam(AudioParam* aParam) +{ + mOutputParams.RemoveElement(aParam); +} + +bool +AudioNode::PassThrough() const +{ + MOZ_ASSERT(NumberOfInputs() <= 1 && NumberOfOutputs() == 1); + return mPassThrough; +} + +void +AudioNode::SetPassThrough(bool aPassThrough) +{ + MOZ_ASSERT(NumberOfInputs() <= 1 && NumberOfOutputs() == 1); + mPassThrough = aPassThrough; + if (mStream) { + mStream->SetPassThrough(mPassThrough); + } +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/AudioNode.h b/dom/media/webaudio/AudioNode.h new file mode 100644 index 000000000..ebef129c8 --- /dev/null +++ b/dom/media/webaudio/AudioNode.h @@ -0,0 +1,294 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef AudioNode_h_ +#define AudioNode_h_ + +#include "mozilla/DOMEventTargetHelper.h" +#include "mozilla/dom/AudioNodeBinding.h" +#include "nsCycleCollectionParticipant.h" +#include "nsTArray.h" +#include "AudioContext.h" +#include "MediaStreamGraph.h" +#include "WebAudioUtils.h" +#include "mozilla/MemoryReporting.h" +#include "nsWeakReference.h" +#include "SelfRef.h" + +namespace mozilla { + +namespace dom { + +class AudioContext; +class AudioBufferSourceNode; +class AudioParam; +class AudioParamTimeline; +struct ThreeDPoint; + +/** + * The DOM object representing a Web Audio AudioNode. + * + * Each AudioNode has a MediaStream representing the actual + * real-time processing and output of this AudioNode. + * + * We track the incoming and outgoing connections to other AudioNodes. + * Outgoing connections have strong ownership. Also, AudioNodes that will + * produce sound on their output even when they have silent or no input ask + * the AudioContext to keep playing or tail-time references to keep them alive + * until the context is finished. + * + * Explicit disconnections will only remove references from output nodes after + * the graph is notified and the main thread receives a reply. Similarly, + * nodes with playing or tail-time references release these references only + * after receiving notification from their engine on the graph thread that + * playing has stopped. Engines notifying the main thread that they have + * finished do so strictly *after* producing and returning their last block. + * In this way, an engine that receives non-null input knows that the input + * comes from nodes that are still alive and will keep their output nodes + * alive for at least as long as it takes to process messages from the graph + * thread. i.e. the engine receiving non-null input knows that its node is + * still alive, and will still be alive when it receives a message from the + * engine. 
+ */ +class AudioNode : public DOMEventTargetHelper, + public nsSupportsWeakReference +{ +protected: + // You can only use refcounting to delete this object + virtual ~AudioNode(); + +public: + AudioNode(AudioContext* aContext, + uint32_t aChannelCount, + ChannelCountMode aChannelCountMode, + ChannelInterpretation aChannelInterpretation); + + // This should be idempotent (safe to call multiple times). + virtual void DestroyMediaStream(); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioNode, + DOMEventTargetHelper) + + virtual AudioBufferSourceNode* AsAudioBufferSourceNode() + { + return nullptr; + } + + AudioContext* GetParentObject() const + { + return mContext; + } + + AudioContext* Context() const + { + return mContext; + } + + virtual AudioNode* Connect(AudioNode& aDestination, uint32_t aOutput, + uint32_t aInput, ErrorResult& aRv); + + virtual void Connect(AudioParam& aDestination, uint32_t aOutput, + ErrorResult& aRv); + + virtual void Disconnect(ErrorResult& aRv); + virtual void Disconnect(uint32_t aOutput, ErrorResult& aRv); + virtual void Disconnect(AudioNode& aDestination, ErrorResult& aRv); + virtual void Disconnect(AudioNode& aDestination, uint32_t aOutput, + ErrorResult& aRv); + virtual void Disconnect(AudioNode& aDestination, + uint32_t aOutput, uint32_t aInput, + ErrorResult& aRv); + virtual void Disconnect(AudioParam& aDestination, ErrorResult& aRv); + virtual void Disconnect(AudioParam& aDestination, uint32_t aOutput, + ErrorResult& aRv); + + // Called after input nodes have been explicitly added or removed through + // the Connect() or Disconnect() methods. + virtual void NotifyInputsChanged() {} + // Indicate that the node should continue indefinitely to behave as if an + // input is connected, even though there is no longer a corresponding entry + // in mInputNodes. Called after an input node has been removed because it + // is being garbage collected. 
+ virtual void NotifyHasPhantomInput() {} + + // The following two virtual methods must be implemented by each node type + // to provide their number of input and output ports. These numbers are + // constant for the lifetime of the node. Both default to 1. + virtual uint16_t NumberOfInputs() const { return 1; } + virtual uint16_t NumberOfOutputs() const { return 1; } + + uint32_t Id() const { return mId; } + + bool PassThrough() const; + void SetPassThrough(bool aPassThrough); + + uint32_t ChannelCount() const { return mChannelCount; } + virtual void SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv) + { + if (aChannelCount == 0 || + aChannelCount > WebAudioUtils::MaxChannelCount) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + mChannelCount = aChannelCount; + SendChannelMixingParametersToStream(); + } + ChannelCountMode ChannelCountModeValue() const + { + return mChannelCountMode; + } + virtual void SetChannelCountModeValue(ChannelCountMode aMode, ErrorResult& aRv) + { + mChannelCountMode = aMode; + SendChannelMixingParametersToStream(); + } + ChannelInterpretation ChannelInterpretationValue() const + { + return mChannelInterpretation; + } + void SetChannelInterpretationValue(ChannelInterpretation aMode) + { + mChannelInterpretation = aMode; + SendChannelMixingParametersToStream(); + } + + struct InputNode final + { + ~InputNode() + { + if (mStreamPort) { + mStreamPort->Destroy(); + } + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + size_t amount = 0; + if (mStreamPort) { + amount += mStreamPort->SizeOfIncludingThis(aMallocSizeOf); + } + + return amount; + } + + // Weak reference. + AudioNode* mInputNode; + RefPtr<MediaInputPort> mStreamPort; + // The index of the input port this node feeds into. + // This is not used for connections to AudioParams. + uint32_t mInputPort; + // The index of the output port this node comes out of. + uint32_t mOutputPort; + }; + + // Returns the stream, if any. 
+ AudioNodeStream* GetStream() const { return mStream; } + + const nsTArray<InputNode>& InputNodes() const + { + return mInputNodes; + } + const nsTArray<RefPtr<AudioNode> >& OutputNodes() const + { + return mOutputNodes; + } + const nsTArray<RefPtr<AudioParam> >& OutputParams() const + { + return mOutputParams; + } + + template<typename T> + const nsTArray<InputNode>& + InputsForDestination(uint32_t aOutputIndex) const; + + void RemoveOutputParam(AudioParam* aParam); + + // MarkActive() asks the context to keep the AudioNode alive until the + // context is finished. This takes care of "playing" references and + // "tail-time" references. + void MarkActive() { Context()->RegisterActiveNode(this); } + // Active nodes call MarkInactive() when they have finished producing sound + // for the foreseeable future. + // Do not call MarkInactive from a node destructor. If the destructor is + // called, then the node is already inactive. + // MarkInactive() may delete |this|. + void MarkInactive() { Context()->UnregisterActiveNode(this); } + + virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const; + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const; + + // Returns a string from constant static storage identifying the dom node + // type. + virtual const char* NodeType() const = 0; + +private: + // Given: + // + // - a DestinationType, that can be an AudioNode or an AudioParam ; + // - a Predicate, a function that takes an InputNode& and returns a bool ; + // + // This method iterates on the InputNodes() of the node at the index + // aDestinationIndex, and calls `DisconnectFromOutputIfConnected` with this + // input node, if aPredicate returns true. 
+ template<typename DestinationType, typename Predicate> + bool DisconnectMatchingDestinationInputs(uint32_t aDestinationIndex, + Predicate aPredicate); + + virtual void LastRelease() override + { + // We are about to be deleted, disconnect the object from the graph before + // the derived type is destroyed. + DisconnectFromGraph(); + } + // Callers must hold a reference to 'this'. + void DisconnectFromGraph(); + + template<typename DestinationType> + bool DisconnectFromOutputIfConnected(uint32_t aOutputIndex, uint32_t aInputIndex); + +protected: + // Helpers for sending different value types to streams + void SendDoubleParameterToStream(uint32_t aIndex, double aValue); + void SendInt32ParameterToStream(uint32_t aIndex, int32_t aValue); + void SendThreeDPointParameterToStream(uint32_t aIndex, const ThreeDPoint& aValue); + void SendChannelMixingParametersToStream(); + +private: + RefPtr<AudioContext> mContext; + +protected: + // Must be set in the constructor. Must not be null unless finished. + RefPtr<AudioNodeStream> mStream; + +private: + // For every InputNode, there is a corresponding entry in mOutputNodes of the + // InputNode's mInputNode. + nsTArray<InputNode> mInputNodes; + // For every mOutputNode entry, there is a corresponding entry in mInputNodes + // of the mOutputNode entry. We won't necessarily be able to identify the + // exact matching entry, since mOutputNodes doesn't include the port + // identifiers and the same node could be connected on multiple ports. + nsTArray<RefPtr<AudioNode> > mOutputNodes; + // For every mOutputParams entry, there is a corresponding entry in + // AudioParam::mInputNodes of the mOutputParams entry. We won't necessarily be + // able to identify the exact matching entry, since mOutputParams doesn't + // include the port identifiers and the same node could be connected on + // multiple ports. 
+ nsTArray<RefPtr<AudioParam> > mOutputParams; + uint32_t mChannelCount; + ChannelCountMode mChannelCountMode; + ChannelInterpretation mChannelInterpretation; + const uint32_t mId; + // Whether the node just passes through its input. This is a devtools API that + // only works for some node types. + bool mPassThrough; +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/AudioNodeEngine.cpp b/dom/media/webaudio/AudioNodeEngine.cpp new file mode 100644 index 000000000..91170adb3 --- /dev/null +++ b/dom/media/webaudio/AudioNodeEngine.cpp @@ -0,0 +1,400 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioNodeEngine.h" +#ifdef BUILD_ARM_NEON +#include "mozilla/arm.h" +#include "AudioNodeEngineNEON.h" +#endif +#ifdef USE_SSE2 +#include "mozilla/SSE.h" +#include "AlignmentUtils.h" +#include "AudioNodeEngineSSE2.h" +#endif + +namespace mozilla { + +already_AddRefed<ThreadSharedFloatArrayBufferList> +ThreadSharedFloatArrayBufferList::Create(uint32_t aChannelCount, + size_t aLength, + const mozilla::fallible_t&) +{ + RefPtr<ThreadSharedFloatArrayBufferList> buffer = + new ThreadSharedFloatArrayBufferList(aChannelCount); + + for (uint32_t i = 0; i < aChannelCount; ++i) { + float* channelData = js_pod_malloc<float>(aLength); + if (!channelData) { + return nullptr; + } + + buffer->SetData(i, channelData, js_free, channelData); + } + + return buffer.forget(); +} + +void +WriteZeroesToAudioBlock(AudioBlock* aChunk, + uint32_t aStart, uint32_t aLength) +{ + MOZ_ASSERT(aStart + aLength <= WEBAUDIO_BLOCK_SIZE); + MOZ_ASSERT(!aChunk->IsNull(), "You should pass a non-null chunk"); + if (aLength == 0) + return; + + for (uint32_t i = 0; i < 
aChunk->ChannelCount(); ++i) { + PodZero(aChunk->ChannelFloatsForWrite(i) + aStart, aLength); + } +} + +void AudioBufferCopyWithScale(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize) +{ + if (aScale == 1.0f) { + PodCopy(aOutput, aInput, aSize); + } else { + for (uint32_t i = 0; i < aSize; ++i) { + aOutput[i] = aInput[i]*aScale; + } + } +} + +void AudioBufferAddWithScale(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize) +{ +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + AudioBufferAddWithScale_NEON(aInput, aScale, aOutput, aSize); + return; + } +#endif + +#ifdef USE_SSE2 + if (mozilla::supports_sse2()) { + if (aScale == 1.0f) { + while (aSize && (!IS_ALIGNED16(aInput) || !IS_ALIGNED16(aOutput))) { + *aOutput += *aInput; + ++aOutput; + ++aInput; + --aSize; + } + } else { + while (aSize && (!IS_ALIGNED16(aInput) || !IS_ALIGNED16(aOutput))) { + *aOutput += *aInput*aScale; + ++aOutput; + ++aInput; + --aSize; + } + } + + // we need to round aSize down to the nearest multiple of 16 + uint32_t alignedSize = aSize & ~0x0F; + if (alignedSize > 0) { + AudioBufferAddWithScale_SSE(aInput, aScale, aOutput, alignedSize); + + // adjust parameters for use with scalar operations below + aInput += alignedSize; + aOutput += alignedSize; + aSize -= alignedSize; + } + } +#endif + + if (aScale == 1.0f) { + for (uint32_t i = 0; i < aSize; ++i) { + aOutput[i] += aInput[i]; + } + } else { + for (uint32_t i = 0; i < aSize; ++i) { + aOutput[i] += aInput[i]*aScale; + } + } +} + +void +AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE], + float aScale, + float aOutput[WEBAUDIO_BLOCK_SIZE]) +{ + AudioBufferAddWithScale(aInput, aScale, aOutput, WEBAUDIO_BLOCK_SIZE); +} + +void +AudioBlockCopyChannelWithScale(const float* aInput, + float aScale, + float* aOutput) +{ + if (aScale == 1.0f) { + memcpy(aOutput, aInput, WEBAUDIO_BLOCK_SIZE*sizeof(float)); + } else { +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + 
AudioBlockCopyChannelWithScale_NEON(aInput, aScale, aOutput); + return; + } +#endif + +#ifdef USE_SSE2 + if (mozilla::supports_sse2()) { + AudioBlockCopyChannelWithScale_SSE(aInput, aScale, aOutput); + return; + } +#endif + + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + aOutput[i] = aInput[i]*aScale; + } + } +} + +void +BufferComplexMultiply(const float* aInput, + const float* aScale, + float* aOutput, + uint32_t aSize) +{ + +#ifdef USE_SSE2 + if (mozilla::supports_sse()) { + BufferComplexMultiply_SSE(aInput, aScale, aOutput, aSize); + return; + } +#endif + + for (uint32_t i = 0; i < aSize * 2; i += 2) { + float real1 = aInput[i]; + float imag1 = aInput[i + 1]; + float real2 = aScale[i]; + float imag2 = aScale[i + 1]; + float realResult = real1 * real2 - imag1 * imag2; + float imagResult = real1 * imag2 + imag1 * real2; + aOutput[i] = realResult; + aOutput[i + 1] = imagResult; + } +} + +float +AudioBufferPeakValue(const float *aInput, uint32_t aSize) +{ + float max = 0.0f; + for (uint32_t i = 0; i < aSize; i++) { + float mag = fabs(aInput[i]); + if (mag > max) { + max = mag; + } + } + return max; +} + +void +AudioBlockCopyChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE], + const float aScale[WEBAUDIO_BLOCK_SIZE], + float aOutput[WEBAUDIO_BLOCK_SIZE]) +{ +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + AudioBlockCopyChannelWithScale_NEON(aInput, aScale, aOutput); + return; + } +#endif + +#ifdef USE_SSE2 + if (mozilla::supports_sse2()) { + AudioBlockCopyChannelWithScale_SSE(aInput, aScale, aOutput); + return; + } +#endif + + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + aOutput[i] = aInput[i]*aScale[i]; + } +} + +void +AudioBlockInPlaceScale(float aBlock[WEBAUDIO_BLOCK_SIZE], + float aScale) +{ + AudioBufferInPlaceScale(aBlock, aScale, WEBAUDIO_BLOCK_SIZE); +} + +void +AudioBufferInPlaceScale(float* aBlock, + float aScale, + uint32_t aSize) +{ + if (aScale == 1.0f) { + return; + } +#ifdef BUILD_ARM_NEON + if 
(mozilla::supports_neon()) { + AudioBufferInPlaceScale_NEON(aBlock, aScale, aSize); + return; + } +#endif + +#ifdef USE_SSE2 + if (mozilla::supports_sse2()) { + AudioBufferInPlaceScale_SSE(aBlock, aScale, aSize); + return; + } +#endif + + for (uint32_t i = 0; i < aSize; ++i) { + *aBlock++ *= aScale; + } +} + +void +AudioBlockPanMonoToStereo(const float aInput[WEBAUDIO_BLOCK_SIZE], + float aGainL[WEBAUDIO_BLOCK_SIZE], + float aGainR[WEBAUDIO_BLOCK_SIZE], + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ + AudioBlockCopyChannelWithScale(aInput, aGainL, aOutputL); + AudioBlockCopyChannelWithScale(aInput, aGainR, aOutputR); +} + +void +AudioBlockPanMonoToStereo(const float aInput[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ + AudioBlockCopyChannelWithScale(aInput, aGainL, aOutputL); + AudioBlockCopyChannelWithScale(aInput, aGainR, aOutputR); +} + +void +AudioBlockPanStereoToStereo(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, bool aIsOnTheLeft, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + AudioBlockPanStereoToStereo_NEON(aInputL, aInputR, + aGainL, aGainR, aIsOnTheLeft, + aOutputL, aOutputR); + return; + } +#endif + +#ifdef USE_SSE2 + if (mozilla::supports_sse2()) { + AudioBlockPanStereoToStereo_SSE(aInputL, aInputR, + aGainL, aGainR, aIsOnTheLeft, + aOutputL, aOutputR); + return; + } +#endif + + uint32_t i; + + if (aIsOnTheLeft) { + for (i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + aOutputL[i] = aInputL[i] + aInputR[i] * aGainL; + aOutputR[i] = aInputR[i] * aGainR; + } + } else { + for (i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + aOutputL[i] = aInputL[i] * aGainL; + aOutputR[i] = aInputR[i] + aInputL[i] * aGainR; + } + } +} + +void +AudioBlockPanStereoToStereo(const float 
aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL[WEBAUDIO_BLOCK_SIZE], + float aGainR[WEBAUDIO_BLOCK_SIZE], + bool aIsOnTheLeft[WEBAUDIO_BLOCK_SIZE], + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + AudioBlockPanStereoToStereo_NEON(aInputL, aInputR, + aGainL, aGainR, aIsOnTheLeft, + aOutputL, aOutputR); + return; + } +#endif + + uint32_t i; + for (i = 0; i < WEBAUDIO_BLOCK_SIZE; i++) { + if (aIsOnTheLeft[i]) { + aOutputL[i] = aInputL[i] + aInputR[i] * aGainL[i]; + aOutputR[i] = aInputR[i] * aGainR[i]; + } else { + aOutputL[i] = aInputL[i] * aGainL[i]; + aOutputR[i] = aInputR[i] + aInputL[i] * aGainR[i]; + } + } +} + +float +AudioBufferSumOfSquares(const float* aInput, uint32_t aLength) +{ + float sum = 0.0f; + +#ifdef USE_SSE2 + if (mozilla::supports_sse()) { + const float* alignedInput = ALIGNED16(aInput); + float vLength = (aLength >> 4) << 4; + + // use scalar operations for any unaligned data at the beginning + while (aInput != alignedInput) { + sum += *aInput * *aInput; + ++aInput; + } + + sum += AudioBufferSumOfSquares_SSE(alignedInput, vLength); + + // adjust aInput and aLength to use scalar operations for any + // remaining values + aInput = alignedInput + 1; + aLength -= vLength; + } +#endif + + while (aLength--) { + sum += *aInput * *aInput; + ++aInput; + } + return sum; +} + +void +AudioNodeEngine::ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) +{ + MOZ_ASSERT(mInputCount <= 1 && mOutputCount <= 1); + *aOutput = aInput; +} + +void +AudioNodeEngine::ProcessBlocksOnPorts(AudioNodeStream* aStream, + const OutputChunks& aInput, + OutputChunks& aOutput, + bool* aFinished) +{ + MOZ_ASSERT(mInputCount > 1 || mOutputCount > 1); + // Only produce one output port, and drop all other input ports. 
+ aOutput[0] = aInput[0]; +} + +} // namespace mozilla diff --git a/dom/media/webaudio/AudioNodeEngine.h b/dom/media/webaudio/AudioNodeEngine.h new file mode 100644 index 000000000..d49b5c906 --- /dev/null +++ b/dom/media/webaudio/AudioNodeEngine.h @@ -0,0 +1,410 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#ifndef MOZILLA_AUDIONODEENGINE_H_ +#define MOZILLA_AUDIONODEENGINE_H_ + +#include "AudioSegment.h" +#include "mozilla/dom/AudioNode.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/Mutex.h" + +namespace mozilla { + +namespace dom { +struct ThreeDPoint; +class AudioParamTimeline; +class DelayNodeEngine; +struct AudioTimelineEvent; +} // namespace dom + +class AudioBlock; +class AudioNodeStream; + +/** + * This class holds onto a set of immutable channel buffers. The storage + * for the buffers must be malloced, but the buffer pointers and the malloc + * pointers can be different (e.g. if the buffers are contained inside + * some malloced object). + */ +class ThreadSharedFloatArrayBufferList final : public ThreadSharedObject +{ +public: + /** + * Construct with null channel data pointers. + */ + explicit ThreadSharedFloatArrayBufferList(uint32_t aCount) + { + mContents.SetLength(aCount); + } + /** + * Create with buffers suitable for transfer to + * JS_NewArrayBufferWithContents(). The buffer contents are uninitialized + * and so should be set using GetDataForWrite(). 
+ */ + static already_AddRefed<ThreadSharedFloatArrayBufferList> + Create(uint32_t aChannelCount, size_t aLength, const mozilla::fallible_t&); + + struct Storage final + { + Storage() : + mDataToFree(nullptr), + mFree(nullptr), + mSampleData(nullptr) + {} + ~Storage() { + if (mFree) { + mFree(mDataToFree); + } else { MOZ_ASSERT(!mDataToFree); } + } + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + // NB: mSampleData might not be owned, if it is it just points to + // mDataToFree. + return aMallocSizeOf(mDataToFree); + } + void* mDataToFree; + void (*mFree)(void*); + float* mSampleData; + }; + + /** + * This can be called on any thread. + */ + uint32_t GetChannels() const { return mContents.Length(); } + /** + * This can be called on any thread. + */ + const float* GetData(uint32_t aIndex) const { return mContents[aIndex].mSampleData; } + /** + * This can be called on any thread, but only when the calling thread is the + * only owner. + */ + float* GetDataForWrite(uint32_t aIndex) + { + MOZ_ASSERT(!IsShared()); + return mContents[aIndex].mSampleData; + } + + /** + * Call this only during initialization, before the object is handed to + * any other thread. + */ + void SetData(uint32_t aIndex, void* aDataToFree, void (*aFreeFunc)(void*), float* aData) + { + Storage* s = &mContents[aIndex]; + if (s->mFree) { + s->mFree(s->mDataToFree); + } else { + MOZ_ASSERT(!s->mDataToFree); + } + + s->mDataToFree = aDataToFree; + s->mFree = aFreeFunc; + s->mSampleData = aData; + } + + /** + * Put this object into an error state where there are no channels. 
+ */ + void Clear() { mContents.Clear(); } + + size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const override + { + size_t amount = ThreadSharedObject::SizeOfExcludingThis(aMallocSizeOf); + amount += mContents.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mContents.Length(); i++) { + amount += mContents[i].SizeOfExcludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + AutoTArray<Storage, 2> mContents; +}; + +/** + * aChunk must have been allocated by AllocateAudioBlock. + */ +void WriteZeroesToAudioBlock(AudioBlock* aChunk, uint32_t aStart, + uint32_t aLength); + +/** + * Copy with scale. aScale == 1.0f should be optimized. + */ +void AudioBufferCopyWithScale(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize); + +/** + * Pointwise multiply-add operation. aScale == 1.0f should be optimized. + */ +void AudioBufferAddWithScale(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize); + +/** + * Pointwise multiply-add operation. aScale == 1.0f should be optimized. + */ +void AudioBlockAddChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE], + float aScale, + float aOutput[WEBAUDIO_BLOCK_SIZE]); + +/** + * Pointwise copy-scaled operation. aScale == 1.0f should be optimized. + * + * Buffer size is implicitly assumed to be WEBAUDIO_BLOCK_SIZE. + */ +void AudioBlockCopyChannelWithScale(const float* aInput, + float aScale, + float* aOutput); + +/** + * Vector copy-scaled operation. + */ +void AudioBlockCopyChannelWithScale(const float aInput[WEBAUDIO_BLOCK_SIZE], + const float aScale[WEBAUDIO_BLOCK_SIZE], + float aOutput[WEBAUDIO_BLOCK_SIZE]); + +/** + * Vector complex multiplication on arbitrary sized buffers. 
+ */ +void BufferComplexMultiply(const float* aInput, + const float* aScale, + float* aOutput, + uint32_t aSize); + +/** + * Vector maximum element magnitude ( max(abs(aInput)) ). + */ +float AudioBufferPeakValue(const float* aInput, uint32_t aSize); + +/** + * In place gain. aScale == 1.0f should be optimized. + */ +void AudioBlockInPlaceScale(float aBlock[WEBAUDIO_BLOCK_SIZE], + float aScale); + +/** + * In place gain. aScale == 1.0f should be optimized. + */ +void AudioBufferInPlaceScale(float* aBlock, + float aScale, + uint32_t aSize); + +/** + * Upmix a mono input to a stereo output, scaling the two output channels by two + * different gain value. + * This algorithm is specified in the WebAudio spec. + */ +void +AudioBlockPanMonoToStereo(const float aInput[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); + +void +AudioBlockPanMonoToStereo(const float aInput[WEBAUDIO_BLOCK_SIZE], + float aGainL[WEBAUDIO_BLOCK_SIZE], + float aGainR[WEBAUDIO_BLOCK_SIZE], + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); +/** + * Pan a stereo source according to right and left gain, and the position + * (whether the listener is on the left of the source or not). + * This algorithm is specified in the WebAudio spec. + */ +void +AudioBlockPanStereoToStereo(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, bool aIsOnTheLeft, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); +void +AudioBlockPanStereoToStereo(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL[WEBAUDIO_BLOCK_SIZE], + float aGainR[WEBAUDIO_BLOCK_SIZE], + bool aIsOnTheLeft[WEBAUDIO_BLOCK_SIZE], + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); + +/** + * Return the sum of squares of all of the samples in the input. 
+ */ +float +AudioBufferSumOfSquares(const float* aInput, uint32_t aLength); + +/** + * All methods of this class and its subclasses are called on the + * MediaStreamGraph thread. + */ +class AudioNodeEngine +{ +public: + // This should be compatible with AudioNodeStream::OutputChunks. + typedef AutoTArray<AudioBlock, 1> OutputChunks; + + explicit AudioNodeEngine(dom::AudioNode* aNode) + : mNode(aNode) + , mNodeType(aNode ? aNode->NodeType() : nullptr) + , mInputCount(aNode ? aNode->NumberOfInputs() : 1) + , mOutputCount(aNode ? aNode->NumberOfOutputs() : 0) + { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_COUNT_CTOR(AudioNodeEngine); + } + virtual ~AudioNodeEngine() + { + MOZ_ASSERT(!mNode, "The node reference must be already cleared"); + MOZ_COUNT_DTOR(AudioNodeEngine); + } + + virtual dom::DelayNodeEngine* AsDelayNodeEngine() { return nullptr; } + + virtual void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) + { + NS_ERROR("Invalid SetStreamTimeParameter index"); + } + virtual void SetDoubleParameter(uint32_t aIndex, double aParam) + { + NS_ERROR("Invalid SetDoubleParameter index"); + } + virtual void SetInt32Parameter(uint32_t aIndex, int32_t aParam) + { + NS_ERROR("Invalid SetInt32Parameter index"); + } + virtual void RecvTimelineEvent(uint32_t aIndex, + dom::AudioTimelineEvent& aValue) + { + NS_ERROR("Invalid RecvTimelineEvent index"); + } + virtual void SetThreeDPointParameter(uint32_t aIndex, + const dom::ThreeDPoint& aValue) + { + NS_ERROR("Invalid SetThreeDPointParameter index"); + } + virtual void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) + { + NS_ERROR("SetBuffer called on engine that doesn't support it"); + } + // This consumes the contents of aData. aData will be emptied after this returns. 
+ virtual void SetRawArrayData(nsTArray<float>& aData) + { + NS_ERROR("SetRawArrayData called on an engine that doesn't support it"); + } + + /** + * Produce the next block of audio samples, given input samples aInput + * (the mixed data for input 0). + * aInput is guaranteed to have float sample format (if it has samples at all) + * and to have been resampled to the sampling rate for the stream, and to have + * exactly WEBAUDIO_BLOCK_SIZE samples. + * *aFinished is set to false by the caller. The callee must not set this to + * true unless silent output is produced. If set to true, we'll finish the + * stream, consider this input inactive on any downstream nodes, and not + * call this again. + */ + virtual void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished); + /** + * Produce the next block of audio samples, before input is provided. + * ProcessBlock() will be called later, and it then should not change + * aOutput. This is used only for DelayNodeEngine in a feedback loop. + */ + virtual void ProduceBlockBeforeInput(AudioNodeStream* aStream, + GraphTime aFrom, + AudioBlock* aOutput) + { + NS_NOTREACHED("ProduceBlockBeforeInput called on wrong engine\n"); + } + + /** + * Produce the next block of audio samples, given input samples in the aInput + * array. There is one input sample per active port in aInput, in order. + * This is the multi-input/output version of ProcessBlock. Only one kind + * of ProcessBlock is called on each node, depending on whether the + * number of inputs and outputs are both 1 or not. + * + * aInput is always guaranteed to not contain more input AudioChunks than the + * maximum number of inputs for the node. It is the responsibility of the + * overrides of this function to make sure they will only add a maximum number + * of AudioChunks to aOutput as advertized by the AudioNode implementation. 
+ * An engine may choose to produce fewer inputs than advertizes by the + * corresponding AudioNode, in which case it will be interpreted as a channel + * of silence. + */ + virtual void ProcessBlocksOnPorts(AudioNodeStream* aStream, + const OutputChunks& aInput, + OutputChunks& aOutput, + bool* aFinished); + + // IsActive() returns true if the engine needs to continue processing an + // unfinished stream even when it has silent or no input connections. This + // includes tail-times and when sources have been scheduled to start. If + // returning false, then the stream can be suspended. + virtual bool IsActive() const { return false; } + + bool HasNode() const + { + MOZ_ASSERT(NS_IsMainThread()); + return !!mNode; + } + + dom::AudioNode* NodeMainThread() const + { + MOZ_ASSERT(NS_IsMainThread()); + return mNode; + } + + void ClearNode() + { + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(mNode != nullptr); + mNode = nullptr; + } + + uint16_t InputCount() const { return mInputCount; } + uint16_t OutputCount() const { return mOutputCount; } + + virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + // NB: |mNode| is tracked separately so it is excluded here. 
+ return 0; + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + void SizeOfIncludingThis(MallocSizeOf aMallocSizeOf, + AudioNodeSizes& aUsage) const + { + aUsage.mEngine = SizeOfIncludingThis(aMallocSizeOf); + aUsage.mNodeType = mNodeType; + } + +private: + dom::AudioNode* mNode; // main thread only + const char* const mNodeType; + const uint16_t mInputCount; + const uint16_t mOutputCount; +}; + +} // namespace mozilla + +#endif /* MOZILLA_AUDIONODEENGINE_H_ */ diff --git a/dom/media/webaudio/AudioNodeEngineNEON.cpp b/dom/media/webaudio/AudioNodeEngineNEON.cpp new file mode 100644 index 000000000..079a1cc8b --- /dev/null +++ b/dom/media/webaudio/AudioNodeEngineNEON.cpp @@ -0,0 +1,318 @@ +/* -*- mode: c++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* this source code form is subject to the terms of the mozilla public + * license, v. 2.0. if a copy of the mpl was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioNodeEngineNEON.h" +#include <arm_neon.h> + +//#ifdef DEBUG +#if 0 // see bug 921099 + #define ASSERT_ALIGNED(ptr) \ + MOZ_ASSERT((((uintptr_t)ptr + 15) & ~0x0F) == (uintptr_t)ptr, \ + #ptr " has to be aligned 16-bytes aligned."); +#else + #define ASSERT_ALIGNED(ptr) +#endif + +#define ADDRESS_OF(array, index) ((float32_t*)&array[index]) + +namespace mozilla { +void AudioBufferAddWithScale_NEON(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize) +{ + ASSERT_ALIGNED(aInput); + ASSERT_ALIGNED(aOutput); + + float32x4_t vin0, vin1, vin2, vin3; + float32x4_t vout0, vout1, vout2, vout3; + float32x4_t vscale = vmovq_n_f32(aScale); + + uint32_t dif = aSize % 16; + aSize -= dif; + unsigned i = 0; + for (; i < aSize; i+=16) { + vin0 = vld1q_f32(ADDRESS_OF(aInput, i)); + vin1 = vld1q_f32(ADDRESS_OF(aInput, i+4)); + vin2 = vld1q_f32(ADDRESS_OF(aInput, i+8)); + vin3 = vld1q_f32(ADDRESS_OF(aInput, i+12)); + + vout0 = vld1q_f32(ADDRESS_OF(aOutput, i)); + vout1 = vld1q_f32(ADDRESS_OF(aOutput, i+4)); + vout2 = vld1q_f32(ADDRESS_OF(aOutput, i+8)); + vout3 = vld1q_f32(ADDRESS_OF(aOutput, i+12)); + + vout0 = vmlaq_f32(vout0, vin0, vscale); + vout1 = vmlaq_f32(vout1, vin1, vscale); + vout2 = vmlaq_f32(vout2, vin2, vscale); + vout3 = vmlaq_f32(vout3, vin3, vscale); + + vst1q_f32(ADDRESS_OF(aOutput, i), vout0); + vst1q_f32(ADDRESS_OF(aOutput, i+4), vout1); + vst1q_f32(ADDRESS_OF(aOutput, i+8), vout2); + vst1q_f32(ADDRESS_OF(aOutput, i+12), vout3); + } + + for (unsigned j = 0; j < dif; ++i, ++j) { + aOutput[i] += aInput[i]*aScale; + } +} +void +AudioBlockCopyChannelWithScale_NEON(const float* aInput, + float aScale, + float* aOutput) +{ + ASSERT_ALIGNED(aInput); + ASSERT_ALIGNED(aOutput); + + float32x4_t vin0, vin1, vin2, vin3; + float32x4_t vout0, vout1, vout2, vout3; + float32x4_t vscale = vmovq_n_f32(aScale); + + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=16) { + vin0 = vld1q_f32(ADDRESS_OF(aInput, i)); + vin1 = 
vld1q_f32(ADDRESS_OF(aInput, i+4)); + vin2 = vld1q_f32(ADDRESS_OF(aInput, i+8)); + vin3 = vld1q_f32(ADDRESS_OF(aInput, i+12)); + + vout0 = vmulq_f32(vin0, vscale); + vout1 = vmulq_f32(vin1, vscale); + vout2 = vmulq_f32(vin2, vscale); + vout3 = vmulq_f32(vin3, vscale); + + vst1q_f32(ADDRESS_OF(aOutput, i), vout0); + vst1q_f32(ADDRESS_OF(aOutput, i+4), vout1); + vst1q_f32(ADDRESS_OF(aOutput, i+8), vout2); + vst1q_f32(ADDRESS_OF(aOutput, i+12), vout3); + } +} + +void +AudioBlockCopyChannelWithScale_NEON(const float aInput[WEBAUDIO_BLOCK_SIZE], + const float aScale[WEBAUDIO_BLOCK_SIZE], + float aOutput[WEBAUDIO_BLOCK_SIZE]) +{ + ASSERT_ALIGNED(aInput); + ASSERT_ALIGNED(aScale); + ASSERT_ALIGNED(aOutput); + + float32x4_t vin0, vin1, vin2, vin3; + float32x4_t vout0, vout1, vout2, vout3; + float32x4_t vscale0, vscale1, vscale2, vscale3; + + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=16) { + vin0 = vld1q_f32(ADDRESS_OF(aInput, i)); + vin1 = vld1q_f32(ADDRESS_OF(aInput, i+4)); + vin2 = vld1q_f32(ADDRESS_OF(aInput, i+8)); + vin3 = vld1q_f32(ADDRESS_OF(aInput, i+12)); + + vscale0 = vld1q_f32(ADDRESS_OF(aScale, i)); + vscale1 = vld1q_f32(ADDRESS_OF(aScale, i+4)); + vscale2 = vld1q_f32(ADDRESS_OF(aScale, i+8)); + vscale3 = vld1q_f32(ADDRESS_OF(aScale, i+12)); + + vout0 = vmulq_f32(vin0, vscale0); + vout1 = vmulq_f32(vin1, vscale1); + vout2 = vmulq_f32(vin2, vscale2); + vout3 = vmulq_f32(vin3, vscale3); + + vst1q_f32(ADDRESS_OF(aOutput, i), vout0); + vst1q_f32(ADDRESS_OF(aOutput, i+4), vout1); + vst1q_f32(ADDRESS_OF(aOutput, i+8), vout2); + vst1q_f32(ADDRESS_OF(aOutput, i+12), vout3); + } +} + +void +AudioBufferInPlaceScale_NEON(float* aBlock, + float aScale, + uint32_t aSize) +{ + ASSERT_ALIGNED(aBlock); + + float32x4_t vin0, vin1, vin2, vin3; + float32x4_t vout0, vout1, vout2, vout3; + float32x4_t vscale = vmovq_n_f32(aScale); + + uint32_t dif = aSize % 16; + uint32_t vectorSize = aSize - dif; + uint32_t i = 0; + for (; i < vectorSize; i+=16) { + vin0 = 
vld1q_f32(ADDRESS_OF(aBlock, i)); + vin1 = vld1q_f32(ADDRESS_OF(aBlock, i+4)); + vin2 = vld1q_f32(ADDRESS_OF(aBlock, i+8)); + vin3 = vld1q_f32(ADDRESS_OF(aBlock, i+12)); + + vout0 = vmulq_f32(vin0, vscale); + vout1 = vmulq_f32(vin1, vscale); + vout2 = vmulq_f32(vin2, vscale); + vout3 = vmulq_f32(vin3, vscale); + + vst1q_f32(ADDRESS_OF(aBlock, i), vout0); + vst1q_f32(ADDRESS_OF(aBlock, i+4), vout1); + vst1q_f32(ADDRESS_OF(aBlock, i+8), vout2); + vst1q_f32(ADDRESS_OF(aBlock, i+12), vout3); + } + + for (unsigned j = 0; j < dif; ++i, ++j) { + aBlock[i] *= aScale; + } +} + +void +AudioBlockPanStereoToStereo_NEON(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, bool aIsOnTheLeft, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ + ASSERT_ALIGNED(aInputL); + ASSERT_ALIGNED(aInputR); + ASSERT_ALIGNED(aOutputL); + ASSERT_ALIGNED(aOutputR); + + float32x4_t vinL0, vinL1; + float32x4_t vinR0, vinR1; + float32x4_t voutL0, voutL1; + float32x4_t voutR0, voutR1; + float32x4_t vscaleL = vmovq_n_f32(aGainL); + float32x4_t vscaleR = vmovq_n_f32(aGainR); + + if (aIsOnTheLeft) { + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=8) { + vinL0 = vld1q_f32(ADDRESS_OF(aInputL, i)); + vinL1 = vld1q_f32(ADDRESS_OF(aInputL, i+4)); + + vinR0 = vld1q_f32(ADDRESS_OF(aInputR, i)); + vinR1 = vld1q_f32(ADDRESS_OF(aInputR, i+4)); + + voutL0 = vmlaq_f32(vinL0, vinR0, vscaleL); + voutL1 = vmlaq_f32(vinL1, vinR1, vscaleL); + + vst1q_f32(ADDRESS_OF(aOutputL, i), voutL0); + vst1q_f32(ADDRESS_OF(aOutputL, i+4), voutL1); + + voutR0 = vmulq_f32(vinR0, vscaleR); + voutR1 = vmulq_f32(vinR1, vscaleR); + + vst1q_f32(ADDRESS_OF(aOutputR, i), voutR0); + vst1q_f32(ADDRESS_OF(aOutputR, i+4), voutR1); + } + } else { + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=8) { + vinL0 = vld1q_f32(ADDRESS_OF(aInputL, i)); + vinL1 = vld1q_f32(ADDRESS_OF(aInputL, i+4)); + + vinR0 = vld1q_f32(ADDRESS_OF(aInputR, i)); + vinR1 = 
vld1q_f32(ADDRESS_OF(aInputR, i+4)); + + voutL0 = vmulq_f32(vinL0, vscaleL); + voutL1 = vmulq_f32(vinL1, vscaleL); + + vst1q_f32(ADDRESS_OF(aOutputL, i), voutL0); + vst1q_f32(ADDRESS_OF(aOutputL, i+4), voutL1); + + voutR0 = vmlaq_f32(vinR0, vinL0, vscaleR); + voutR1 = vmlaq_f32(vinR1, vinL1, vscaleR); + + vst1q_f32(ADDRESS_OF(aOutputR, i), voutR0); + vst1q_f32(ADDRESS_OF(aOutputR, i+4), voutR1); + } + } +} + +void +AudioBlockPanStereoToStereo_NEON(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL[WEBAUDIO_BLOCK_SIZE], + float aGainR[WEBAUDIO_BLOCK_SIZE], + const bool aIsOnTheLeft[WEBAUDIO_BLOCK_SIZE], + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ + ASSERT_ALIGNED(aInputL); + ASSERT_ALIGNED(aInputR); + ASSERT_ALIGNED(aGainL); + ASSERT_ALIGNED(aGainR); + ASSERT_ALIGNED(aIsOnTheLeft); + ASSERT_ALIGNED(aOutputL); + ASSERT_ALIGNED(aOutputR); + + float32x4_t vinL0, vinL1; + float32x4_t vinR0, vinR1; + float32x4_t voutL0, voutL1; + float32x4_t voutR0, voutR1; + float32x4_t vscaleL0, vscaleL1; + float32x4_t vscaleR0, vscaleR1; + float32x4_t onleft0, onleft1, notonleft0, notonleft1; + + float32x4_t zero = {0, 0, 0, 0}; + uint8x8_t isOnTheLeft; + + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=8) { + vinL0 = vld1q_f32(ADDRESS_OF(aInputL, i)); + vinL1 = vld1q_f32(ADDRESS_OF(aInputL, i+4)); + + vinR0 = vld1q_f32(ADDRESS_OF(aInputR, i)); + vinR1 = vld1q_f32(ADDRESS_OF(aInputR, i+4)); + + vscaleL0 = vld1q_f32(ADDRESS_OF(aGainL, i)); + vscaleL1 = vld1q_f32(ADDRESS_OF(aGainL, i+4)); + + vscaleR0 = vld1q_f32(ADDRESS_OF(aGainR, i)); + vscaleR1 = vld1q_f32(ADDRESS_OF(aGainR, i+4)); + + // Load output with boolean "on the left" values. This assumes that + // bools are stored as a single byte. 
+ isOnTheLeft = vld1_u8((uint8_t *)&aIsOnTheLeft[i]); + voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 0), voutL0, 0); + voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 1), voutL0, 1); + voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 2), voutL0, 2); + voutL0 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 3), voutL0, 3); + voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 4), voutL1, 0); + voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 5), voutL1, 1); + voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 6), voutL1, 2); + voutL1 = vsetq_lane_f32(vget_lane_u8(isOnTheLeft, 7), voutL1, 3); + + // Convert the boolean values into masks by setting all bits to 1 + // if true. + voutL0 = (float32x4_t)vcgtq_f32(voutL0, zero); + voutL1 = (float32x4_t)vcgtq_f32(voutL1, zero); + + // The right output masks are the same as the left masks + voutR0 = voutL0; + voutR1 = voutL1; + + // Calculate left channel assuming isOnTheLeft + onleft0 = vmlaq_f32(vinL0, vinR0, vscaleL0); + onleft1 = vmlaq_f32(vinL1, vinR1, vscaleL0); + + // Calculate left channel assuming not isOnTheLeft + notonleft0 = vmulq_f32(vinL0, vscaleL0); + notonleft1 = vmulq_f32(vinL1, vscaleL1); + + // Write results using previously stored masks + voutL0 = vbslq_f32((uint32x4_t)voutL0, onleft0, notonleft0); + voutL1 = vbslq_f32((uint32x4_t)voutL1, onleft1, notonleft1); + + // Calculate right channel assuming isOnTheLeft + onleft0 = vmulq_f32(vinR0, vscaleR0); + onleft1 = vmulq_f32(vinR1, vscaleR1); + + // Calculate right channel assuming not isOnTheLeft + notonleft0 = vmlaq_f32(vinR0, vinL0, vscaleR0); + notonleft1 = vmlaq_f32(vinR1, vinL1, vscaleR1); + + // Write results using previously stored masks + voutR0 = vbslq_f32((uint32x4_t)voutR0, onleft0, notonleft0); + voutR1 = vbslq_f32((uint32x4_t)voutR1, onleft1, notonleft1); + + vst1q_f32(ADDRESS_OF(aOutputL, i), voutL0); + vst1q_f32(ADDRESS_OF(aOutputL, i+4), voutL1); + vst1q_f32(ADDRESS_OF(aOutputR, i), voutR0); + vst1q_f32(ADDRESS_OF(aOutputR, i+4), voutR1); + 
} +} +} diff --git a/dom/media/webaudio/AudioNodeEngineNEON.h b/dom/media/webaudio/AudioNodeEngineNEON.h new file mode 100644 index 000000000..2b3e89b75 --- /dev/null +++ b/dom/media/webaudio/AudioNodeEngineNEON.h @@ -0,0 +1,49 @@ +/* -*- mode: c++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* this source code form is subject to the terms of the mozilla public + * license, v. 2.0. if a copy of the mpl was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_AUDIONODEENGINENEON_H_ +#define MOZILLA_AUDIONODEENGINENEON_H_ + +#include "AudioNodeEngine.h" + +namespace mozilla { +void AudioBufferAddWithScale_NEON(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize); + +void +AudioBlockCopyChannelWithScale_NEON(const float* aInput, + float aScale, + float* aOutput); + +void +AudioBlockCopyChannelWithScale_NEON(const float aInput[WEBAUDIO_BLOCK_SIZE], + const float aScale[WEBAUDIO_BLOCK_SIZE], + float aOutput[WEBAUDIO_BLOCK_SIZE]); + +void +AudioBufferInPlaceScale_NEON(float* aBlock, + float aScale, + uint32_t aSize); + +void +AudioBlockPanStereoToStereo_NEON(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, bool aIsOnTheLeft, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); + +void +AudioBlockPanStereoToStereo_NEON(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL[WEBAUDIO_BLOCK_SIZE], + float aGainR[WEBAUDIO_BLOCK_SIZE], + const bool aIsOnTheLeft[WEBAUDIO_BLOCK_SIZE], + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); +} + +#endif /* MOZILLA_AUDIONODEENGINENEON_H_ */ diff --git a/dom/media/webaudio/AudioNodeEngineSSE2.cpp b/dom/media/webaudio/AudioNodeEngineSSE2.cpp new file mode 100644 index 000000000..a03323239 --- /dev/null +++ b/dom/media/webaudio/AudioNodeEngineSSE2.cpp @@ -0,0 +1,315 @@ +/* -*- 
mode: c++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* this source code form is subject to the terms of the mozilla public + * license, v. 2.0. if a copy of the mpl was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AudioNodeEngineSSE2.h" +#include "AlignmentUtils.h" +#include <emmintrin.h> + + +namespace mozilla { +void +AudioBufferAddWithScale_SSE(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize) +{ + __m128 vin0, vin1, vin2, vin3, + vscaled0, vscaled1, vscaled2, vscaled3, + vout0, vout1, vout2, vout3, + vgain; + + ASSERT_ALIGNED16(aInput); + ASSERT_ALIGNED16(aOutput); + ASSERT_MULTIPLE16(aSize); + + vgain = _mm_load1_ps(&aScale); + + for (unsigned i = 0; i < aSize; i+=16) { + vin0 = _mm_load_ps(&aInput[i]); + vin1 = _mm_load_ps(&aInput[i + 4]); + vin2 = _mm_load_ps(&aInput[i + 8]); + vin3 = _mm_load_ps(&aInput[i + 12]); + + vscaled0 = _mm_mul_ps(vin0, vgain); + vscaled1 = _mm_mul_ps(vin1, vgain); + vscaled2 = _mm_mul_ps(vin2, vgain); + vscaled3 = _mm_mul_ps(vin3, vgain); + + vin0 = _mm_load_ps(&aOutput[i]); + vin1 = _mm_load_ps(&aOutput[i + 4]); + vin2 = _mm_load_ps(&aOutput[i + 8]); + vin3 = _mm_load_ps(&aOutput[i + 12]); + + vout0 = _mm_add_ps(vin0, vscaled0); + vout1 = _mm_add_ps(vin1, vscaled1); + vout2 = _mm_add_ps(vin2, vscaled2); + vout3 = _mm_add_ps(vin3, vscaled3); + + _mm_store_ps(&aOutput[i], vout0); + _mm_store_ps(&aOutput[i + 4], vout1); + _mm_store_ps(&aOutput[i + 8], vout2); + _mm_store_ps(&aOutput[i + 12], vout3); + } +} + +void +AudioBlockCopyChannelWithScale_SSE(const float* aInput, + float aScale, + float* aOutput) +{ + __m128 vin0, vin1, vin2, vin3, + vout0, vout1, vout2, vout3; + + ASSERT_ALIGNED16(aInput); + ASSERT_ALIGNED16(aOutput); + + __m128 vgain = _mm_load1_ps(&aScale); + + for (unsigned i = 0 ; i < WEBAUDIO_BLOCK_SIZE; i+=16) { + vin0 = _mm_load_ps(&aInput[i]); + vin1 = _mm_load_ps(&aInput[i + 4]); + vin2 = _mm_load_ps(&aInput[i + 
8]); + vin3 = _mm_load_ps(&aInput[i + 12]); + vout0 = _mm_mul_ps(vin0, vgain); + vout1 = _mm_mul_ps(vin1, vgain); + vout2 = _mm_mul_ps(vin2, vgain); + vout3 = _mm_mul_ps(vin3, vgain); + _mm_store_ps(&aOutput[i], vout0); + _mm_store_ps(&aOutput[i + 4], vout1); + _mm_store_ps(&aOutput[i + 8], vout2); + _mm_store_ps(&aOutput[i + 12], vout3); + } +} + +void +AudioBlockCopyChannelWithScale_SSE(const float aInput[WEBAUDIO_BLOCK_SIZE], + const float aScale[WEBAUDIO_BLOCK_SIZE], + float aOutput[WEBAUDIO_BLOCK_SIZE]) +{ + __m128 vin0, vin1, vin2, vin3, + vscaled0, vscaled1, vscaled2, vscaled3, + vout0, vout1, vout2, vout3; + + ASSERT_ALIGNED16(aInput); + ASSERT_ALIGNED16(aScale); + ASSERT_ALIGNED16(aOutput); + + for (unsigned i = 0 ; i < WEBAUDIO_BLOCK_SIZE; i+=16) { + vscaled0 = _mm_load_ps(&aScale[i]); + vscaled1 = _mm_load_ps(&aScale[i+4]); + vscaled2 = _mm_load_ps(&aScale[i+8]); + vscaled3 = _mm_load_ps(&aScale[i+12]); + + vin0 = _mm_load_ps(&aInput[i]); + vin1 = _mm_load_ps(&aInput[i + 4]); + vin2 = _mm_load_ps(&aInput[i + 8]); + vin3 = _mm_load_ps(&aInput[i + 12]); + + vout0 = _mm_mul_ps(vin0, vscaled0); + vout1 = _mm_mul_ps(vin1, vscaled1); + vout2 = _mm_mul_ps(vin2, vscaled2); + vout3 = _mm_mul_ps(vin3, vscaled3); + + _mm_store_ps(&aOutput[i], vout0); + _mm_store_ps(&aOutput[i + 4], vout1); + _mm_store_ps(&aOutput[i + 8], vout2); + _mm_store_ps(&aOutput[i + 12], vout3); + } +} + +void +AudioBufferInPlaceScale_SSE(float* aBlock, + float aScale, + uint32_t aSize) +{ + __m128 vout0, vout1, vout2, vout3, + vin0, vin1, vin2, vin3; + + ASSERT_ALIGNED16(aBlock); + ASSERT_MULTIPLE16(aSize); + + __m128 vgain = _mm_load1_ps(&aScale); + + for (unsigned i = 0; i < aSize; i+=16) { + vin0 = _mm_load_ps(&aBlock[i]); + vin1 = _mm_load_ps(&aBlock[i + 4]); + vin2 = _mm_load_ps(&aBlock[i + 8]); + vin3 = _mm_load_ps(&aBlock[i + 12]); + vout0 = _mm_mul_ps(vin0, vgain); + vout1 = _mm_mul_ps(vin1, vgain); + vout2 = _mm_mul_ps(vin2, vgain); + vout3 = _mm_mul_ps(vin3, vgain); + 
_mm_store_ps(&aBlock[i], vout0); + _mm_store_ps(&aBlock[i + 4], vout1); + _mm_store_ps(&aBlock[i + 8], vout2); + _mm_store_ps(&aBlock[i + 12], vout3); + } +} + +void +AudioBlockPanStereoToStereo_SSE(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, bool aIsOnTheLeft, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]) +{ + __m128 vinl0, vinr0, vinl1, vinr1, + vout0, vout1, + vscaled0, vscaled1, + vgainl, vgainr; + + ASSERT_ALIGNED16(aInputL); + ASSERT_ALIGNED16(aInputR); + ASSERT_ALIGNED16(aOutputL); + ASSERT_ALIGNED16(aOutputR); + + vgainl = _mm_load1_ps(&aGainL); + vgainr = _mm_load1_ps(&aGainR); + + if (aIsOnTheLeft) { + for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=8) { + vinl0 = _mm_load_ps(&aInputL[i]); + vinr0 = _mm_load_ps(&aInputR[i]); + vinl1 = _mm_load_ps(&aInputL[i+4]); + vinr1 = _mm_load_ps(&aInputR[i+4]); + + /* left channel : aOutputL = aInputL + aInputR * gainL */ + vscaled0 = _mm_mul_ps(vinr0, vgainl); + vscaled1 = _mm_mul_ps(vinr1, vgainl); + vout0 = _mm_add_ps(vscaled0, vinl0); + vout1 = _mm_add_ps(vscaled1, vinl1); + _mm_store_ps(&aOutputL[i], vout0); + _mm_store_ps(&aOutputL[i+4], vout1); + + /* right channel : aOutputR = aInputR * gainR */ + vscaled0 = _mm_mul_ps(vinr0, vgainr); + vscaled1 = _mm_mul_ps(vinr1, vgainr); + _mm_store_ps(&aOutputR[i], vscaled0); + _mm_store_ps(&aOutputR[i+4], vscaled1); + } + } else { + for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; i+=8) { + vinl0 = _mm_load_ps(&aInputL[i]); + vinr0 = _mm_load_ps(&aInputR[i]); + vinl1 = _mm_load_ps(&aInputL[i+4]); + vinr1 = _mm_load_ps(&aInputR[i+4]); + + /* left channel : aInputL * gainL */ + vscaled0 = _mm_mul_ps(vinl0, vgainl); + vscaled1 = _mm_mul_ps(vinl1, vgainl); + _mm_store_ps(&aOutputL[i], vscaled0); + _mm_store_ps(&aOutputL[i+4], vscaled1); + + /* right channel: aOutputR = aInputR + aInputL * gainR */ + vscaled0 = _mm_mul_ps(vinl0, vgainr); + vscaled1 = _mm_mul_ps(vinl1, 
vgainr); + vout0 = _mm_add_ps(vscaled0, vinr0); + vout1 = _mm_add_ps(vscaled1, vinr1); + _mm_store_ps(&aOutputR[i], vout0); + _mm_store_ps(&aOutputR[i+4], vout1); + } + } +} + +void BufferComplexMultiply_SSE(const float* aInput, + const float* aScale, + float* aOutput, + uint32_t aSize) +{ + unsigned i; + __m128 in0, in1, in2, in3, + outreal0, outreal1, outreal2, outreal3, + outimag0, outimag1, outimag2, outimag3; + + ASSERT_ALIGNED16(aInput); + ASSERT_ALIGNED16(aScale); + ASSERT_ALIGNED16(aOutput); + ASSERT_MULTIPLE16(aSize); + + for (i = 0; i < aSize * 2; i += 16) { + in0 = _mm_load_ps(&aInput[i]); + in1 = _mm_load_ps(&aInput[i + 4]); + in2 = _mm_load_ps(&aInput[i + 8]); + in3 = _mm_load_ps(&aInput[i + 12]); + + outreal0 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(2, 0, 2, 0)); + outimag0 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 1, 3, 1)); + outreal2 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(2, 0, 2, 0)); + outimag2 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 1, 3, 1)); + + in0 = _mm_load_ps(&aScale[i]); + in1 = _mm_load_ps(&aScale[i + 4]); + in2 = _mm_load_ps(&aScale[i + 8]); + in3 = _mm_load_ps(&aScale[i + 12]); + + outreal1 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(2, 0, 2, 0)); + outimag1 = _mm_shuffle_ps(in0, in1, _MM_SHUFFLE(3, 1, 3, 1)); + outreal3 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(2, 0, 2, 0)); + outimag3 = _mm_shuffle_ps(in2, in3, _MM_SHUFFLE(3, 1, 3, 1)); + + in0 = _mm_sub_ps(_mm_mul_ps(outreal0, outreal1), + _mm_mul_ps(outimag0, outimag1)); + in1 = _mm_add_ps(_mm_mul_ps(outreal0, outimag1), + _mm_mul_ps(outimag0, outreal1)); + in2 = _mm_sub_ps(_mm_mul_ps(outreal2, outreal3), + _mm_mul_ps(outimag2, outimag3)); + in3 = _mm_add_ps(_mm_mul_ps(outreal2, outimag3), + _mm_mul_ps(outimag2, outreal3)); + + outreal0 = _mm_unpacklo_ps(in0, in1); + outreal1 = _mm_unpackhi_ps(in0, in1); + outreal2 = _mm_unpacklo_ps(in2, in3); + outreal3 = _mm_unpackhi_ps(in2, in3); + + _mm_store_ps(&aOutput[i], outreal0); + _mm_store_ps(&aOutput[i + 4], outreal1); + 
_mm_store_ps(&aOutput[i + 8], outreal2); + _mm_store_ps(&aOutput[i + 12], outreal3); + } +} + +float +AudioBufferSumOfSquares_SSE(const float* aInput, uint32_t aLength) +{ + unsigned i; + __m128 in0, in1, in2, in3, + acc0, acc1, acc2, acc3; + float out[4]; + + ASSERT_ALIGNED16(aInput); + ASSERT_MULTIPLE16(aLength); + + acc0 = _mm_setzero_ps(); + acc1 = _mm_setzero_ps(); + acc2 = _mm_setzero_ps(); + acc3 = _mm_setzero_ps(); + + for (i = 0; i < aLength; i+=16) { + in0 = _mm_load_ps(&aInput[i]); + in1 = _mm_load_ps(&aInput[i + 4]); + in2 = _mm_load_ps(&aInput[i + 8]); + in3 = _mm_load_ps(&aInput[i + 12]); + + in0 = _mm_mul_ps(in0, in0); + in1 = _mm_mul_ps(in1, in1); + in2 = _mm_mul_ps(in2, in2); + in3 = _mm_mul_ps(in3, in3); + + acc0 = _mm_add_ps(acc0, in0); + acc1 = _mm_add_ps(acc1, in1); + acc2 = _mm_add_ps(acc2, in2); + acc3 = _mm_add_ps(acc3, in3); + } + + acc0 = _mm_add_ps(acc0, acc1); + acc0 = _mm_add_ps(acc0, acc2); + acc0 = _mm_add_ps(acc0, acc3); + + _mm_store_ps(out, acc0); + + return out[0] + out[1] + out[2] + out[3]; +} + +} diff --git a/dom/media/webaudio/AudioNodeEngineSSE2.h b/dom/media/webaudio/AudioNodeEngineSSE2.h new file mode 100644 index 000000000..d24641249 --- /dev/null +++ b/dom/media/webaudio/AudioNodeEngineSSE2.h @@ -0,0 +1,45 @@ +/* -*- mode: c++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* this source code form is subject to the terms of the mozilla public + * license, v. 2.0. if a copy of the mpl was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioNodeEngine.h" + +namespace mozilla { +void +AudioBufferAddWithScale_SSE(const float* aInput, + float aScale, + float* aOutput, + uint32_t aSize); + +void +AudioBlockCopyChannelWithScale_SSE(const float* aInput, + float aScale, + float* aOutput); + +void +AudioBlockCopyChannelWithScale_SSE(const float aInput[WEBAUDIO_BLOCK_SIZE], + const float aScale[WEBAUDIO_BLOCK_SIZE], + float aOutput[WEBAUDIO_BLOCK_SIZE]); + +void +AudioBufferInPlaceScale_SSE(float* aBlock, + float aScale, + uint32_t aSize); + +void +AudioBlockPanStereoToStereo_SSE(const float aInputL[WEBAUDIO_BLOCK_SIZE], + const float aInputR[WEBAUDIO_BLOCK_SIZE], + float aGainL, float aGainR, bool aIsOnTheLeft, + float aOutputL[WEBAUDIO_BLOCK_SIZE], + float aOutputR[WEBAUDIO_BLOCK_SIZE]); + +float +AudioBufferSumOfSquares_SSE(const float* aInput, uint32_t aLength); + +void +BufferComplexMultiply_SSE(const float* aInput, + const float* aScale, + float* aOutput, + uint32_t aSize); +} diff --git a/dom/media/webaudio/AudioNodeExternalInputStream.cpp b/dom/media/webaudio/AudioNodeExternalInputStream.cpp new file mode 100644 index 000000000..2dff1488b --- /dev/null +++ b/dom/media/webaudio/AudioNodeExternalInputStream.cpp @@ -0,0 +1,238 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AlignedTArray.h" +#include "AlignmentUtils.h" +#include "AudioNodeEngine.h" +#include "AudioNodeExternalInputStream.h" +#include "AudioChannelFormat.h" +#include "mozilla/dom/MediaStreamAudioSourceNode.h" + +using namespace mozilla::dom; + +namespace mozilla { + +AudioNodeExternalInputStream::AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate) + : AudioNodeStream(aEngine, NO_STREAM_FLAGS, aSampleRate) +{ + MOZ_COUNT_CTOR(AudioNodeExternalInputStream); +} + +AudioNodeExternalInputStream::~AudioNodeExternalInputStream() +{ + MOZ_COUNT_DTOR(AudioNodeExternalInputStream); +} + +/* static */ already_AddRefed<AudioNodeExternalInputStream> +AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph, + AudioNodeEngine* aEngine) +{ + AudioContext* ctx = aEngine->NodeMainThread()->Context(); + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aGraph->GraphRate() == ctx->SampleRate()); + + RefPtr<AudioNodeExternalInputStream> stream = + new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate()); + stream->mSuspendedCount += ctx->ShouldSuspendNewStream(); + aGraph->AddStream(stream); + return stream.forget(); +} + +/** + * Copies the data in aInput to aOffsetInBlock within aBlock. + * aBlock must have been allocated with AllocateInputBlock and have a channel + * count that's a superset of the channels in aInput. 
 */
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    // Null chunk: no source data.  Fill the channel-pointer array with
    // nullptr so the loop below zeroes every output channel.
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  // Copy (and sample-format-convert) each channel into the block at the
  // requested offset, applying the chunk's volume; channels left null by
  // the upmix (or the null-chunk path) are written as silence.
  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}

/**
 * Converts the data in aSegment to a single chunk aBlock. aSegment must have
 * duration WEBAUDIO_BLOCK_SIZE. aFallbackChannelCount is a superset of the
 * channels in every chunk of aSegment. aBlock must be float format or null.
+ */ +static void ConvertSegmentToAudioBlock(AudioSegment* aSegment, + AudioBlock* aBlock, + int32_t aFallbackChannelCount) +{ + NS_ASSERTION(aSegment->GetDuration() == WEBAUDIO_BLOCK_SIZE, "Bad segment duration"); + + { + AudioSegment::ChunkIterator ci(*aSegment); + NS_ASSERTION(!ci.IsEnded(), "Should be at least one chunk!"); + if (ci->GetDuration() == WEBAUDIO_BLOCK_SIZE && + (ci->IsNull() || ci->mBufferFormat == AUDIO_FORMAT_FLOAT32)) { + + bool aligned = true; + for (size_t i = 0; i < ci->mChannelData.Length(); ++i) { + if (!IS_ALIGNED16(ci->mChannelData[i])) { + aligned = false; + break; + } + } + + // Return this chunk directly to avoid copying data. + if (aligned) { + *aBlock = *ci; + return; + } + } + } + + aBlock->AllocateChannels(aFallbackChannelCount); + + uint32_t duration = 0; + for (AudioSegment::ChunkIterator ci(*aSegment); !ci.IsEnded(); ci.Next()) { + switch (ci->mBufferFormat) { + case AUDIO_FORMAT_S16: { + CopyChunkToBlock<int16_t>(*ci, aBlock, duration); + break; + } + case AUDIO_FORMAT_FLOAT32: { + CopyChunkToBlock<float>(*ci, aBlock, duration); + break; + } + case AUDIO_FORMAT_SILENCE: { + // The actual type of the sample does not matter here, but we still need + // to send some audio to the graph. + CopyChunkToBlock<float>(*ci, aBlock, duration); + break; + } + } + duration += ci->GetDuration(); + } +} + +void +AudioNodeExternalInputStream::ProcessInput(GraphTime aFrom, GraphTime aTo, + uint32_t aFlags) +{ + // According to spec, number of outputs is always 1. + MOZ_ASSERT(mLastChunks.Length() == 1); + + // GC stuff can result in our input stream being destroyed before this stream. + // Handle that. 
  // Disabled (principal check failed), no connected input, or passthrough:
  // this block's output is silence.
  if (!IsEnabled() || mInputs.IsEmpty() || mPassThrough) {
    mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  MOZ_ASSERT(mInputs.Length() == 1);

  MediaStream* source = mInputs[0]->GetSource();
  AutoTArray<AudioSegment,1> audioSegments;
  uint32_t inputChannels = 0;
  // Build one block-length AudioSegment per eligible audio track of the
  // single input stream; video and null tracks are skipped.
  for (StreamTracks::TrackIter tracks(source->mTracks);
       !tracks.IsEnded(); tracks.Next()) {
    const StreamTracks::Track& inputTrack = *tracks;
    if (!mInputs[0]->PassTrackThrough(tracks->GetID())) {
      continue;
    }

    if (inputTrack.GetSegment()->GetType() == MediaSegment::VIDEO) {
      MOZ_ASSERT(false, "AudioNodeExternalInputStream shouldn't have video tracks");
      continue;
    }

    const AudioSegment& inputSegment =
      *static_cast<AudioSegment*>(inputTrack.GetSegment());
    if (inputSegment.IsNull()) {
      continue;
    }

    AudioSegment& segment = *audioSegments.AppendElement();
    GraphTime next;
    // Walk the [aFrom, aTo) graph-time range interval by interval, appending
    // either real input data or silence for each interval.
    for (GraphTime t = aFrom; t < aTo; t = next) {
      MediaInputPort::InputInterval interval = mInputs[0]->GetNextInputInterval(t);
      interval.mEnd = std::min(interval.mEnd, aTo);
      if (interval.mStart >= interval.mEnd)
        break;
      next = interval.mEnd;

      // We know this stream does not block during the processing interval ---
      // we're not finished, we don't underrun, and we're not suspended.
      StreamTime outputStart = GraphTimeToStreamTime(interval.mStart);
      StreamTime outputEnd = GraphTimeToStreamTime(interval.mEnd);
      StreamTime ticks = outputEnd - outputStart;

      if (interval.mInputIsBlocked) {
        // A blocked input contributes silence for the whole interval.
        segment.AppendNullData(ticks);
      } else {
        // The input stream is not blocked in this interval, so no need to call
        // GraphTimeToStreamTimeWithBlocking.
+ StreamTime inputStart = + std::min(inputSegment.GetDuration(), + source->GraphTimeToStreamTime(interval.mStart)); + StreamTime inputEnd = + std::min(inputSegment.GetDuration(), + source->GraphTimeToStreamTime(interval.mEnd)); + + segment.AppendSlice(inputSegment, inputStart, inputEnd); + // Pad if we're looking past the end of the track + segment.AppendNullData(ticks - (inputEnd - inputStart)); + } + } + + for (AudioSegment::ChunkIterator iter(segment); !iter.IsEnded(); iter.Next()) { + inputChannels = GetAudioChannelsSuperset(inputChannels, iter->ChannelCount()); + } + } + + uint32_t accumulateIndex = 0; + if (inputChannels) { + DownmixBufferType downmixBuffer; + ASSERT_ALIGNED16(downmixBuffer.Elements()); + for (uint32_t i = 0; i < audioSegments.Length(); ++i) { + AudioBlock tmpChunk; + ConvertSegmentToAudioBlock(&audioSegments[i], &tmpChunk, inputChannels); + if (!tmpChunk.IsNull()) { + if (accumulateIndex == 0) { + mLastChunks[0].AllocateChannels(inputChannels); + } + AccumulateInputChunk(accumulateIndex, tmpChunk, &mLastChunks[0], &downmixBuffer); + accumulateIndex++; + } + } + } + if (accumulateIndex == 0) { + mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); + } +} + +bool +AudioNodeExternalInputStream::IsEnabled() +{ + return ((MediaStreamAudioSourceNodeEngine*)Engine())->IsEnabled(); +} + +} // namespace mozilla diff --git a/dom/media/webaudio/AudioNodeExternalInputStream.h b/dom/media/webaudio/AudioNodeExternalInputStream.h new file mode 100644 index 000000000..83d2bba74 --- /dev/null +++ b/dom/media/webaudio/AudioNodeExternalInputStream.h @@ -0,0 +1,45 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
 */

#ifndef MOZILLA_AUDIONODEEXTERNALINPUTSTREAM_H_
#define MOZILLA_AUDIONODEEXTERNALINPUTSTREAM_H_

#include "MediaStreamGraph.h"
#include "AudioNodeStream.h"
#include "mozilla/Atomics.h"

namespace mozilla {

/**
 * This is a MediaStream implementation that acts for a Web Audio node but
 * unlike other AudioNodeStreams, supports any kind of MediaStream as an
 * input --- handling any number of audio tracks and handling blocking of
 * the input MediaStream.
 */
class AudioNodeExternalInputStream final : public AudioNodeStream
{
public:
  static already_AddRefed<AudioNodeExternalInputStream>
  Create(MediaStreamGraph* aGraph, AudioNodeEngine* aEngine);

protected:
  AudioNodeExternalInputStream(AudioNodeEngine* aEngine, TrackRate aSampleRate);
  ~AudioNodeExternalInputStream();

public:
  // Mixes the audio tracks of the single input stream into one output chunk
  // per processing block (see AudioNodeExternalInputStream.cpp).
  void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override;

private:
  /**
   * Determines if this is enabled or not. Disabled nodes produce silence.
   * This node becomes disabled if the document principal does not subsume the
   * DOMMediaStream principal.
   */
  bool IsEnabled();
};

} // namespace mozilla

/* Fixed: the closing-guard comment previously read MOZILLA_AUDIONODESTREAM_H_,
 * which is a different header's guard macro. */
#endif /* MOZILLA_AUDIONODEEXTERNALINPUTSTREAM_H_ */
diff --git a/dom/media/webaudio/AudioNodeStream.cpp b/dom/media/webaudio/AudioNodeStream.cpp
new file mode 100644
index 000000000..0e5aa3fc7
--- /dev/null
+++ b/dom/media/webaudio/AudioNodeStream.cpp
@@ -0,0 +1,783 @@
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +#include "AudioNodeStream.h" + +#include "MediaStreamGraphImpl.h" +#include "MediaStreamListener.h" +#include "AudioNodeEngine.h" +#include "ThreeDPoint.h" +#include "AudioChannelFormat.h" +#include "AudioParamTimeline.h" +#include "AudioContext.h" +#include "nsMathUtils.h" + +using namespace mozilla::dom; + +namespace mozilla { + +/** + * An AudioNodeStream produces a single audio track with ID + * AUDIO_TRACK. This track has rate AudioContext::sIdealAudioRate + * for regular audio contexts, and the rate requested by the web content + * for offline audio contexts. + * Each chunk in the track is a single block of WEBAUDIO_BLOCK_SIZE samples. + * Note: This must be a different value than MEDIA_STREAM_DEST_TRACK_ID + */ + +AudioNodeStream::AudioNodeStream(AudioNodeEngine* aEngine, + Flags aFlags, + TrackRate aSampleRate) + : ProcessedMediaStream(), + mEngine(aEngine), + mSampleRate(aSampleRate), + mFlags(aFlags), + mNumberOfInputChannels(2), + mIsActive(aEngine->IsActive()), + mMarkAsFinishedAfterThisBlock(false), + mAudioParamStream(false), + mPassThrough(false) +{ + MOZ_ASSERT(NS_IsMainThread()); + mSuspendedCount = !(mIsActive || mFlags & EXTERNAL_OUTPUT); + mChannelCountMode = ChannelCountMode::Max; + mChannelInterpretation = ChannelInterpretation::Speakers; + // AudioNodes are always producing data + mHasCurrentData = true; + mLastChunks.SetLength(std::max(uint16_t(1), mEngine->OutputCount())); + MOZ_COUNT_CTOR(AudioNodeStream); +} + +AudioNodeStream::~AudioNodeStream() +{ + MOZ_ASSERT(mActiveInputCount == 0); + MOZ_COUNT_DTOR(AudioNodeStream); +} + +void +AudioNodeStream::DestroyImpl() +{ + // These are graph thread objects, so clean up on graph thread. 
+ mInputChunks.Clear(); + mLastChunks.Clear(); + + ProcessedMediaStream::DestroyImpl(); +} + +/* static */ already_AddRefed<AudioNodeStream> +AudioNodeStream::Create(AudioContext* aCtx, AudioNodeEngine* aEngine, + Flags aFlags, MediaStreamGraph* aGraph) +{ + MOZ_ASSERT(NS_IsMainThread()); + MOZ_RELEASE_ASSERT(aGraph); + + // MediaRecorders use an AudioNodeStream, but no AudioNode + AudioNode* node = aEngine->NodeMainThread(); + + RefPtr<AudioNodeStream> stream = + new AudioNodeStream(aEngine, aFlags, aGraph->GraphRate()); + stream->mSuspendedCount += aCtx->ShouldSuspendNewStream(); + if (node) { + stream->SetChannelMixingParametersImpl(node->ChannelCount(), + node->ChannelCountModeValue(), + node->ChannelInterpretationValue()); + } + aGraph->AddStream(stream); + return stream.forget(); +} + +size_t +AudioNodeStream::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = 0; + + // Not reported: + // - mEngine + + amount += ProcessedMediaStream::SizeOfExcludingThis(aMallocSizeOf); + amount += mLastChunks.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mLastChunks.Length(); i++) { + // NB: This is currently unshared only as there are instances of + // double reporting in DMD otherwise. + amount += mLastChunks[i].SizeOfExcludingThisIfUnshared(aMallocSizeOf); + } + + return amount; +} + +size_t +AudioNodeStream::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +void +AudioNodeStream::SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf, + AudioNodeSizes& aUsage) const +{ + // Explicitly separate out the stream memory. + aUsage.mStream = SizeOfIncludingThis(aMallocSizeOf); + + if (mEngine) { + // This will fill out the rest of |aUsage|. 
+ mEngine->SizeOfIncludingThis(aMallocSizeOf, aUsage); + } +} + +void +AudioNodeStream::SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext, + double aStreamTime) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, uint32_t aIndex, MediaStream* aRelativeToStream, + double aStreamTime) + : ControlMessage(aStream), mStreamTime(aStreamTime), + mRelativeToStream(aRelativeToStream), mIndex(aIndex) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)-> + SetStreamTimeParameterImpl(mIndex, mRelativeToStream, mStreamTime); + } + double mStreamTime; + MediaStream* mRelativeToStream; + uint32_t mIndex; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, + aContext->DestinationStream(), + aStreamTime)); +} + +void +AudioNodeStream::SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream, + double aStreamTime) +{ + StreamTime ticks = aRelativeToStream->SecondsToNearestStreamTime(aStreamTime); + mEngine->SetStreamTimeParameter(aIndex, ticks); +} + +void +AudioNodeStream::SetDoubleParameter(uint32_t aIndex, double aValue) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, uint32_t aIndex, double aValue) + : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->Engine()-> + SetDoubleParameter(mIndex, mValue); + } + double mValue; + uint32_t mIndex; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue)); +} + +void +AudioNodeStream::SetInt32Parameter(uint32_t aIndex, int32_t aValue) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, uint32_t aIndex, int32_t aValue) + : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->Engine()-> + SetInt32Parameter(mIndex, mValue); + } + int32_t mValue; + uint32_t 
mIndex; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue)); +} + +void +AudioNodeStream::SendTimelineEvent(uint32_t aIndex, + const AudioTimelineEvent& aEvent) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, uint32_t aIndex, + const AudioTimelineEvent& aEvent) + : ControlMessage(aStream), + mEvent(aEvent), + mSampleRate(aStream->SampleRate()), + mIndex(aIndex) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->Engine()-> + RecvTimelineEvent(mIndex, mEvent); + } + AudioTimelineEvent mEvent; + TrackRate mSampleRate; + uint32_t mIndex; + }; + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aEvent)); +} + +void +AudioNodeStream::SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aValue) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, uint32_t aIndex, const ThreeDPoint& aValue) + : ControlMessage(aStream), mValue(aValue), mIndex(aIndex) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->Engine()-> + SetThreeDPointParameter(mIndex, mValue); + } + ThreeDPoint mValue; + uint32_t mIndex; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aIndex, aValue)); +} + +void +AudioNodeStream::SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, + already_AddRefed<ThreadSharedFloatArrayBufferList>& aBuffer) + : ControlMessage(aStream), mBuffer(aBuffer) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->Engine()-> + SetBuffer(mBuffer.forget()); + } + RefPtr<ThreadSharedFloatArrayBufferList> mBuffer; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aBuffer)); +} + +void +AudioNodeStream::SetRawArrayData(nsTArray<float>& aData) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, + 
nsTArray<float>& aData) + : ControlMessage(aStream) + { + mData.SwapElements(aData); + } + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->Engine()->SetRawArrayData(mData); + } + nsTArray<float> mData; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aData)); +} + +void +AudioNodeStream::SetChannelMixingParameters(uint32_t aNumberOfChannels, + ChannelCountMode aChannelCountMode, + ChannelInterpretation aChannelInterpretation) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, + uint32_t aNumberOfChannels, + ChannelCountMode aChannelCountMode, + ChannelInterpretation aChannelInterpretation) + : ControlMessage(aStream), + mNumberOfChannels(aNumberOfChannels), + mChannelCountMode(aChannelCountMode), + mChannelInterpretation(aChannelInterpretation) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)-> + SetChannelMixingParametersImpl(mNumberOfChannels, mChannelCountMode, + mChannelInterpretation); + } + uint32_t mNumberOfChannels; + ChannelCountMode mChannelCountMode; + ChannelInterpretation mChannelInterpretation; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aNumberOfChannels, + aChannelCountMode, + aChannelInterpretation)); +} + +void +AudioNodeStream::SetPassThrough(bool aPassThrough) +{ + class Message final : public ControlMessage + { + public: + Message(AudioNodeStream* aStream, bool aPassThrough) + : ControlMessage(aStream), mPassThrough(aPassThrough) + {} + void Run() override + { + static_cast<AudioNodeStream*>(mStream)->mPassThrough = mPassThrough; + } + bool mPassThrough; + }; + + GraphImpl()->AppendMessage(MakeUnique<Message>(this, aPassThrough)); +} + +void +AudioNodeStream::SetChannelMixingParametersImpl(uint32_t aNumberOfChannels, + ChannelCountMode aChannelCountMode, + ChannelInterpretation aChannelInterpretation) +{ + // Make sure that we're not clobbering any significant bits by fitting these + // values in 16 bits. 
+ MOZ_ASSERT(int(aChannelCountMode) < INT16_MAX); + MOZ_ASSERT(int(aChannelInterpretation) < INT16_MAX); + + mNumberOfInputChannels = aNumberOfChannels; + mChannelCountMode = aChannelCountMode; + mChannelInterpretation = aChannelInterpretation; +} + +uint32_t +AudioNodeStream::ComputedNumberOfChannels(uint32_t aInputChannelCount) +{ + switch (mChannelCountMode) { + case ChannelCountMode::Explicit: + // Disregard the channel count we've calculated from inputs, and just use + // mNumberOfInputChannels. + return mNumberOfInputChannels; + case ChannelCountMode::Clamped_max: + // Clamp the computed output channel count to mNumberOfInputChannels. + return std::min(aInputChannelCount, mNumberOfInputChannels); + default: + case ChannelCountMode::Max: + // Nothing to do here, just shut up the compiler warning. + return aInputChannelCount; + } +} + +class AudioNodeStream::AdvanceAndResumeMessage final : public ControlMessage { +public: + AdvanceAndResumeMessage(AudioNodeStream* aStream, StreamTime aAdvance) : + ControlMessage(aStream), mAdvance(aAdvance) {} + void Run() override + { + auto ns = static_cast<AudioNodeStream*>(mStream); + ns->mTracksStartTime -= mAdvance; + + StreamTracks::Track* track = ns->EnsureTrack(AUDIO_TRACK); + track->Get<AudioSegment>()->AppendNullData(mAdvance); + + ns->GraphImpl()->DecrementSuspendCount(mStream); + } +private: + StreamTime mAdvance; +}; + +void +AudioNodeStream::AdvanceAndResume(StreamTime aAdvance) +{ + mMainThreadCurrentTime += aAdvance; + GraphImpl()->AppendMessage(MakeUnique<AdvanceAndResumeMessage>(this, aAdvance)); +} + +void +AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk, + uint32_t aPortIndex) +{ + uint32_t inputCount = mInputs.Length(); + uint32_t outputChannelCount = 1; + AutoTArray<const AudioBlock*,250> inputChunks; + for (uint32_t i = 0; i < inputCount; ++i) { + if (aPortIndex != mInputs[i]->InputNumber()) { + // This input is connected to a different port + continue; + } + MediaStream* s = 
mInputs[i]->GetSource(); + AudioNodeStream* a = static_cast<AudioNodeStream*>(s); + MOZ_ASSERT(a == s->AsAudioNodeStream()); + if (a->IsAudioParamStream()) { + continue; + } + + const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()]; + MOZ_ASSERT(chunk); + if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) { + continue; + } + + inputChunks.AppendElement(chunk); + outputChannelCount = + GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount()); + } + + outputChannelCount = ComputedNumberOfChannels(outputChannelCount); + + uint32_t inputChunkCount = inputChunks.Length(); + if (inputChunkCount == 0 || + (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) { + aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + if (inputChunkCount == 1 && + inputChunks[0]->ChannelCount() == outputChannelCount) { + aTmpChunk = *inputChunks[0]; + return; + } + + if (outputChannelCount == 0) { + aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + aTmpChunk.AllocateChannels(outputChannelCount); + DownmixBufferType downmixBuffer; + ASSERT_ALIGNED16(downmixBuffer.Elements()); + + for (uint32_t i = 0; i < inputChunkCount; ++i) { + AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer); + } +} + +void +AudioNodeStream::AccumulateInputChunk(uint32_t aInputIndex, + const AudioBlock& aChunk, + AudioBlock* aBlock, + DownmixBufferType* aDownmixBuffer) +{ + AutoTArray<const float*,GUESS_AUDIO_CHANNELS> channels; + UpMixDownMixChunk(&aChunk, aBlock->ChannelCount(), channels, *aDownmixBuffer); + + for (uint32_t c = 0; c < channels.Length(); ++c) { + const float* inputData = static_cast<const float*>(channels[c]); + float* outputData = aBlock->ChannelFloatsForWrite(c); + if (inputData) { + if (aInputIndex == 0) { + AudioBlockCopyChannelWithScale(inputData, aChunk.mVolume, outputData); + } else { + AudioBlockAddChannelWithScale(inputData, aChunk.mVolume, outputData); + } + } else { + if (aInputIndex == 0) { + PodZero(outputData, 
WEBAUDIO_BLOCK_SIZE); + } + } + } +} + +void +AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk, + uint32_t aOutputChannelCount, + nsTArray<const float*>& aOutputChannels, + DownmixBufferType& aDownmixBuffer) +{ + for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) { + aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i])); + } + if (aOutputChannels.Length() < aOutputChannelCount) { + if (mChannelInterpretation == ChannelInterpretation::Speakers) { + AudioChannelsUpMix<float>(&aOutputChannels, aOutputChannelCount, nullptr); + NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(), + "We called GetAudioChannelsSuperset to avoid this"); + } else { + // Fill up the remaining aOutputChannels by zeros + for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) { + aOutputChannels.AppendElement(nullptr); + } + } + } else if (aOutputChannels.Length() > aOutputChannelCount) { + if (mChannelInterpretation == ChannelInterpretation::Speakers) { + AutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels; + outputChannels.SetLength(aOutputChannelCount); + aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE); + for (uint32_t j = 0; j < aOutputChannelCount; ++j) { + outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE]; + } + + AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(), + aOutputChannelCount, WEBAUDIO_BLOCK_SIZE); + + aOutputChannels.SetLength(aOutputChannelCount); + for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) { + aOutputChannels[j] = outputChannels[j]; + } + } else { + // Drop the remaining aOutputChannels + aOutputChannels.RemoveElementsAt(aOutputChannelCount, + aOutputChannels.Length() - aOutputChannelCount); + } + } +} + +// The MediaStreamGraph guarantees that this is actually one block, for +// AudioNodeStreams. 
+void +AudioNodeStream::ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) +{ + uint16_t outputCount = mLastChunks.Length(); + MOZ_ASSERT(outputCount == std::max(uint16_t(1), mEngine->OutputCount())); + + if (!mIsActive) { + // mLastChunks are already null. +#ifdef DEBUG + for (const auto& chunk : mLastChunks) { + MOZ_ASSERT(chunk.IsNull()); + } +#endif + } else if (InMutedCycle()) { + mInputChunks.Clear(); + for (uint16_t i = 0; i < outputCount; ++i) { + mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE); + } + } else { + // We need to generate at least one input + uint16_t maxInputs = std::max(uint16_t(1), mEngine->InputCount()); + mInputChunks.SetLength(maxInputs); + for (uint16_t i = 0; i < maxInputs; ++i) { + ObtainInputBlock(mInputChunks[i], i); + } + bool finished = false; + if (mPassThrough) { + MOZ_ASSERT(outputCount == 1, "For now, we only support nodes that have one output port"); + mLastChunks[0] = mInputChunks[0]; + } else { + if (maxInputs <= 1 && outputCount <= 1) { + mEngine->ProcessBlock(this, aFrom, + mInputChunks[0], &mLastChunks[0], &finished); + } else { + mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks, &finished); + } + } + for (uint16_t i = 0; i < outputCount; ++i) { + NS_ASSERTION(mLastChunks[i].GetDuration() == WEBAUDIO_BLOCK_SIZE, + "Invalid WebAudio chunk size"); + } + if (finished) { + mMarkAsFinishedAfterThisBlock = true; + if (mIsActive) { + ScheduleCheckForInactive(); + } + } + + if (GetDisabledTrackMode(static_cast<TrackID>(AUDIO_TRACK)) != DisabledTrackMode::ENABLED) { + for (uint32_t i = 0; i < outputCount; ++i) { + mLastChunks[i].SetNull(WEBAUDIO_BLOCK_SIZE); + } + } + } + + if (!mFinished) { + // Don't output anything while finished + if (mFlags & EXTERNAL_OUTPUT) { + AdvanceOutputSegment(); + } + if (mMarkAsFinishedAfterThisBlock && (aFlags & ALLOW_FINISH)) { + // This stream was finished the last time that we looked at it, and all + // of the depending streams have finished their output as well, so now + // 
it's time to mark this stream as finished. + if (mFlags & EXTERNAL_OUTPUT) { + FinishOutput(); + } + FinishOnGraphThread(); + } + } +} + +void +AudioNodeStream::ProduceOutputBeforeInput(GraphTime aFrom) +{ + MOZ_ASSERT(mEngine->AsDelayNodeEngine()); + MOZ_ASSERT(mEngine->OutputCount() == 1, + "DelayNodeEngine output count should be 1"); + MOZ_ASSERT(!InMutedCycle(), "DelayNodes should break cycles"); + MOZ_ASSERT(mLastChunks.Length() == 1); + + if (!mIsActive) { + mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); + } else { + mEngine->ProduceBlockBeforeInput(this, aFrom, &mLastChunks[0]); + NS_ASSERTION(mLastChunks[0].GetDuration() == WEBAUDIO_BLOCK_SIZE, + "Invalid WebAudio chunk size"); + if (GetDisabledTrackMode(static_cast<TrackID>(AUDIO_TRACK)) != DisabledTrackMode::ENABLED) { + mLastChunks[0].SetNull(WEBAUDIO_BLOCK_SIZE); + } + } +} + +void +AudioNodeStream::AdvanceOutputSegment() +{ + StreamTracks::Track* track = EnsureTrack(AUDIO_TRACK); + // No more tracks will be coming + mTracks.AdvanceKnownTracksTime(STREAM_TIME_MAX); + + AudioSegment* segment = track->Get<AudioSegment>(); + + if (!mLastChunks[0].IsNull()) { + segment->AppendAndConsumeChunk(mLastChunks[0].AsMutableChunk()); + } else { + segment->AppendNullData(mLastChunks[0].GetDuration()); + } + + for (uint32_t j = 0; j < mListeners.Length(); ++j) { + MediaStreamListener* l = mListeners[j]; + AudioChunk copyChunk = mLastChunks[0].AsAudioChunk(); + AudioSegment tmpSegment; + tmpSegment.AppendAndConsumeChunk(©Chunk); + l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK, + segment->GetDuration(), TrackEventCommand::TRACK_EVENT_NONE, tmpSegment); + } +} + +void +AudioNodeStream::FinishOutput() +{ + StreamTracks::Track* track = EnsureTrack(AUDIO_TRACK); + track->SetEnded(); + + for (uint32_t j = 0; j < mListeners.Length(); ++j) { + MediaStreamListener* l = mListeners[j]; + AudioSegment emptySegment; + l->NotifyQueuedTrackChanges(Graph(), AUDIO_TRACK, + track->GetSegment()->GetDuration(), + 
TrackEventCommand::TRACK_EVENT_ENDED, emptySegment); + } +} + +void +AudioNodeStream::AddInput(MediaInputPort* aPort) +{ + ProcessedMediaStream::AddInput(aPort); + AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream(); + // Streams that are not AudioNodeStreams are considered active. + if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) { + IncrementActiveInputCount(); + } +} +void +AudioNodeStream::RemoveInput(MediaInputPort* aPort) +{ + ProcessedMediaStream::RemoveInput(aPort); + AudioNodeStream* ns = aPort->GetSource()->AsAudioNodeStream(); + // Streams that are not AudioNodeStreams are considered active. + if (!ns || (ns->mIsActive && !ns->IsAudioParamStream())) { + DecrementActiveInputCount(); + } +} + +void +AudioNodeStream::SetActive() +{ + if (mIsActive || mMarkAsFinishedAfterThisBlock) { + return; + } + + mIsActive = true; + if (!(mFlags & EXTERNAL_OUTPUT)) { + GraphImpl()->DecrementSuspendCount(this); + } + if (IsAudioParamStream()) { + // Consumers merely influence stream order. + // They do not read from the stream. 
+ return; + } + + for (const auto& consumer : mConsumers) { + AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream(); + if (ns) { + ns->IncrementActiveInputCount(); + } + } +} + +class AudioNodeStream::CheckForInactiveMessage final : public ControlMessage +{ +public: + explicit CheckForInactiveMessage(AudioNodeStream* aStream) : + ControlMessage(aStream) {} + void Run() override + { + auto ns = static_cast<AudioNodeStream*>(mStream); + ns->CheckForInactive(); + } +}; + +void +AudioNodeStream::ScheduleCheckForInactive() +{ + if (mActiveInputCount > 0 && !mMarkAsFinishedAfterThisBlock) { + return; + } + + auto message = MakeUnique<CheckForInactiveMessage>(this); + GraphImpl()->RunMessageAfterProcessing(Move(message)); +} + +void +AudioNodeStream::CheckForInactive() +{ + if (((mActiveInputCount > 0 || mEngine->IsActive()) && + !mMarkAsFinishedAfterThisBlock) || + !mIsActive) { + return; + } + + mIsActive = false; + mInputChunks.Clear(); // not required for foreseeable future + for (auto& chunk : mLastChunks) { + chunk.SetNull(WEBAUDIO_BLOCK_SIZE); + } + if (!(mFlags & EXTERNAL_OUTPUT)) { + GraphImpl()->IncrementSuspendCount(this); + } + if (IsAudioParamStream()) { + return; + } + + for (const auto& consumer : mConsumers) { + AudioNodeStream* ns = consumer->GetDestination()->AsAudioNodeStream(); + if (ns) { + ns->DecrementActiveInputCount(); + } + } +} + +void +AudioNodeStream::IncrementActiveInputCount() +{ + ++mActiveInputCount; + SetActive(); +} + +void +AudioNodeStream::DecrementActiveInputCount() +{ + MOZ_ASSERT(mActiveInputCount > 0); + --mActiveInputCount; + CheckForInactive(); +} + +} // namespace mozilla diff --git a/dom/media/webaudio/AudioNodeStream.h b/dom/media/webaudio/AudioNodeStream.h new file mode 100644 index 000000000..87f6fa221 --- /dev/null +++ b/dom/media/webaudio/AudioNodeStream.h @@ -0,0 +1,239 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of 
the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MOZILLA_AUDIONODESTREAM_H_ +#define MOZILLA_AUDIONODESTREAM_H_ + +#include "MediaStreamGraph.h" +#include "mozilla/dom/AudioNodeBinding.h" +#include "nsAutoPtr.h" +#include "AlignedTArray.h" +#include "AudioBlock.h" +#include "AudioSegment.h" + +namespace mozilla { + +namespace dom { +struct ThreeDPoint; +struct AudioTimelineEvent; +class AudioContext; +} // namespace dom + +class ThreadSharedFloatArrayBufferList; +class AudioNodeEngine; + +typedef AlignedAutoTArray<float, GUESS_AUDIO_CHANNELS*WEBAUDIO_BLOCK_SIZE, 16> DownmixBufferType; + +/** + * An AudioNodeStream produces one audio track with ID AUDIO_TRACK. + * The start time of the AudioTrack is aligned to the start time of the + * AudioContext's destination node stream, plus some multiple of BLOCK_SIZE + * samples. + * + * An AudioNodeStream has an AudioNodeEngine plugged into it that does the + * actual audio processing. AudioNodeStream contains the glue code that + * integrates audio processing with the MediaStreamGraph. + */ +class AudioNodeStream : public ProcessedMediaStream +{ + typedef dom::ChannelCountMode ChannelCountMode; + typedef dom::ChannelInterpretation ChannelInterpretation; + +public: + typedef mozilla::dom::AudioContext AudioContext; + + enum { AUDIO_TRACK = 1 }; + + typedef AutoTArray<AudioBlock, 1> OutputChunks; + + // Flags re main thread updates and stream output. + typedef unsigned Flags; + enum : Flags { + NO_STREAM_FLAGS = 0U, + NEED_MAIN_THREAD_FINISHED = 1U << 0, + NEED_MAIN_THREAD_CURRENT_TIME = 1U << 1, + // Internal AudioNodeStreams can only pass their output to another + // AudioNode, whereas external AudioNodeStreams can pass their output + // to other ProcessedMediaStreams or hardware audio output. + EXTERNAL_OUTPUT = 1U << 2, + }; + /** + * Create a stream that will process audio for an AudioNode. 
+ * Takes ownership of aEngine. + * aGraph is required and equals the graph of aCtx in most cases. An exception + * is AudioDestinationNode where the context's graph hasn't been set up yet. + */ + static already_AddRefed<AudioNodeStream> + Create(AudioContext* aCtx, AudioNodeEngine* aEngine, Flags aKind, + MediaStreamGraph* aGraph); + +protected: + /** + * Transfers ownership of aEngine to the new AudioNodeStream. + */ + AudioNodeStream(AudioNodeEngine* aEngine, + Flags aFlags, + TrackRate aSampleRate); + + ~AudioNodeStream(); + +public: + // Control API + /** + * Sets a parameter that's a time relative to some stream's played time. + * This time is converted to a time relative to this stream when it's set. + */ + void SetStreamTimeParameter(uint32_t aIndex, AudioContext* aContext, + double aStreamTime); + void SetDoubleParameter(uint32_t aIndex, double aValue); + void SetInt32Parameter(uint32_t aIndex, int32_t aValue); + void SetThreeDPointParameter(uint32_t aIndex, const dom::ThreeDPoint& aValue); + void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList>&& aBuffer); + // This sends a single event to the timeline on the MSG thread side. + void SendTimelineEvent(uint32_t aIndex, const dom::AudioTimelineEvent& aEvent); + // This consumes the contents of aData. aData will be emptied after this returns. + void SetRawArrayData(nsTArray<float>& aData); + void SetChannelMixingParameters(uint32_t aNumberOfChannels, + ChannelCountMode aChannelCountMoe, + ChannelInterpretation aChannelInterpretation); + void SetPassThrough(bool aPassThrough); + ChannelInterpretation GetChannelInterpretation() + { + return mChannelInterpretation; + } + + void SetAudioParamHelperStream() + { + MOZ_ASSERT(!mAudioParamStream, "Can only do this once"); + mAudioParamStream = true; + } + + /* + * Resume stream after updating its concept of current time by aAdvance. + * Main thread. 
Used only from AudioDestinationNode when resuming a stream + * suspended to save running the MediaStreamGraph when there are no other + * nodes in the AudioContext. + */ + void AdvanceAndResume(StreamTime aAdvance); + + AudioNodeStream* AsAudioNodeStream() override { return this; } + void AddInput(MediaInputPort* aPort) override; + void RemoveInput(MediaInputPort* aPort) override; + + // Graph thread only + void SetStreamTimeParameterImpl(uint32_t aIndex, MediaStream* aRelativeToStream, + double aStreamTime); + void SetChannelMixingParametersImpl(uint32_t aNumberOfChannels, + ChannelCountMode aChannelCountMoe, + ChannelInterpretation aChannelInterpretation); + void ProcessInput(GraphTime aFrom, GraphTime aTo, uint32_t aFlags) override; + /** + * Produce the next block of output, before input is provided. + * ProcessInput() will be called later, and it then should not change + * the output. This is used only for DelayNodeEngine in a feedback loop. + */ + void ProduceOutputBeforeInput(GraphTime aFrom); + bool IsAudioParamStream() const + { + return mAudioParamStream; + } + + const OutputChunks& LastChunks() const + { + return mLastChunks; + } + bool MainThreadNeedsUpdates() const override + { + return ((mFlags & NEED_MAIN_THREAD_FINISHED) && mFinished) || + (mFlags & NEED_MAIN_THREAD_CURRENT_TIME); + } + + // Any thread + AudioNodeEngine* Engine() { return mEngine; } + TrackRate SampleRate() const { return mSampleRate; } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + + void SizeOfAudioNodesIncludingThis(MallocSizeOf aMallocSizeOf, + AudioNodeSizes& aUsage) const; + + /* + * SetActive() is called when either an active input is added or the engine + * for a source node transitions from inactive to active. 
This is not + * called from engines for processing nodes because they only become active + * when there are active input streams, in which case this stream is already + * active. + */ + void SetActive(); + /* + * ScheduleCheckForInactive() is called during stream processing when the + * engine transitions from active to inactive, or the stream finishes. It + * schedules a call to CheckForInactive() after stream processing. + */ + void ScheduleCheckForInactive(); + +protected: + class AdvanceAndResumeMessage; + class CheckForInactiveMessage; + + void DestroyImpl() override; + + /* + * CheckForInactive() is called when the engine transitions from active to + * inactive, or an active input is removed, or the stream finishes. If the + * stream is now inactive, then mInputChunks will be cleared and mLastChunks + * will be set to null. ProcessBlock() will not be called on the engine + * again until SetActive() is called. + */ + void CheckForInactive(); + + void AdvanceOutputSegment(); + void FinishOutput(); + void AccumulateInputChunk(uint32_t aInputIndex, const AudioBlock& aChunk, + AudioBlock* aBlock, + DownmixBufferType* aDownmixBuffer); + void UpMixDownMixChunk(const AudioBlock* aChunk, uint32_t aOutputChannelCount, + nsTArray<const float*>& aOutputChannels, + DownmixBufferType& aDownmixBuffer); + + uint32_t ComputedNumberOfChannels(uint32_t aInputChannelCount); + void ObtainInputBlock(AudioBlock& aTmpChunk, uint32_t aPortIndex); + void IncrementActiveInputCount(); + void DecrementActiveInputCount(); + + // The engine that will generate output for this node. + nsAutoPtr<AudioNodeEngine> mEngine; + // The mixed input blocks are kept from iteration to iteration to avoid + // reallocating channel data arrays and any buffers for mixing. + OutputChunks mInputChunks; + // The last block produced by this node. 
+ OutputChunks mLastChunks; + // The stream's sampling rate + const TrackRate mSampleRate; + // Whether this is an internal or external stream + const Flags mFlags; + // The number of input streams that may provide non-silent input. + uint32_t mActiveInputCount = 0; + // The number of input channels that this stream requires. 0 means don't care. + uint32_t mNumberOfInputChannels; + // The mixing modes + ChannelCountMode mChannelCountMode; + ChannelInterpretation mChannelInterpretation; + // Streams are considered active if the stream has not finished and either + // the engine is active or there are active input streams. + bool mIsActive; + // Whether the stream should be marked as finished as soon + // as the current time range has been computed block by block. + bool mMarkAsFinishedAfterThisBlock; + // Whether the stream is an AudioParamHelper stream. + bool mAudioParamStream; + // Whether the stream just passes its input through. + bool mPassThrough; +}; + +} // namespace mozilla + +#endif /* MOZILLA_AUDIONODESTREAM_H_ */ diff --git a/dom/media/webaudio/AudioParam.cpp b/dom/media/webaudio/AudioParam.cpp new file mode 100644 index 000000000..6f5574993 --- /dev/null +++ b/dom/media/webaudio/AudioParam.cpp @@ -0,0 +1,199 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/
+
+#include "AudioParam.h"
+#include "mozilla/dom/AudioParamBinding.h"
+#include "AudioNodeEngine.h"
+#include "AudioNodeStream.h"
+#include "AudioContext.h"
+
+namespace mozilla {
+namespace dom {
+
+NS_IMPL_CYCLE_COLLECTION_CLASS(AudioParam)
+
+// Unlink must drop the graph-side resources before releasing mNode, otherwise
+// the helper stream/port would keep the node graph alive.
+NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(AudioParam)
+  tmp->DisconnectFromGraphAndDestroyStream();
+  NS_IMPL_CYCLE_COLLECTION_UNLINK(mNode)
+  NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER
+NS_IMPL_CYCLE_COLLECTION_UNLINK_END
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(AudioParam)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mNode)
+  NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END
+
+NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(AudioParam)
+
+NS_IMPL_CYCLE_COLLECTING_NATIVE_ADDREF(AudioParam)
+NS_IMPL_CYCLE_COLLECTING_NATIVE_RELEASE(AudioParam)
+
+NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(AudioParam, AddRef)
+NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(AudioParam, Release)
+
+AudioParam::AudioParam(AudioNode* aNode,
+                       uint32_t aIndex,
+                       float aDefaultValue,
+                       const char* aName)
+  : AudioParamTimeline(aDefaultValue)
+  , mNode(aNode)
+  , mName(aName)
+  , mIndex(aIndex)
+  , mDefaultValue(aDefaultValue)
+{
+}
+
+AudioParam::~AudioParam()
+{
+  DisconnectFromGraphAndDestroyStream();
+}
+
+JSObject*
+AudioParam::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
+{
+  return AudioParamBinding::Wrap(aCx, this, aGivenProto);
+}
+
+// Severs every AudioNode -> AudioParam connection and tears down the lazily
+// created helper stream and its input port.  Called from both cycle-collection
+// Unlink and the destructor; all members it touches are nulled out, so a
+// second call is a no-op.
+void
+AudioParam::DisconnectFromGraphAndDestroyStream()
+{
+  MOZ_ASSERT(mRefCnt.get() > mInputNodes.Length(),
+             "Caller should be holding a reference or have called "
+             "mRefCnt.stabilizeForDeletion()");
+
+  while (!mInputNodes.IsEmpty()) {
+    uint32_t i = mInputNodes.Length() - 1;
+    RefPtr<AudioNode> input = mInputNodes[i].mInputNode;
+    mInputNodes.RemoveElementAt(i);
+    input->RemoveOutputParam(this);
+  }
+
+  if (mNodeStreamPort) {
+    mNodeStreamPort->Destroy();
+    mNodeStreamPort = nullptr;
+  }
+
+  if (mStream) {
+    mStream->Destroy();
+    mStream = nullptr;
+  }
+}
+
+// Lazily creates the helper AudioNodeStream that carries the output of the
+// AudioNodes connected to this parameter, wires it into the owning node's
+// stream, and announces it to the engine-side timeline.  Subsequent calls
+// return the cached stream.
+MediaStream*
+AudioParam::Stream()
+{
+  if (mStream) {
+    return mStream;
+  }
+
+  AudioNodeEngine* engine = new AudioNodeEngine(nullptr);
+  RefPtr<AudioNodeStream> stream =
+    AudioNodeStream::Create(mNode->Context(), engine,
+                            AudioNodeStream::NO_STREAM_FLAGS,
+                            mNode->Context()->Graph());
+
+  // Force the input to have only one channel, and make it down-mix using
+  // the speaker rules if needed.
+  stream->SetChannelMixingParametersImpl(1, ChannelCountMode::Explicit, ChannelInterpretation::Speakers);
+  // Mark as an AudioParam helper stream
+  stream->SetAudioParamHelperStream();
+
+  mStream = stream.forget();
+
+  // Setup the AudioParam's stream as an input to the owner AudioNode's stream
+  AudioNodeStream* nodeStream = mNode->GetStream();
+  if (nodeStream) {
+    mNodeStreamPort =
+      nodeStream->AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK);
+  }
+
+  // Send the stream to the timeline on the MSG side.
+  AudioTimelineEvent event(mStream);
+  SendEventToEngine(event);
+
+  return mStream;
+}
+
+// Debug-logging helper mapping an automation event type to its name.
+static const char*
+ToString(AudioTimelineEvent::Type aType)
+{
+  switch (aType) {
+    case AudioTimelineEvent::SetValue:
+      return "SetValue";
+    case AudioTimelineEvent::SetValueAtTime:
+      return "SetValueAtTime";
+    case AudioTimelineEvent::LinearRamp:
+      return "LinearRamp";
+    case AudioTimelineEvent::ExponentialRamp:
+      return "ExponentialRamp";
+    case AudioTimelineEvent::SetTarget:
+      return "SetTarget";
+    case AudioTimelineEvent::SetValueCurve:
+      return "SetValueCurve";
+    case AudioTimelineEvent::Stream:
+      return "Stream";
+    case AudioTimelineEvent::Cancel:
+      return "Cancel";
+    default:
+      return "unknown AudioTimelineEvent";
+  }
+}
+
+// Logs aEvent and forwards it to the owning node's stream so the engine-side
+// copy of the timeline stays in sync with the main-thread copy.  SetValueCurve
+// events log curve length/duration instead of value/time-constant.
+void
+AudioParam::SendEventToEngine(const AudioTimelineEvent& aEvent)
+{
+  WEB_AUDIO_API_LOG("%f: %s for %u %s %s=%g time=%f %s=%g",
+                    GetParentObject()->CurrentTime(),
+                    mName, ParentNodeId(), ToString(aEvent.mType),
+                    aEvent.mType == AudioTimelineEvent::SetValueCurve ?
+                      "length" : "value",
+                    aEvent.mType == AudioTimelineEvent::SetValueCurve ?
+                      static_cast<double>(aEvent.mCurveLength) :
+                      static_cast<double>(aEvent.mValue),
+                    aEvent.Time<double>(),
+                    aEvent.mType == AudioTimelineEvent::SetValueCurve ?
+                      "duration" : "constant",
+                    aEvent.mType == AudioTimelineEvent::SetValueCurve ?
+                      aEvent.mDuration : aEvent.mTimeConstant);
+
+  AudioNodeStream* stream = mNode->GetStream();
+  if (stream) {
+    stream->SendTimelineEvent(mIndex, aEvent);
+  }
+}
+
+// Drops main-thread timeline events that lie entirely before the context's
+// current time; main-thread only.
+void
+AudioParam::CleanupOldEvents()
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  double currentTime = mNode->Context()->CurrentTime();
+
+  CleanupEventsOlderThan(currentTime);
+}
+
+float
+AudioParamTimeline::AudioNodeInputValue(size_t aCounter) const
+{
+  MOZ_ASSERT(mStream);
+
+  // If we have a chunk produced by the AudioNode inputs to the AudioParam,
+  // get its value now. We use aCounter to tell us which frame of the last
+  // AudioChunk to look at.
+  float audioNodeInputValue = 0.0f;
+  const AudioBlock& lastAudioNodeChunk =
+    static_cast<AudioNodeStream*>(mStream.get())->LastChunks()[0];
+  if (!lastAudioNodeChunk.IsNull()) {
+    MOZ_ASSERT(lastAudioNodeChunk.GetDuration() == WEBAUDIO_BLOCK_SIZE);
+    audioNodeInputValue =
+      static_cast<const float*>(lastAudioNodeChunk.mChannelData[0])[aCounter];
+    audioNodeInputValue *= lastAudioNodeChunk.mVolume;
+  }
+
+  return audioNodeInputValue;
+}
+
+} // namespace dom
+} // namespace mozilla
+
diff --git a/dom/media/webaudio/AudioParam.h b/dom/media/webaudio/AudioParam.h
new file mode 100644
index 000000000..90686cb89
--- /dev/null
+++ b/dom/media/webaudio/AudioParam.h
@@ -0,0 +1,246 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
+
+#ifndef AudioParam_h_
+#define AudioParam_h_
+
+#include "AudioParamTimeline.h"
+#include "nsWrapperCache.h"
+#include "nsCycleCollectionParticipant.h"
+#include "AudioNode.h"
+#include "mozilla/dom/TypedArray.h"
+#include "WebAudioUtils.h"
+#include "js/TypeDecls.h"
+
+namespace mozilla {
+
+namespace dom {
+
+// DOM-facing side of a Web Audio parameter.  It owns the main-thread copy of
+// the automation timeline (via AudioParamTimeline) and mirrors every change to
+// the engine-side copy through SendEventToEngine.
+class AudioParam final : public nsWrapperCache,
+                         public AudioParamTimeline
+{
+  virtual ~AudioParam();
+
+public:
+  AudioParam(AudioNode* aNode,
+             uint32_t aIndex,
+             float aDefaultValue,
+             const char* aName);
+
+  NS_IMETHOD_(MozExternalRefCountType) AddRef(void);
+  NS_IMETHOD_(MozExternalRefCountType) Release(void);
+  NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioParam)
+
+  AudioContext* GetParentObject() const
+  {
+    return mNode->Context();
+  }
+
+  JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
+
+  // We override SetValueCurveAtTime to convert the Float32Array to the wrapper
+  // object.
+  // NOTE(review): unlike SetTargetAtTime's aTimeConstant, aDuration is not
+  // run through IsTimeValid here -- confirm whether curve durations are
+  // validated elsewhere (e.g. in ValidateEvent).
+  AudioParam* SetValueCurveAtTime(const Float32Array& aValues,
+                                  double aStartTime,
+                                  double aDuration,
+                                  ErrorResult& aRv)
+  {
+    if (!WebAudioUtils::IsTimeValid(aStartTime)) {
+      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+      return this;
+    }
+    aValues.ComputeLengthAndData();
+
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetValueCurve,
+                         aStartTime, 0.0f, 0.0f, aDuration, aValues.Data(),
+                         aValues.Length());
+    return this;
+  }
+
+  // Sets the parameter's simple (non-automated) value and mirrors the change
+  // to the engine.
+  void SetValue(float aValue)
+  {
+    AudioTimelineEvent event(AudioTimelineEvent::SetValue, 0.0f, aValue);
+
+    ErrorResult rv;
+    if (!ValidateEvent(event, rv)) {
+      MOZ_ASSERT(false, "This should not happen, "
+                        "setting the value should always work");
+      return;
+    }
+
+    AudioParamTimeline::SetValue(aValue);
+
+    SendEventToEngine(event);
+  }
+
+  AudioParam* SetValueAtTime(float aValue, double aStartTime, ErrorResult& aRv)
+  {
+    if (!WebAudioUtils::IsTimeValid(aStartTime)) {
+      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+      return this;
+    }
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetValueAtTime,
+                         aStartTime, aValue);
+
+    return this;
+  }
+
+  AudioParam* LinearRampToValueAtTime(float aValue, double aEndTime,
+                                      ErrorResult& aRv)
+  {
+    if (!WebAudioUtils::IsTimeValid(aEndTime)) {
+      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+      return this;
+    }
+    EventInsertionHelper(aRv, AudioTimelineEvent::LinearRamp, aEndTime, aValue);
+    return this;
+  }
+
+  AudioParam* ExponentialRampToValueAtTime(float aValue, double aEndTime,
+                                           ErrorResult& aRv)
+  {
+    if (!WebAudioUtils::IsTimeValid(aEndTime)) {
+      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+      return this;
+    }
+    EventInsertionHelper(aRv, AudioTimelineEvent::ExponentialRamp,
+                         aEndTime, aValue);
+    return this;
+  }
+
+  AudioParam* SetTargetAtTime(float aTarget, double aStartTime,
+                              double aTimeConstant, ErrorResult& aRv)
+  {
+    if (!WebAudioUtils::IsTimeValid(aStartTime) ||
+        !WebAudioUtils::IsTimeValid(aTimeConstant)) {
+      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+      return this;
+    }
+    EventInsertionHelper(aRv, AudioTimelineEvent::SetTarget,
+                         aStartTime, aTarget,
+                         aTimeConstant);
+
+    return this;
+  }
+
+  AudioParam* CancelScheduledValues(double aStartTime, ErrorResult& aRv)
+  {
+    if (!WebAudioUtils::IsTimeValid(aStartTime)) {
+      aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+      return this;
+    }
+
+    // Remove some events on the main thread copy.
+    AudioEventTimeline::CancelScheduledValues(aStartTime);
+
+    // Mirror the cancellation to the engine-side timeline copy.
+    AudioTimelineEvent event(AudioTimelineEvent::Cancel, aStartTime, 0.0f);
+
+    SendEventToEngine(event);
+
+    return this;
+  }
+
+  uint32_t ParentNodeId()
+  {
+    return mNode->Id();
+  }
+
+  void GetName(nsAString& aName)
+  {
+    aName.AssignASCII(mName);
+  }
+
+  float DefaultValue() const
+  {
+    return mDefaultValue;
+  }
+
+  const nsTArray<AudioNode::InputNode>& InputNodes() const
+  {
+    return mInputNodes;
+  }
+
+  void RemoveInputNode(uint32_t aIndex)
+  {
+    mInputNodes.RemoveElementAt(aIndex);
+  }
+
+  AudioNode::InputNode* AppendInputNode()
+  {
+    return mInputNodes.AppendElement();
+  }
+
+  // May create the stream if it doesn't exist
+  MediaStream* Stream();
+
+  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override
+  {
+    size_t amount = AudioParamTimeline::SizeOfExcludingThis(aMallocSizeOf);
+    // Not owned:
+    // - mNode
+
+    // Just count the array, actual nodes are counted in mNode.
+    amount += mInputNodes.ShallowSizeOfExcludingThis(aMallocSizeOf);
+
+    if (mNodeStreamPort) {
+      amount += mNodeStreamPort->SizeOfIncludingThis(aMallocSizeOf);
+    }
+
+    return amount;
+  }
+
+  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override
+  {
+    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+  }
+
+private:
+  // Validates aEvent, inserts it into the main-thread timeline, mirrors it to
+  // the engine, then prunes events that are already in the past.
+  void EventInsertionHelper(ErrorResult& aRv,
+                            AudioTimelineEvent::Type aType,
+                            double aTime, float aValue,
+                            double aTimeConstant = 0.0,
+                            float aDuration = 0.0,
+                            const float* aCurve = nullptr,
+                            uint32_t aCurveLength = 0)
+  {
+    AudioTimelineEvent event(aType, aTime, aValue,
+                             aTimeConstant, aDuration, aCurve, aCurveLength);
+
+    if (!ValidateEvent(event, aRv)) {
+      return;
+    }
+
+    AudioEventTimeline::InsertEvent<double>(event);
+
+    SendEventToEngine(event);
+
+    CleanupOldEvents();
+  }
+
+  void CleanupOldEvents();
+
+  void SendEventToEngine(const AudioTimelineEvent& aEvent);
+
+  void DisconnectFromGraphAndDestroyStream();
+
+  nsCycleCollectingAutoRefCnt mRefCnt;
+  NS_DECL_OWNINGTHREAD
+  RefPtr<AudioNode> mNode;
+  // For every InputNode, there is a corresponding entry in mOutputParams of the
+  // InputNode's mInputNode.
+  nsTArray<AudioNode::InputNode> mInputNodes;
+  const char* mName;
+  // The input port used to connect the AudioParam's stream to its node's stream
+  RefPtr<MediaInputPort> mNodeStreamPort;
+  const uint32_t mIndex;
+  const float mDefaultValue;
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif
+
diff --git a/dom/media/webaudio/AudioParamTimeline.h b/dom/media/webaudio/AudioParamTimeline.h
new file mode 100644
index 000000000..24ef753c3
--- /dev/null
+++ b/dom/media/webaudio/AudioParamTimeline.h
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AudioParamTimeline_h_
+#define AudioParamTimeline_h_
+
+#include "AudioEventTimeline.h"
+#include "mozilla/ErrorResult.h"
+#include "MediaStreamGraph.h"
+#include "AudioSegment.h"
+
+namespace mozilla {
+
+namespace dom {
+
+// This helper class is used to represent the part of the AudioParam
+// class that gets sent to AudioNodeEngine instances. In addition to
+// AudioEventTimeline methods, it holds a pointer to an optional
+// MediaStream which represents the AudioNode inputs to the AudioParam.
+// This MediaStream is managed by the AudioParam subclass on the main
+// thread, and can only be obtained from the AudioNodeEngine instances
+// consuming this class.
+class AudioParamTimeline : public AudioEventTimeline
+{
+  typedef AudioEventTimeline BaseClass;
+
+public:
+  explicit AudioParamTimeline(float aDefaultValue)
+    : BaseClass(aDefaultValue)
+  {
+  }
+
+  MediaStream* Stream() const
+  {
+    return mStream;
+  }
+
+  // "Simple" means a single constant value and no AudioNode inputs feeding
+  // this parameter.
+  bool HasSimpleValue() const
+  {
+    return BaseClass::HasSimpleValue() && !mStream;
+  }
+
+  template<class TimeType>
+  float GetValueAtTime(TimeType aTime)
+  {
+    return GetValueAtTime(aTime, 0);
+  }
+
+  // Dispatches an incoming timeline event: Cancel and Stream events are
+  // handled here (they mutate this object's state directly); everything else
+  // is forwarded to the base timeline.
+  template<typename TimeType>
+  void InsertEvent(const AudioTimelineEvent& aEvent)
+  {
+    if (aEvent.mType == AudioTimelineEvent::Cancel) {
+      CancelScheduledValues(aEvent.template Time<TimeType>());
+      return;
+    }
+    if (aEvent.mType == AudioTimelineEvent::Stream) {
+      mStream = aEvent.mStream;
+      return;
+    }
+    if (aEvent.mType == AudioTimelineEvent::SetValue) {
+      AudioEventTimeline::SetValue(aEvent.mValue);
+      return;
+    }
+    AudioEventTimeline::InsertEvent<TimeType>(aEvent);
+  }
+
+  // Get the value of the AudioParam at time aTime + aCounter.
+  // aCounter here is an offset to aTime if we try to get the value in ticks,
+  // otherwise it should always be zero. aCounter is meant to be used when
+  // getting the value of an a-rate AudioParam for each tick inside an
+  // AudioNodeEngine implementation.
+  template<class TimeType>
+  float GetValueAtTime(TimeType aTime, size_t aCounter);
+
+  // Get the values of the AudioParam at time aTime + (0 to aSize).
+  // aBuffer must have the correct aSize.
+  // aSize here is an offset to aTime if we try to get the value in ticks,
+  // otherwise it should always be zero. aSize is meant to be used when
+  // getting the value of an a-rate AudioParam for each tick inside an
+  // AudioNodeEngine implementation.
+  template<class TimeType>
+  void GetValuesAtTime(TimeType aTime, float* aBuffer, const size_t aSize);
+
+  virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
+  {
+    return mStream ? mStream->SizeOfIncludingThis(aMallocSizeOf) : 0;
+  }
+
+  virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const
+  {
+    return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
+  }
+
+
+private:
+  // Reads frame aCounter out of the last block produced by the AudioNode
+  // inputs; defined in AudioParam.cpp.
+  float AudioNodeInputValue(size_t aCounter) const;
+
+protected:
+  // This is created lazily when needed.
+  RefPtr<MediaStream> mStream;
+};
+
+// double overload: main-thread queries (seconds); AudioNode input is not
+// mixed in because it is only available on the graph thread.
+template<> inline float
+AudioParamTimeline::GetValueAtTime(double aTime, size_t aCounter)
+{
+  MOZ_ASSERT(!aCounter);
+
+  // Getting an AudioParam value on an AudioNode does not consider input from
+  // other AudioNodes, which is managed only on the graph thread.
+  return BaseClass::GetValueAtTime(aTime);
+}
+
+// int64_t overload: graph-thread queries (ticks); mixes in the AudioNode
+// input for the requested frame.
+template<> inline float
+AudioParamTimeline::GetValueAtTime(int64_t aTime, size_t aCounter)
+{
+  MOZ_ASSERT(aCounter < WEBAUDIO_BLOCK_SIZE);
+  MOZ_ASSERT(!aCounter || !HasSimpleValue());
+
+  // Mix the value of the AudioParam itself with that of the AudioNode inputs.
+  return BaseClass::GetValueAtTime(static_cast<int64_t>(aTime + aCounter)) +
+         (mStream ? AudioNodeInputValue(aCounter) : 0.0f);
+}
+
+template<> inline void
+AudioParamTimeline::GetValuesAtTime(double aTime, float* aBuffer,
+                                    const size_t aSize)
+{
+  MOZ_ASSERT(aBuffer);
+  MOZ_ASSERT(aSize == 1);
+
+  // Getting an AudioParam value on an AudioNode does not consider input from
+  // other AudioNodes, which is managed only on the graph thread.
+  *aBuffer = BaseClass::GetValueAtTime(aTime);
+}
+
+template<> inline void
+AudioParamTimeline::GetValuesAtTime(int64_t aTime, float* aBuffer,
+                                    const size_t aSize)
+{
+  MOZ_ASSERT(aBuffer);
+  MOZ_ASSERT(aSize <= WEBAUDIO_BLOCK_SIZE);
+  MOZ_ASSERT(aSize == 1 || !HasSimpleValue());
+
+  // Mix the value of the AudioParam itself with that of the AudioNode inputs.
+  BaseClass::GetValuesAtTime(aTime, aBuffer, aSize);
+  if (mStream) {
+    for (size_t i = 0; i < aSize; ++i) {
+      aBuffer[i] += AudioNodeInputValue(i);
+    }
+  }
+}
+
+} // namespace dom
+} // namespace mozilla
+
+#endif
diff --git a/dom/media/webaudio/AudioProcessingEvent.cpp b/dom/media/webaudio/AudioProcessingEvent.cpp
new file mode 100644
index 000000000..01b9585ca
--- /dev/null
+++ b/dom/media/webaudio/AudioProcessingEvent.cpp
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "AudioProcessingEvent.h"
+#include "mozilla/dom/AudioProcessingEventBinding.h"
+#include "mozilla/dom/ScriptSettings.h"
+#include "AudioContext.h"
+
+namespace mozilla {
+namespace dom {
+
+NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioProcessingEvent, Event,
+                                   mInputBuffer, mOutputBuffer, mNode)
+
+NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioProcessingEvent)
+NS_INTERFACE_MAP_END_INHERITING(Event)
+
+NS_IMPL_ADDREF_INHERITED(AudioProcessingEvent, Event)
+NS_IMPL_RELEASE_INHERITED(AudioProcessingEvent, Event)
+
+AudioProcessingEvent::AudioProcessingEvent(ScriptProcessorNode* aOwner,
+                                           nsPresContext* aPresContext,
+                                           WidgetEvent* aEvent)
+  : Event(aOwner, aPresContext, aEvent)
+  , mPlaybackTime(0.0)
+  , mNode(aOwner)
+{
+}
+
+AudioProcessingEvent::~AudioProcessingEvent()
+{
+}
+
+JSObject*
+AudioProcessingEvent::WrapObjectInternal(JSContext* aCx, JS::Handle<JSObject*> aGivenProto)
+{
+  return AudioProcessingEventBinding::Wrap(aCx, this, aGivenProto);
+}
+
+// Allocates an AudioBuffer sized to the owning ScriptProcessorNode's buffer
+// size and sample rate; used to create the input/output buffers on first
+// access.  On failure aRv is expected to carry NS_ERROR_OUT_OF_MEMORY.
+already_AddRefed<AudioBuffer>
+AudioProcessingEvent::LazilyCreateBuffer(uint32_t aNumberOfChannels,
+                                         ErrorResult& aRv)
+{
+  RefPtr<AudioBuffer> buffer =
+    AudioBuffer::Create(mNode->Context(), aNumberOfChannels,
+                        mNode->BufferSize(),
+                        mNode->Context()->SampleRate(), aRv);
+  MOZ_ASSERT(buffer || aRv.ErrorCodeIs(NS_ERROR_OUT_OF_MEMORY));
+  return buffer.forget();
+}
+
+} // namespace dom
+} // namespace mozilla
+
diff --git a/dom/media/webaudio/AudioProcessingEvent.h b/dom/media/webaudio/AudioProcessingEvent.h
new file mode 100644
index 000000000..7b3b54d3e
--- /dev/null
+++ b/dom/media/webaudio/AudioProcessingEvent.h
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef AudioProcessingEvent_h_
+#define AudioProcessingEvent_h_
+
+#include "AudioBuffer.h"
+#include "ScriptProcessorNode.h"
+#include "mozilla/dom/Event.h"
+
+namespace mozilla {
+namespace dom {
+
+// The "audioprocess" event dispatched to ScriptProcessorNode listeners.  The
+// input and output AudioBuffers are created lazily on first access.
+class AudioProcessingEvent final : public Event
+{
+public:
+  AudioProcessingEvent(ScriptProcessorNode* aOwner,
+                       nsPresContext* aPresContext,
+                       WidgetEvent* aEvent);
+
+  NS_DECL_ISUPPORTS_INHERITED
+  NS_FORWARD_TO_EVENT
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioProcessingEvent, Event)
+
+  JSObject* WrapObjectInternal(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
+
+  void InitEvent(AudioBuffer* aInputBuffer,
+                 uint32_t aNumberOfInputChannels,
+                 double aPlaybackTime)
+  {
+    InitEvent(NS_LITERAL_STRING("audioprocess"), false, false);
+    mInputBuffer = aInputBuffer;
+    mNumberOfInputChannels = aNumberOfInputChannels;
+    mPlaybackTime = aPlaybackTime;
+  }
+
+  double PlaybackTime() const
+  {
+    return mPlaybackTime;
+  }
+
+  AudioBuffer* GetInputBuffer(ErrorResult& aRv)
+  {
+    if (!mInputBuffer) {
+      mInputBuffer = LazilyCreateBuffer(mNumberOfInputChannels, aRv);
+    }
+    return mInputBuffer;
+  }
+
+  AudioBuffer* GetOutputBuffer(ErrorResult& aRv)
+  {
+    if (!mOutputBuffer) {
+      mOutputBuffer = LazilyCreateBuffer(mNode->NumberOfOutputChannels(), aRv);
+    }
+    return mOutputBuffer;
+  }
+
+  bool HasOutputBuffer() const
+  {
+    return !!mOutputBuffer;
+  }
+
+protected:
+  virtual ~AudioProcessingEvent();
+
+private:
+  already_AddRefed<AudioBuffer>
+  LazilyCreateBuffer(uint32_t aNumberOfChannels, ErrorResult& rv);
+
+private:
+  double mPlaybackTime;
+  RefPtr<AudioBuffer> mInputBuffer;
+  RefPtr<AudioBuffer> mOutputBuffer;
+  RefPtr<ScriptProcessorNode> mNode;
+  uint32_t mNumberOfInputChannels;
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif
+
diff --git a/dom/media/webaudio/BiquadFilterNode.cpp b/dom/media/webaudio/BiquadFilterNode.cpp
new file mode 100644
index 000000000..0c8c05586
--- /dev/null
+++ b/dom/media/webaudio/BiquadFilterNode.cpp
@@ -0,0 +1,355 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +#include "BiquadFilterNode.h" +#include "AlignmentUtils.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "PlayingRefChangeHandler.h" +#include "WebAudioUtils.h" +#include "blink/Biquad.h" +#include "mozilla/UniquePtr.h" +#include "AudioParamTimeline.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(BiquadFilterNode, AudioNode, + mFrequency, mDetune, mQ, mGain) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(BiquadFilterNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(BiquadFilterNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(BiquadFilterNode, AudioNode) + +static void +SetParamsOnBiquad(WebCore::Biquad& aBiquad, + float aSampleRate, + BiquadFilterType aType, + double aFrequency, + double aQ, + double aGain, + double aDetune) +{ + const double nyquist = aSampleRate * 0.5; + double normalizedFrequency = aFrequency / nyquist; + + if (aDetune) { + normalizedFrequency *= std::pow(2.0, aDetune / 1200); + } + + switch (aType) { + case BiquadFilterType::Lowpass: + aBiquad.setLowpassParams(normalizedFrequency, aQ); + break; + case BiquadFilterType::Highpass: + aBiquad.setHighpassParams(normalizedFrequency, aQ); + break; + case BiquadFilterType::Bandpass: + aBiquad.setBandpassParams(normalizedFrequency, aQ); + break; + case BiquadFilterType::Lowshelf: + aBiquad.setLowShelfParams(normalizedFrequency, aGain); + break; + case BiquadFilterType::Highshelf: + aBiquad.setHighShelfParams(normalizedFrequency, aGain); + break; + case BiquadFilterType::Peaking: + aBiquad.setPeakingParams(normalizedFrequency, aQ, aGain); + break; + case BiquadFilterType::Notch: + aBiquad.setNotchParams(normalizedFrequency, aQ); + break; + case BiquadFilterType::Allpass: + aBiquad.setAllpassParams(normalizedFrequency, aQ); + break; + default: + NS_NOTREACHED("We should never see the alternate names here"); + break; + } +} + +class BiquadFilterNodeEngine final : public 
AudioNodeEngine +{ +public: + BiquadFilterNodeEngine(AudioNode* aNode, + AudioDestinationNode* aDestination, + uint64_t aWindowID) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + // Keep the default values in sync with the default values in + // BiquadFilterNode::BiquadFilterNode + , mType(BiquadFilterType::Lowpass) + , mFrequency(350.f) + , mDetune(0.f) + , mQ(1.f) + , mGain(0.f) + , mWindowID(aWindowID) + { + } + + enum Parameteres { + TYPE, + FREQUENCY, + DETUNE, + Q, + GAIN + }; + void SetInt32Parameter(uint32_t aIndex, int32_t aValue) override + { + switch (aIndex) { + case TYPE: mType = static_cast<BiquadFilterType>(aValue); break; + default: + NS_ERROR("Bad BiquadFilterNode Int32Parameter"); + } + } + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case FREQUENCY: + mFrequency.InsertEvent<int64_t>(aEvent); + break; + case DETUNE: + mDetune.InsertEvent<int64_t>(aEvent); + break; + case Q: + mQ.InsertEvent<int64_t>(aEvent); + break; + case GAIN: + mGain.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad BiquadFilterNodeEngine TimelineParameter"); + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + float inputBuffer[WEBAUDIO_BLOCK_SIZE + 4]; + float* alignedInputBuffer = ALIGNED16(inputBuffer); + ASSERT_ALIGNED16(alignedInputBuffer); + + if (aInput.IsNull()) { + bool hasTail = false; + for (uint32_t i = 0; i < mBiquads.Length(); ++i) { + if (mBiquads[i].hasTail()) { + hasTail = true; + break; + } + } + if (!hasTail) { + if (!mBiquads.IsEmpty()) { + mBiquads.Clear(); + aStream->ScheduleCheckForInactive(); + + RefPtr<PlayingRefChangeHandler> refchanged = + new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE); + aStream->Graph()-> + 
DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + PodArrayZero(inputBuffer); + + } else if(mBiquads.Length() != aInput.ChannelCount()){ + if (mBiquads.IsEmpty()) { + RefPtr<PlayingRefChangeHandler> refchanged = + new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } else { // Help people diagnose bug 924718 + WebAudioUtils::LogToDeveloperConsole(mWindowID, + "BiquadFilterChannelCountChangeWarning"); + } + + // Adjust the number of biquads based on the number of channels + mBiquads.SetLength(aInput.ChannelCount()); + } + + uint32_t numberOfChannels = mBiquads.Length(); + aOutput->AllocateChannels(numberOfChannels); + + StreamTime pos = mDestination->GraphTimeToStreamTime(aFrom); + + double freq = mFrequency.GetValueAtTime(pos); + double q = mQ.GetValueAtTime(pos); + double gain = mGain.GetValueAtTime(pos); + double detune = mDetune.GetValueAtTime(pos); + + for (uint32_t i = 0; i < numberOfChannels; ++i) { + const float* input; + if (aInput.IsNull()) { + input = alignedInputBuffer; + } else { + input = static_cast<const float*>(aInput.mChannelData[i]); + if (aInput.mVolume != 1.0) { + AudioBlockCopyChannelWithScale(input, aInput.mVolume, alignedInputBuffer); + input = alignedInputBuffer; + } + } + SetParamsOnBiquad(mBiquads[i], aStream->SampleRate(), mType, freq, q, gain, detune); + + mBiquads[i].process(input, + aOutput->ChannelFloatsForWrite(i), + aInput.GetDuration()); + } + } + + bool IsActive() const override + { + return !mBiquads.IsEmpty(); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Not owned: + // - mDestination - probably not owned + // - AudioParamTimelines - counted in the AudioNode + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + amount += mBiquads.ShallowSizeOfExcludingThis(aMallocSizeOf); + return 
amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + AudioNodeStream* mDestination; + BiquadFilterType mType; + AudioParamTimeline mFrequency; + AudioParamTimeline mDetune; + AudioParamTimeline mQ; + AudioParamTimeline mGain; + nsTArray<WebCore::Biquad> mBiquads; + uint64_t mWindowID; +}; + +BiquadFilterNode::BiquadFilterNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mType(BiquadFilterType::Lowpass) + , mFrequency(new AudioParam(this, BiquadFilterNodeEngine::FREQUENCY, + 350.f, "frequency")) + , mDetune(new AudioParam(this, BiquadFilterNodeEngine::DETUNE, 0.f, "detune")) + , mQ(new AudioParam(this, BiquadFilterNodeEngine::Q, 1.f, "Q")) + , mGain(new AudioParam(this, BiquadFilterNodeEngine::GAIN, 0.f, "gain")) +{ + uint64_t windowID = aContext->GetParentObject()->WindowID(); + BiquadFilterNodeEngine* engine = new BiquadFilterNodeEngine(this, aContext->Destination(), windowID); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +BiquadFilterNode::~BiquadFilterNode() +{ +} + +size_t +BiquadFilterNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + + if (mFrequency) { + amount += mFrequency->SizeOfIncludingThis(aMallocSizeOf); + } + + if (mDetune) { + amount += mDetune->SizeOfIncludingThis(aMallocSizeOf); + } + + if (mQ) { + amount += mQ->SizeOfIncludingThis(aMallocSizeOf); + } + + if (mGain) { + amount += mGain->SizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +size_t +BiquadFilterNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +BiquadFilterNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return 
BiquadFilterNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +BiquadFilterNode::SetType(BiquadFilterType aType) +{ + mType = aType; + SendInt32ParameterToStream(BiquadFilterNodeEngine::TYPE, + static_cast<int32_t>(aType)); +} + +void +BiquadFilterNode::GetFrequencyResponse(const Float32Array& aFrequencyHz, + const Float32Array& aMagResponse, + const Float32Array& aPhaseResponse) +{ + aFrequencyHz.ComputeLengthAndData(); + aMagResponse.ComputeLengthAndData(); + aPhaseResponse.ComputeLengthAndData(); + + uint32_t length = std::min(std::min(aFrequencyHz.Length(), aMagResponse.Length()), + aPhaseResponse.Length()); + if (!length) { + return; + } + + auto frequencies = MakeUnique<float[]>(length); + float* frequencyHz = aFrequencyHz.Data(); + const double nyquist = Context()->SampleRate() * 0.5; + + // Normalize the frequencies + for (uint32_t i = 0; i < length; ++i) { + if (frequencyHz[i] >= 0 && frequencyHz[i] <= nyquist) { + frequencies[i] = static_cast<float>(frequencyHz[i] / nyquist); + } else { + frequencies[i] = std::numeric_limits<float>::quiet_NaN(); + } + } + + const double currentTime = Context()->CurrentTime(); + + double freq = mFrequency->GetValueAtTime(currentTime); + double q = mQ->GetValueAtTime(currentTime); + double gain = mGain->GetValueAtTime(currentTime); + double detune = mDetune->GetValueAtTime(currentTime); + + WebCore::Biquad biquad; + SetParamsOnBiquad(biquad, Context()->SampleRate(), mType, freq, q, gain, detune); + biquad.getFrequencyResponse(int(length), frequencies.get(), aMagResponse.Data(), aPhaseResponse.Data()); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/BiquadFilterNode.h b/dom/media/webaudio/BiquadFilterNode.h new file mode 100644 index 000000000..f81c623f0 --- /dev/null +++ b/dom/media/webaudio/BiquadFilterNode.h @@ -0,0 +1,82 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to 
the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef BiquadFilterNode_h_
+#define BiquadFilterNode_h_
+
+#include "AudioNode.h"
+#include "AudioParam.h"
+#include "mozilla/dom/BiquadFilterNodeBinding.h"
+
+namespace mozilla {
+namespace dom {
+
+class AudioContext;
+
+// DOM node exposing a second-order IIR filter; the four AudioParams are
+// mirrored into the engine running on the media graph thread.
+class BiquadFilterNode final : public AudioNode
+{
+public:
+  explicit BiquadFilterNode(AudioContext* aContext);
+
+  NS_DECL_ISUPPORTS_INHERITED
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(BiquadFilterNode, AudioNode)
+
+  JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override;
+
+  BiquadFilterType Type() const
+  {
+    return mType;
+  }
+  void SetType(BiquadFilterType aType);
+
+  AudioParam* Frequency() const
+  {
+    return mFrequency;
+  }
+
+  AudioParam* Detune() const
+  {
+    return mDetune;
+  }
+
+  AudioParam* Q() const
+  {
+    return mQ;
+  }
+
+  AudioParam* Gain() const
+  {
+    return mGain;
+  }
+
+  void GetFrequencyResponse(const Float32Array& aFrequencyHz,
+                            const Float32Array& aMagResponse,
+                            const Float32Array& aPhaseResponse);
+
+  const char* NodeType() const override
+  {
+    return "BiquadFilterNode";
+  }
+
+  size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override;
+  size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override;
+
+protected:
+  virtual ~BiquadFilterNode();
+
+private:
+  BiquadFilterType mType;
+  RefPtr<AudioParam> mFrequency;
+  RefPtr<AudioParam> mDetune;
+  RefPtr<AudioParam> mQ;
+  RefPtr<AudioParam> mGain;
+};
+
+} // namespace dom
+} // namespace mozilla
+
+#endif
+
diff --git a/dom/media/webaudio/BufferDecoder.cpp b/dom/media/webaudio/BufferDecoder.cpp
new file mode 100644
index 000000000..053a13bec
--- /dev/null
+++ b/dom/media/webaudio/BufferDecoder.cpp
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "BufferDecoder.h"
+
+#include "nsISupports.h"
+#include "MediaResource.h"
+#include "GMPService.h"
+
+namespace mozilla {
+
+NS_IMPL_ISUPPORTS0(BufferDecoder)
+
+BufferDecoder::BufferDecoder(MediaResource* aResource, GMPCrashHelper* aCrashHelper)
+  : mResource(aResource)
+  , mCrashHelper(aCrashHelper)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_COUNT_CTOR(BufferDecoder);
+}
+
+BufferDecoder::~BufferDecoder()
+{
+  // The dtor may run on any thread, we cannot be sure.
+  MOZ_COUNT_DTOR(BufferDecoder);
+}
+
+void
+BufferDecoder::BeginDecoding(TaskQueue* aTaskQueueIdentity)
+{
+  MOZ_ASSERT(!mTaskQueueIdentity && aTaskQueueIdentity);
+  mTaskQueueIdentity = aTaskQueueIdentity;
+}
+
+MediaResource*
+BufferDecoder::GetResource() const
+{
+  return mResource;
+}
+
+// There is no playback pipeline behind this decoder, so the statistics and
+// video-related queries below are stubs.
+void
+BufferDecoder::NotifyDecodedFrames(const FrameStatisticsData& aStats)
+{
+  // ignore
+}
+
+VideoFrameContainer*
+BufferDecoder::GetVideoFrameContainer()
+{
+  // no video frame
+  return nullptr;
+}
+
+layers::ImageContainer*
+BufferDecoder::GetImageContainer()
+{
+  // no image container
+  return nullptr;
+}
+
+MediaDecoderOwner*
+BufferDecoder::GetOwner() const
+{
+  // unknown
+  return nullptr;
+}
+
+already_AddRefed<GMPCrashHelper>
+BufferDecoder::GetCrashHelper()
+{
+  return do_AddRef(mCrashHelper);
+}
+
+} // namespace mozilla
diff --git a/dom/media/webaudio/BufferDecoder.h b/dom/media/webaudio/BufferDecoder.h
new file mode 100644
index 000000000..52cb92489
--- /dev/null
+++ b/dom/media/webaudio/BufferDecoder.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef BUFFER_DECODER_H_
+#define BUFFER_DECODER_H_
+
+#include "mozilla/Attributes.h"
+#include "mozilla/ReentrantMonitor.h"
+#include "mozilla/TaskQueue.h"
+
+#include "AbstractMediaDecoder.h"
+
+namespace mozilla {
+
+/**
+ * This class provides a decoder object which decodes a media file that lives in
+ * a memory buffer.
+ */
+class BufferDecoder final : public AbstractMediaDecoder
+{
+public:
+  // This class holds a weak pointer to MediaResource. It's the responsibility
+  // of the caller to manage the memory of the MediaResource object.
+  explicit BufferDecoder(MediaResource* aResource, GMPCrashHelper* aCrashHelper);
+
+  NS_DECL_THREADSAFE_ISUPPORTS
+
+  // This has to be called before decoding begins
+  void BeginDecoding(TaskQueue* aTaskQueueIdentity);
+
+  MediaResource* GetResource() const final override;
+
+  void NotifyDecodedFrames(const FrameStatisticsData& aStats) final override;
+
+  VideoFrameContainer* GetVideoFrameContainer() final override;
+  layers::ImageContainer* GetImageContainer() final override;
+
+  MediaDecoderOwner* GetOwner() const final override;
+
+  already_AddRefed<GMPCrashHelper> GetCrashHelper() override;
+
+private:
+  virtual ~BufferDecoder();
+  RefPtr<TaskQueue> mTaskQueueIdentity;
+  RefPtr<MediaResource> mResource;
+  RefPtr<GMPCrashHelper> mCrashHelper;
+};
+
+} // namespace mozilla
+
+#endif /* BUFFER_DECODER_H_ */
diff --git a/dom/media/webaudio/ChannelMergerNode.cpp b/dom/media/webaudio/ChannelMergerNode.cpp
new file mode 100644
index 000000000..7b63a98a6
--- /dev/null
+++ b/dom/media/webaudio/ChannelMergerNode.cpp
@@ -0,0 +1,90 @@
+/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et cindent: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0.
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/dom/ChannelMergerNode.h" +#include "mozilla/dom/ChannelMergerNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_ISUPPORTS_INHERITED0(ChannelMergerNode, AudioNode) + +class ChannelMergerNodeEngine final : public AudioNodeEngine +{ +public: + explicit ChannelMergerNodeEngine(ChannelMergerNode* aNode) + : AudioNodeEngine(aNode) + { + MOZ_ASSERT(NS_IsMainThread()); + } + + void ProcessBlocksOnPorts(AudioNodeStream* aStream, + const OutputChunks& aInput, + OutputChunks& aOutput, + bool* aFinished) override + { + MOZ_ASSERT(aInput.Length() >= 1, "Should have one or more input ports"); + + // Get the number of output channels, and allocate it + size_t channelCount = InputCount(); + bool allNull = true; + for (size_t i = 0; i < channelCount; ++i) { + allNull &= aInput[i].IsNull(); + } + if (allNull) { + aOutput[0].SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + aOutput[0].AllocateChannels(channelCount); + + for (size_t i = 0; i < channelCount; ++i) { + float* output = aOutput[0].ChannelFloatsForWrite(i); + if (aInput[i].IsNull()) { + PodZero(output, WEBAUDIO_BLOCK_SIZE); + } else { + AudioBlockCopyChannelWithScale( + static_cast<const float*>(aInput[i].mChannelData[0]), + aInput[i].mVolume, output); + } + } + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } +}; + +ChannelMergerNode::ChannelMergerNode(AudioContext* aContext, + uint16_t aInputCount) + : AudioNode(aContext, + 1, + ChannelCountMode::Explicit, + ChannelInterpretation::Speakers) + , mInputCount(aInputCount) +{ + mStream = AudioNodeStream::Create(aContext, + new ChannelMergerNodeEngine(this), + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +ChannelMergerNode::~ChannelMergerNode() +{ +} + 
+JSObject* +ChannelMergerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return ChannelMergerNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/ChannelMergerNode.h b/dom/media/webaudio/ChannelMergerNode.h new file mode 100644 index 000000000..d064c8e23 --- /dev/null +++ b/dom/media/webaudio/ChannelMergerNode.h @@ -0,0 +1,50 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ChannelMergerNode_h_ +#define ChannelMergerNode_h_ + +#include "AudioNode.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class ChannelMergerNode final : public AudioNode +{ +public: + ChannelMergerNode(AudioContext* aContext, + uint16_t aInputCount); + + NS_DECL_ISUPPORTS_INHERITED + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + uint16_t NumberOfInputs() const override { return mInputCount; } + + const char* NodeType() const override + { + return "ChannelMergerNode"; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +protected: + virtual ~ChannelMergerNode(); + +private: + const uint16_t mInputCount; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/ChannelSplitterNode.cpp b/dom/media/webaudio/ChannelSplitterNode.cpp new file mode 100644 index 000000000..34a414d16 --- /dev/null +++ b/dom/media/webaudio/ChannelSplitterNode.cpp @@ -0,0 +1,81 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is 
subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "mozilla/dom/ChannelSplitterNode.h" +#include "mozilla/dom/ChannelSplitterNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_ISUPPORTS_INHERITED0(ChannelSplitterNode, AudioNode) + +class ChannelSplitterNodeEngine final : public AudioNodeEngine +{ +public: + explicit ChannelSplitterNodeEngine(ChannelSplitterNode* aNode) + : AudioNodeEngine(aNode) + { + MOZ_ASSERT(NS_IsMainThread()); + } + + void ProcessBlocksOnPorts(AudioNodeStream* aStream, + const OutputChunks& aInput, + OutputChunks& aOutput, + bool* aFinished) override + { + MOZ_ASSERT(aInput.Length() == 1, "Should only have one input port"); + + aOutput.SetLength(OutputCount()); + for (uint16_t i = 0; i < OutputCount(); ++i) { + if (i < aInput[0].ChannelCount()) { + // Split out existing channels + aOutput[i].AllocateChannels(1); + AudioBlockCopyChannelWithScale( + static_cast<const float*>(aInput[0].mChannelData[i]), + aInput[0].mVolume, + aOutput[i].ChannelFloatsForWrite(0)); + } else { + // Pad with silent channels if needed + aOutput[i].SetNull(WEBAUDIO_BLOCK_SIZE); + } + } + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } +}; + +ChannelSplitterNode::ChannelSplitterNode(AudioContext* aContext, + uint16_t aOutputCount) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mOutputCount(aOutputCount) +{ + mStream = AudioNodeStream::Create(aContext, + new ChannelSplitterNodeEngine(this), + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +ChannelSplitterNode::~ChannelSplitterNode() +{ +} + +JSObject* +ChannelSplitterNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return 
ChannelSplitterNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/ChannelSplitterNode.h b/dom/media/webaudio/ChannelSplitterNode.h new file mode 100644 index 000000000..3b267eccc --- /dev/null +++ b/dom/media/webaudio/ChannelSplitterNode.h @@ -0,0 +1,50 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ChannelSplitterNode_h_ +#define ChannelSplitterNode_h_ + +#include "AudioNode.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class ChannelSplitterNode final : public AudioNode +{ +public: + ChannelSplitterNode(AudioContext* aContext, + uint16_t aOutputCount); + + NS_DECL_ISUPPORTS_INHERITED + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + uint16_t NumberOfOutputs() const override { return mOutputCount; } + + const char* NodeType() const override + { + return "ChannelSplitterNode"; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +protected: + virtual ~ChannelSplitterNode(); + +private: + const uint16_t mOutputCount; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/ConstantSourceNode.cpp b/dom/media/webaudio/ConstantSourceNode.cpp new file mode 100644 index 000000000..b6884105c --- /dev/null +++ b/dom/media/webaudio/ConstantSourceNode.cpp @@ -0,0 +1,286 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ConstantSourceNode.h" + +#include "AudioDestinationNode.h" +#include "nsContentUtils.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(ConstantSourceNode, AudioNode, + mOffset) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(ConstantSourceNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(ConstantSourceNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(ConstantSourceNode, AudioNode) + +class ConstantSourceNodeEngine final : public AudioNodeEngine +{ +public: + ConstantSourceNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination) + : AudioNodeEngine(aNode) + , mSource(nullptr) + , mDestination(aDestination->Stream()) + , mStart(-1) + , mStop(STREAM_TIME_MAX) + // Keep the default values in sync with ConstantSourceNode::ConstantSourceNode. + , mOffset(1.0f) + { + MOZ_ASSERT(NS_IsMainThread()); + } + + void SetSourceStream(AudioNodeStream* aSource) + { + mSource = aSource; + } + + enum Parameters { + OFFSET, + START, + STOP, + }; + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case OFFSET: + mOffset.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad ConstantSourceNodeEngine TimelineParameter"); + } + } + + void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override + { + switch (aIndex) { + case START: + mStart = aParam; + mSource->SetActive(); + break; + case STOP: mStop = aParam; break; + default: + NS_ERROR("Bad ConstantSourceNodeEngine StreamTimeParameter"); + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + MOZ_ASSERT(mSource == aStream, "Invalid source stream"); + + 
StreamTime ticks = mDestination->GraphTimeToStreamTime(aFrom); + if (mStart == -1) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + if (ticks + WEBAUDIO_BLOCK_SIZE <= mStart || ticks >= mStop) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + } else { + aOutput->AllocateChannels(1); + float* output = aOutput->ChannelFloatsForWrite(0); + + if (mOffset.HasSimpleValue()) { + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + output[i] = mOffset.GetValueAtTime(aFrom, 0); + } + } else { + mOffset.GetValuesAtTime(ticks, output, WEBAUDIO_BLOCK_SIZE); + } + } + + if (ticks + WEBAUDIO_BLOCK_SIZE >= mStop) { + // We've finished playing. + *aFinished = true; + } + } + + bool IsActive() const override + { + // start() has been called. + return mStart != -1; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + + // Not owned: + // - mSource + // - mDestination + // - mOffset (internal ref owned by node) + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + AudioNodeStream* mSource; + AudioNodeStream* mDestination; + StreamTime mStart; + StreamTime mStop; + AudioParamTimeline mOffset; +}; + +ConstantSourceNode::ConstantSourceNode(AudioContext* aContext) + : AudioNode(aContext, + 1, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mOffset(new AudioParam(this, ConstantSourceNodeEngine::OFFSET, + 1.0, "offset")) + , mStartCalled(false) +{ + ConstantSourceNodeEngine* engine = new ConstantSourceNodeEngine(this, aContext->Destination()); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NEED_MAIN_THREAD_FINISHED, + aContext->Graph()); + engine->SetSourceStream(mStream); + mStream->AddMainThreadListener(this); +} + +ConstantSourceNode::~ConstantSourceNode() +{ +} + +size_t 
+ConstantSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + + amount += mOffset->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +ConstantSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +ConstantSourceNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return ConstantSourceNodeBinding::Wrap(aCx, this, aGivenProto); +} + +already_AddRefed<ConstantSourceNode> +ConstantSourceNode::Constructor(const GlobalObject& aGlobal, + AudioContext& aContext, + const ConstantSourceOptions& aOptions, + ErrorResult& aRv) +{ + RefPtr<ConstantSourceNode> object = new ConstantSourceNode(&aContext); + object->mOffset->SetValue(aOptions.mOffset); + return object.forget(); +} + +void +ConstantSourceNode::DestroyMediaStream() +{ + if (mStream) { + mStream->RemoveMainThreadListener(this); + } + AudioNode::DestroyMediaStream(); +} + +void +ConstantSourceNode::Start(double aWhen, ErrorResult& aRv) +{ + if (!WebAudioUtils::IsTimeValid(aWhen)) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + if (mStartCalled) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + mStartCalled = true; + + if (!mStream) { + return; + } + + mStream->SetStreamTimeParameter(ConstantSourceNodeEngine::START, + Context(), aWhen); + + MarkActive(); +} + +void +ConstantSourceNode::Stop(double aWhen, ErrorResult& aRv) +{ + if (!WebAudioUtils::IsTimeValid(aWhen)) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + if (!mStartCalled) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + + if (!mStream || !Context()) { + return; + } + + mStream->SetStreamTimeParameter(ConstantSourceNodeEngine::STOP, + Context(), std::max(0.0, aWhen)); +} + +void +ConstantSourceNode::NotifyMainThreadStreamFinished() +{ + MOZ_ASSERT(mStream->IsFinished()); + + class 
EndedEventDispatcher final : public Runnable + { + public: + explicit EndedEventDispatcher(ConstantSourceNode* aNode) + : mNode(aNode) {} + NS_IMETHOD Run() override + { + // If it's not safe to run scripts right now, schedule this to run later + if (!nsContentUtils::IsSafeToRunScript()) { + nsContentUtils::AddScriptRunner(this); + return NS_OK; + } + + mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended")); + // Release stream resources. + mNode->DestroyMediaStream(); + return NS_OK; + } + private: + RefPtr<ConstantSourceNode> mNode; + }; + + NS_DispatchToMainThread(new EndedEventDispatcher(this)); + + // Drop the playing reference + // Warning: The below line might delete this. + MarkInactive(); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/ConstantSourceNode.h b/dom/media/webaudio/ConstantSourceNode.h new file mode 100644 index 000000000..7b5e7197e --- /dev/null +++ b/dom/media/webaudio/ConstantSourceNode.h @@ -0,0 +1,76 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef ConstantSourceNode_h_ +#define ConstantSourceNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" +#include "mozilla/dom/ConstantSourceNodeBinding.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class ConstantSourceNode final : public AudioNode, + public MainThreadMediaStreamListener +{ +public: + explicit ConstantSourceNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(ConstantSourceNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + static already_AddRefed<ConstantSourceNode> + Constructor(const GlobalObject& aGlobal, + AudioContext& aContext, + const ConstantSourceOptions& aOptions, + ErrorResult& aRv); + + void DestroyMediaStream() override; + + uint16_t NumberOfInputs() const final override + { + return 0; + } + + AudioParam* Offset() const + { + return mOffset; + } + + void Start(double aWhen, ErrorResult& rv); + void Stop(double aWhen, ErrorResult& rv); + + IMPL_EVENT_HANDLER(ended) + + void NotifyMainThreadStreamFinished() override; + + const char* NodeType() const override + { + return "ConstantSourceNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~ConstantSourceNode(); + +private: + RefPtr<AudioParam> mOffset; + bool mStartCalled; + +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/ConvolverNode.cpp b/dom/media/webaudio/ConvolverNode.cpp new file mode 100644 index 000000000..314cdf7cf --- /dev/null +++ b/dom/media/webaudio/ConvolverNode.cpp @@ -0,0 +1,295 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ConvolverNode.h" +#include "mozilla/dom/ConvolverNodeBinding.h" +#include "nsAutoPtr.h" +#include "AlignmentUtils.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "blink/Reverb.h" +#include "PlayingRefChangeHandler.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(ConvolverNode, AudioNode, mBuffer) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(ConvolverNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(ConvolverNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(ConvolverNode, AudioNode) + +class ConvolverNodeEngine final : public AudioNodeEngine +{ + typedef PlayingRefChangeHandler PlayingRefChanged; +public: + ConvolverNodeEngine(AudioNode* aNode, bool aNormalize) + : AudioNodeEngine(aNode) + , mBufferLength(0) + , mLeftOverData(INT32_MIN) + , mSampleRate(0.0f) + , mUseBackgroundThreads(!aNode->Context()->IsOffline()) + , mNormalize(aNormalize) + { + } + + enum Parameters { + BUFFER_LENGTH, + SAMPLE_RATE, + NORMALIZE + }; + void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override + { + switch (aIndex) { + case BUFFER_LENGTH: + // BUFFER_LENGTH is the first parameter that we set when setting a new buffer, + // so we should be careful to invalidate the rest of our state here. 
+ mBuffer = nullptr; + mSampleRate = 0.0f; + mBufferLength = aParam; + mLeftOverData = INT32_MIN; + break; + case SAMPLE_RATE: + mSampleRate = aParam; + break; + case NORMALIZE: + mNormalize = !!aParam; + break; + default: + NS_ERROR("Bad ConvolverNodeEngine Int32Parameter"); + } + } + void SetDoubleParameter(uint32_t aIndex, double aParam) override + { + switch (aIndex) { + case SAMPLE_RATE: + mSampleRate = aParam; + AdjustReverb(); + break; + default: + NS_ERROR("Bad ConvolverNodeEngine DoubleParameter"); + } + } + void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override + { + mBuffer = aBuffer; + AdjustReverb(); + } + + void AdjustReverb() + { + // Note about empirical tuning (this is copied from Blink) + // The maximum FFT size affects reverb performance and accuracy. + // If the reverb is single-threaded and processes entirely in the real-time audio thread, + // it's important not to make this too high. In this case 8192 is a good value. + // But, the Reverb object is multi-threaded, so we want this as high as possible without losing too much accuracy. + // Very large FFTs will have worse phase errors. Given these constraints 32768 is a good compromise. 
+ const size_t MaxFFTSize = 32768; + + if (!mBuffer || !mBufferLength || !mSampleRate) { + mReverb = nullptr; + mLeftOverData = INT32_MIN; + return; + } + + mReverb = new WebCore::Reverb(mBuffer, mBufferLength, + MaxFFTSize, 2, mUseBackgroundThreads, + mNormalize, mSampleRate); + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + if (!mReverb) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + AudioBlock input = aInput; + if (aInput.IsNull()) { + if (mLeftOverData > 0) { + mLeftOverData -= WEBAUDIO_BLOCK_SIZE; + input.AllocateChannels(1); + WriteZeroesToAudioBlock(&input, 0, WEBAUDIO_BLOCK_SIZE); + } else { + if (mLeftOverData != INT32_MIN) { + mLeftOverData = INT32_MIN; + aStream->ScheduleCheckForInactive(); + RefPtr<PlayingRefChanged> refchanged = + new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + } else { + if (aInput.mVolume != 1.0f) { + // Pre-multiply the input's volume + uint32_t numChannels = aInput.ChannelCount(); + input.AllocateChannels(numChannels); + for (uint32_t i = 0; i < numChannels; ++i) { + const float* src = static_cast<const float*>(aInput.mChannelData[i]); + float* dest = input.ChannelFloatsForWrite(i); + AudioBlockCopyChannelWithScale(src, aInput.mVolume, dest); + } + } + + if (mLeftOverData <= 0) { + RefPtr<PlayingRefChanged> refchanged = + new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + mLeftOverData = mBufferLength; + MOZ_ASSERT(mLeftOverData > 0); + } + aOutput->AllocateChannels(2); + + mReverb->process(&input, aOutput); + } + + bool IsActive() const override + { + return mLeftOverData != INT32_MIN; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const 
override + { + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + if (mBuffer && !mBuffer->IsShared()) { + amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + + if (mReverb) { + amount += mReverb->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + RefPtr<ThreadSharedFloatArrayBufferList> mBuffer; + nsAutoPtr<WebCore::Reverb> mReverb; + int32_t mBufferLength; + int32_t mLeftOverData; + float mSampleRate; + bool mUseBackgroundThreads; + bool mNormalize; +}; + +ConvolverNode::ConvolverNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Clamped_max, + ChannelInterpretation::Speakers) + , mNormalize(true) +{ + ConvolverNodeEngine* engine = new ConvolverNodeEngine(this, mNormalize); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +ConvolverNode::~ConvolverNode() +{ +} + +size_t +ConvolverNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + if (mBuffer) { + // NB: mBuffer might be shared with the associated engine, by convention + // the AudioNode will report. 
+ amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + return amount; +} + +size_t +ConvolverNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +ConvolverNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return ConvolverNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +ConvolverNode::SetBuffer(JSContext* aCx, AudioBuffer* aBuffer, ErrorResult& aRv) +{ + if (aBuffer) { + switch (aBuffer->NumberOfChannels()) { + case 1: + case 2: + case 4: + // Supported number of channels + break; + default: + aRv.Throw(NS_ERROR_DOM_SYNTAX_ERR); + return; + } + } + + mBuffer = aBuffer; + + // Send the buffer to the stream + AudioNodeStream* ns = mStream; + MOZ_ASSERT(ns, "Why don't we have a stream here?"); + if (mBuffer) { + uint32_t length = mBuffer->Length(); + RefPtr<ThreadSharedFloatArrayBufferList> data = + mBuffer->GetThreadSharedChannelsForRate(aCx); + if (data && length < WEBAUDIO_BLOCK_SIZE) { + // For very small impulse response buffers, we need to pad the + // buffer with 0 to make sure that the Reverb implementation + // has enough data to compute FFTs from. + length = WEBAUDIO_BLOCK_SIZE; + RefPtr<ThreadSharedFloatArrayBufferList> paddedBuffer = + new ThreadSharedFloatArrayBufferList(data->GetChannels()); + void* channelData = malloc(sizeof(float) * length * data->GetChannels() + 15); + float* alignedChannelData = ALIGNED16(channelData); + ASSERT_ALIGNED16(alignedChannelData); + for (uint32_t i = 0; i < data->GetChannels(); ++i) { + PodCopy(alignedChannelData + length * i, data->GetData(i), mBuffer->Length()); + PodZero(alignedChannelData + length * i + mBuffer->Length(), WEBAUDIO_BLOCK_SIZE - mBuffer->Length()); + paddedBuffer->SetData(i, (i == 0) ? 
channelData : nullptr, free, alignedChannelData); + } + data = paddedBuffer; + } + SendInt32ParameterToStream(ConvolverNodeEngine::BUFFER_LENGTH, length); + SendDoubleParameterToStream(ConvolverNodeEngine::SAMPLE_RATE, + mBuffer->SampleRate()); + ns->SetBuffer(data.forget()); + } else { + ns->SetBuffer(nullptr); + } +} + +void +ConvolverNode::SetNormalize(bool aNormalize) +{ + mNormalize = aNormalize; + SendInt32ParameterToStream(ConvolverNodeEngine::NORMALIZE, aNormalize); +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/ConvolverNode.h b/dom/media/webaudio/ConvolverNode.h new file mode 100644 index 000000000..53cff9d27 --- /dev/null +++ b/dom/media/webaudio/ConvolverNode.h @@ -0,0 +1,78 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef ConvolverNode_h_ +#define ConvolverNode_h_ + +#include "AudioNode.h" +#include "AudioBuffer.h" + +namespace mozilla { +namespace dom { + +class ConvolverNode final : public AudioNode +{ +public: + explicit ConvolverNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(ConvolverNode, AudioNode); + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + AudioBuffer* GetBuffer(JSContext* aCx) const + { + return mBuffer; + } + + void SetBuffer(JSContext* aCx, AudioBuffer* aBufferi, ErrorResult& aRv); + + bool Normalize() const + { + return mNormalize; + } + + void SetNormalize(bool aNormal); + + void SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv) override + { + if (aChannelCount > 2) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + AudioNode::SetChannelCount(aChannelCount, aRv); + } + void SetChannelCountModeValue(ChannelCountMode aMode, ErrorResult& aRv) override + { + if (aMode == ChannelCountMode::Max) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + AudioNode::SetChannelCountModeValue(aMode, aRv); + } + + const char* NodeType() const override + { + return "ConvolverNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~ConvolverNode(); + +private: + RefPtr<AudioBuffer> mBuffer; + bool mNormalize; +}; + + +} //end namespace dom +} //end namespace mozilla + +#endif + diff --git a/dom/media/webaudio/DelayBuffer.cpp b/dom/media/webaudio/DelayBuffer.cpp new file mode 100644 index 000000000..c7f7198c9 --- /dev/null +++ b/dom/media/webaudio/DelayBuffer.cpp @@ -0,0 +1,263 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "DelayBuffer.h" + +#include "mozilla/PodOperations.h" +#include "AudioChannelFormat.h" +#include "AudioNodeEngine.h" + +namespace mozilla { + +size_t +DelayBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = 0; + amount += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < mChunks.Length(); i++) { + amount += mChunks[i].SizeOfExcludingThis(aMallocSizeOf, false); + } + + amount += mUpmixChannels.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +void +DelayBuffer::Write(const AudioBlock& aInputChunk) +{ + // We must have a reference to the buffer if there are channels + MOZ_ASSERT(aInputChunk.IsNull() == !aInputChunk.ChannelCount()); +#ifdef DEBUG + MOZ_ASSERT(!mHaveWrittenBlock); + mHaveWrittenBlock = true; +#endif + + if (!EnsureBuffer()) { + return; + } + + if (mCurrentChunk == mLastReadChunk) { + mLastReadChunk = -1; // invalidate cache + } + mChunks[mCurrentChunk] = aInputChunk.AsAudioChunk(); +} + +void +DelayBuffer::Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE], + AudioBlock* aOutputChunk, + ChannelInterpretation aChannelInterpretation) +{ + int chunkCount = mChunks.Length(); + if (!chunkCount) { + aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + // Find the maximum number of contributing channels to determine the output + // channel count that retains all signal information. Buffered blocks will + // be upmixed if necessary. + // + // First find the range of "delay" offsets backwards from the current + // position. Note that these may be negative for frames that are after the + // current position (including i). 
+ double minDelay = aPerFrameDelays[0]; + double maxDelay = minDelay; + for (unsigned i = 1; i < WEBAUDIO_BLOCK_SIZE; ++i) { + minDelay = std::min(minDelay, aPerFrameDelays[i] - i); + maxDelay = std::max(maxDelay, aPerFrameDelays[i] - i); + } + + // Now find the chunks touched by this range and check their channel counts. + int oldestChunk = ChunkForDelay(int(maxDelay) + 1); + int youngestChunk = ChunkForDelay(minDelay); + + uint32_t channelCount = 0; + for (int i = oldestChunk; true; i = (i + 1) % chunkCount) { + channelCount = GetAudioChannelsSuperset(channelCount, + mChunks[i].ChannelCount()); + if (i == youngestChunk) { + break; + } + } + + if (channelCount) { + aOutputChunk->AllocateChannels(channelCount); + ReadChannels(aPerFrameDelays, aOutputChunk, + 0, channelCount, aChannelInterpretation); + } else { + aOutputChunk->SetNull(WEBAUDIO_BLOCK_SIZE); + } + + // Remember currentDelayFrames for the next ProcessBlock call + mCurrentDelay = aPerFrameDelays[WEBAUDIO_BLOCK_SIZE - 1]; +} + +void +DelayBuffer::ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE], + AudioBlock* aOutputChunk, uint32_t aChannel, + ChannelInterpretation aChannelInterpretation) +{ + if (!mChunks.Length()) { + float* outputChannel = aOutputChunk->ChannelFloatsForWrite(aChannel); + PodZero(outputChannel, WEBAUDIO_BLOCK_SIZE); + return; + } + + ReadChannels(aPerFrameDelays, aOutputChunk, + aChannel, 1, aChannelInterpretation); +} + +void +DelayBuffer::ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE], + AudioBlock* aOutputChunk, + uint32_t aFirstChannel, uint32_t aNumChannelsToRead, + ChannelInterpretation aChannelInterpretation) +{ + uint32_t totalChannelCount = aOutputChunk->ChannelCount(); + uint32_t readChannelsEnd = aFirstChannel + aNumChannelsToRead; + MOZ_ASSERT(readChannelsEnd <= totalChannelCount); + + if (mUpmixChannels.Length() != totalChannelCount) { + mLastReadChunk = -1; // invalidate cache + } + + for (uint32_t channel = aFirstChannel; + channel < 
readChannelsEnd; ++channel) { + PodZero(aOutputChunk->ChannelFloatsForWrite(channel), WEBAUDIO_BLOCK_SIZE); + } + + for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + double currentDelay = aPerFrameDelays[i]; + MOZ_ASSERT(currentDelay >= 0.0); + MOZ_ASSERT(currentDelay <= (mChunks.Length() - 1) * WEBAUDIO_BLOCK_SIZE); + + // Interpolate two input frames in case the read position does not match + // an integer index. + // Use the larger delay, for the older frame, first, as this is more + // likely to use the cached upmixed channel arrays. + int floorDelay = int(currentDelay); + double interpolationFactor = currentDelay - floorDelay; + int positions[2]; + positions[1] = PositionForDelay(floorDelay) + i; + positions[0] = positions[1] - 1; + + for (unsigned tick = 0; tick < ArrayLength(positions); ++tick) { + int readChunk = ChunkForPosition(positions[tick]); + // mVolume is not set on default initialized chunks so handle null + // chunks specially. + if (!mChunks[readChunk].IsNull()) { + int readOffset = OffsetForPosition(positions[tick]); + UpdateUpmixChannels(readChunk, totalChannelCount, + aChannelInterpretation); + double multiplier = interpolationFactor * mChunks[readChunk].mVolume; + for (uint32_t channel = aFirstChannel; + channel < readChannelsEnd; ++channel) { + aOutputChunk->ChannelFloatsForWrite(channel)[i] += multiplier * + mUpmixChannels[channel][readOffset]; + } + } + + interpolationFactor = 1.0 - interpolationFactor; + } + } +} + +void +DelayBuffer::Read(double aDelayTicks, AudioBlock* aOutputChunk, + ChannelInterpretation aChannelInterpretation) +{ + const bool firstTime = mCurrentDelay < 0.0; + double currentDelay = firstTime ? 
aDelayTicks : mCurrentDelay; + + double computedDelay[WEBAUDIO_BLOCK_SIZE]; + + for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + // If the value has changed, smoothly approach it + currentDelay += (aDelayTicks - currentDelay) * mSmoothingRate; + computedDelay[i] = currentDelay; + } + + Read(computedDelay, aOutputChunk, aChannelInterpretation); +} + +bool +DelayBuffer::EnsureBuffer() +{ + if (mChunks.Length() == 0) { + // The length of the buffer is at least one block greater than the maximum + // delay so that writing an input block does not overwrite the block that + // would subsequently be read at maximum delay. Also round up to the next + // block size, so that no block of writes will need to wrap. + const int chunkCount = (mMaxDelayTicks + 2 * WEBAUDIO_BLOCK_SIZE - 1) >> + WEBAUDIO_BLOCK_SIZE_BITS; + if (!mChunks.SetLength(chunkCount, fallible)) { + return false; + } + + mLastReadChunk = -1; + } + return true; +} + +int +DelayBuffer::PositionForDelay(int aDelay) { + // Adding mChunks.Length() keeps integers positive for defined and + // appropriate bitshift, remainder, and bitwise operations. 
+ return ((mCurrentChunk + mChunks.Length()) * WEBAUDIO_BLOCK_SIZE) - aDelay; +} + +int +DelayBuffer::ChunkForPosition(int aPosition) +{ + MOZ_ASSERT(aPosition >= 0); + return (aPosition >> WEBAUDIO_BLOCK_SIZE_BITS) % mChunks.Length(); +} + +int +DelayBuffer::OffsetForPosition(int aPosition) +{ + MOZ_ASSERT(aPosition >= 0); + return aPosition & (WEBAUDIO_BLOCK_SIZE - 1); +} + +int +DelayBuffer::ChunkForDelay(int aDelay) +{ + return ChunkForPosition(PositionForDelay(aDelay)); +} + +void +DelayBuffer::UpdateUpmixChannels(int aNewReadChunk, uint32_t aChannelCount, + ChannelInterpretation aChannelInterpretation) +{ + if (aNewReadChunk == mLastReadChunk) { + MOZ_ASSERT(mUpmixChannels.Length() == aChannelCount); + return; + } + + NS_WARNING_ASSERTION(mHaveWrittenBlock || aNewReadChunk != mCurrentChunk, + "Smoothing is making feedback delay too small."); + + mLastReadChunk = aNewReadChunk; + mUpmixChannels = mChunks[aNewReadChunk].ChannelData<float>(); + MOZ_ASSERT(mUpmixChannels.Length() <= aChannelCount); + if (mUpmixChannels.Length() < aChannelCount) { + if (aChannelInterpretation == ChannelInterpretation::Speakers) { + AudioChannelsUpMix(&mUpmixChannels, + aChannelCount, SilentChannel::ZeroChannel<float>()); + MOZ_ASSERT(mUpmixChannels.Length() == aChannelCount, + "We called GetAudioChannelsSuperset to avoid this"); + } else { + // Fill up the remaining channels with zeros + for (uint32_t channel = mUpmixChannels.Length(); + channel < aChannelCount; ++channel) { + mUpmixChannels.AppendElement(SilentChannel::ZeroChannel<float>()); + } + } + } +} + +} // namespace mozilla diff --git a/dom/media/webaudio/DelayBuffer.h b/dom/media/webaudio/DelayBuffer.h new file mode 100644 index 000000000..e55d0ba83 --- /dev/null +++ b/dom/media/webaudio/DelayBuffer.h @@ -0,0 +1,115 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * 
License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DelayBuffer_h_ +#define DelayBuffer_h_ + +#include "nsTArray.h" +#include "AudioSegment.h" +#include "mozilla/dom/AudioNodeBinding.h" // for ChannelInterpretation + +namespace mozilla { + +class DelayBuffer final +{ + typedef dom::ChannelInterpretation ChannelInterpretation; + +public: + // See WebAudioUtils::ComputeSmoothingRate() for frame to frame exponential + // |smoothingRate| multiplier. + DelayBuffer(double aMaxDelayTicks, double aSmoothingRate) + : mSmoothingRate(aSmoothingRate) + , mCurrentDelay(-1.0) + // Round the maximum delay up to the next tick. + , mMaxDelayTicks(ceil(aMaxDelayTicks)) + , mCurrentChunk(0) + // mLastReadChunk is initialized in EnsureBuffer +#ifdef DEBUG + , mHaveWrittenBlock(false) +#endif + { + // The 180 second limit in AudioContext::CreateDelay() and the + // 1 << MEDIA_TIME_FRAC_BITS limit on sample rate provide a limit on the + // maximum delay. + MOZ_ASSERT(aMaxDelayTicks <= + std::numeric_limits<decltype(mMaxDelayTicks)>::max()); + } + + // Write a WEBAUDIO_BLOCK_SIZE block for aChannelCount channels. + void Write(const AudioBlock& aInputChunk); + + // Read a block with an array of delays, in ticks, for each sample frame. + // Each delay should be >= 0 and <= MaxDelayTicks(). + void Read(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE], + AudioBlock* aOutputChunk, + ChannelInterpretation aChannelInterpretation); + // Read a block with a constant delay, which will be smoothed with the + // previous delay. The delay should be >= 0 and <= MaxDelayTicks(). + void Read(double aDelayTicks, AudioBlock* aOutputChunk, + ChannelInterpretation aChannelInterpretation); + + // Read into one of the channels of aOutputChunk, given an array of + // delays in ticks. This is useful when delays are different on different + // channels. 
aOutputChunk must have already been allocated with at least as + // many channels as were in any of the blocks passed to Write(). + void ReadChannel(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE], + AudioBlock* aOutputChunk, uint32_t aChannel, + ChannelInterpretation aChannelInterpretation); + + // Advance the buffer pointer + void NextBlock() + { + mCurrentChunk = (mCurrentChunk + 1) % mChunks.Length(); +#ifdef DEBUG + MOZ_ASSERT(mHaveWrittenBlock); + mHaveWrittenBlock = false; +#endif + } + + void Reset() { + mChunks.Clear(); + mCurrentDelay = -1.0; + }; + + int MaxDelayTicks() const { return mMaxDelayTicks; } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const; + +private: + void ReadChannels(const double aPerFrameDelays[WEBAUDIO_BLOCK_SIZE], + AudioBlock* aOutputChunk, + uint32_t aFirstChannel, uint32_t aNumChannelsToRead, + ChannelInterpretation aChannelInterpretation); + bool EnsureBuffer(); + int PositionForDelay(int aDelay); + int ChunkForPosition(int aPosition); + int OffsetForPosition(int aPosition); + int ChunkForDelay(int aDelay); + void UpdateUpmixChannels(int aNewReadChunk, uint32_t channelCount, + ChannelInterpretation aChannelInterpretation); + + // Circular buffer for capturing delayed samples. + FallibleTArray<AudioChunk> mChunks; + // Cache upmixed channel arrays. + AutoTArray<const float*,GUESS_AUDIO_CHANNELS> mUpmixChannels; + double mSmoothingRate; + // Current delay, in fractional ticks + double mCurrentDelay; + // Maximum delay, in ticks + int mMaxDelayTicks; + // The current position in the circular buffer. The next write will be to + // this chunk, and the next read may begin before this chunk. 
+ int mCurrentChunk; + // The chunk owning the pointers in mUpmixChannels + int mLastReadChunk; +#ifdef DEBUG + bool mHaveWrittenBlock; +#endif +}; + +} // namespace mozilla + +#endif // DelayBuffer_h_ diff --git a/dom/media/webaudio/DelayNode.cpp b/dom/media/webaudio/DelayNode.cpp new file mode 100644 index 000000000..17dc72514 --- /dev/null +++ b/dom/media/webaudio/DelayNode.cpp @@ -0,0 +1,234 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "DelayNode.h" +#include "mozilla/dom/DelayNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "WebAudioUtils.h" +#include "DelayBuffer.h" +#include "PlayingRefChangeHandler.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(DelayNode, AudioNode, + mDelay) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(DelayNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(DelayNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(DelayNode, AudioNode) + +class DelayNodeEngine final : public AudioNodeEngine +{ + typedef PlayingRefChangeHandler PlayingRefChanged; +public: + DelayNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination, + double aMaxDelayTicks) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + // Keep the default value in sync with the default value in DelayNode::DelayNode. 
+ , mDelay(0.f) + // Use a smoothing range of 20ms + , mBuffer(std::max(aMaxDelayTicks, + static_cast<double>(WEBAUDIO_BLOCK_SIZE)), + WebAudioUtils::ComputeSmoothingRate(0.02, + mDestination->SampleRate())) + , mMaxDelay(aMaxDelayTicks) + , mHaveProducedBeforeInput(false) + , mLeftOverData(INT32_MIN) + { + } + + DelayNodeEngine* AsDelayNodeEngine() override + { + return this; + } + + enum Parameters { + DELAY, + }; + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case DELAY: + mDelay.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad DelayNodeEngine TimelineParameter"); + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + MOZ_ASSERT(aStream->SampleRate() == mDestination->SampleRate()); + + if (!aInput.IsSilentOrSubnormal()) { + if (mLeftOverData <= 0) { + RefPtr<PlayingRefChanged> refchanged = + new PlayingRefChanged(aStream, PlayingRefChanged::ADDREF); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + mLeftOverData = mBuffer.MaxDelayTicks(); + } else if (mLeftOverData > 0) { + mLeftOverData -= WEBAUDIO_BLOCK_SIZE; + } else { + if (mLeftOverData != INT32_MIN) { + mLeftOverData = INT32_MIN; + aStream->ScheduleCheckForInactive(); + + // Delete our buffered data now we no longer need it + mBuffer.Reset(); + + RefPtr<PlayingRefChanged> refchanged = + new PlayingRefChanged(aStream, PlayingRefChanged::RELEASE); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + mBuffer.Write(aInput); + + // Skip output update if mLastChunks has already been set by + // ProduceBlockBeforeInput() when in a cycle. 
+ if (!mHaveProducedBeforeInput) { + UpdateOutputBlock(aStream, aFrom, aOutput, 0.0); + } + mHaveProducedBeforeInput = false; + mBuffer.NextBlock(); + } + + void UpdateOutputBlock(AudioNodeStream* aStream, GraphTime aFrom, + AudioBlock* aOutput, double minDelay) + { + double maxDelay = mMaxDelay; + double sampleRate = aStream->SampleRate(); + ChannelInterpretation channelInterpretation = + aStream->GetChannelInterpretation(); + if (mDelay.HasSimpleValue()) { + // If this DelayNode is in a cycle, make sure the delay value is at least + // one block, even if that is greater than maxDelay. + double delayFrames = mDelay.GetValue() * sampleRate; + double delayFramesClamped = + std::max(minDelay, std::min(delayFrames, maxDelay)); + mBuffer.Read(delayFramesClamped, aOutput, channelInterpretation); + } else { + // Compute the delay values for the duration of the input AudioChunk + // If this DelayNode is in a cycle, make sure the delay value is at least + // one block. + StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom); + float values[WEBAUDIO_BLOCK_SIZE]; + mDelay.GetValuesAtTime(tick, values,WEBAUDIO_BLOCK_SIZE); + + double computedDelay[WEBAUDIO_BLOCK_SIZE]; + for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) { + double delayAtTick = values[counter] * sampleRate; + double delayAtTickClamped = + std::max(minDelay, std::min(delayAtTick, maxDelay)); + computedDelay[counter] = delayAtTickClamped; + } + mBuffer.Read(computedDelay, aOutput, channelInterpretation); + } + } + + void ProduceBlockBeforeInput(AudioNodeStream* aStream, + GraphTime aFrom, + AudioBlock* aOutput) override + { + if (mLeftOverData <= 0) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + } else { + UpdateOutputBlock(aStream, aFrom, aOutput, WEBAUDIO_BLOCK_SIZE); + } + mHaveProducedBeforeInput = true; + } + + bool IsActive() const override + { + return mLeftOverData != INT32_MIN; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = 
AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + // Not owned: + // - mDestination - probably not owned + // - mDelay - shares ref with AudioNode, don't count + amount += mBuffer.SizeOfExcludingThis(aMallocSizeOf); + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + AudioNodeStream* mDestination; + AudioParamTimeline mDelay; + DelayBuffer mBuffer; + double mMaxDelay; + bool mHaveProducedBeforeInput; + // How much data we have in our buffer which needs to be flushed out when our inputs + // finish. + int32_t mLeftOverData; +}; + +DelayNode::DelayNode(AudioContext* aContext, double aMaxDelay) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mDelay(new AudioParam(this, DelayNodeEngine::DELAY, 0.0f, "delayTime")) +{ + DelayNodeEngine* engine = + new DelayNodeEngine(this, aContext->Destination(), + aContext->SampleRate() * aMaxDelay); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +DelayNode::~DelayNode() +{ +} + +size_t +DelayNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mDelay->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +DelayNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +DelayNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return DelayNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/DelayNode.h b/dom/media/webaudio/DelayNode.h new file mode 100644 index 000000000..dfee970bc --- /dev/null +++ b/dom/media/webaudio/DelayNode.h @@ -0,0 +1,55 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- 
*/ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DelayNode_h_ +#define DelayNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class DelayNode final : public AudioNode +{ +public: + DelayNode(AudioContext* aContext, double aMaxDelay); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DelayNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + AudioParam* DelayTime() const + { + return mDelay; + } + + const char* NodeType() const override + { + return "DelayNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~DelayNode(); + +private: + friend class DelayNodeEngine; + +private: + RefPtr<AudioParam> mDelay; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/DynamicsCompressorNode.cpp b/dom/media/webaudio/DynamicsCompressorNode.cpp new file mode 100644 index 000000000..3a3dc9849 --- /dev/null +++ b/dom/media/webaudio/DynamicsCompressorNode.cpp @@ -0,0 +1,237 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "DynamicsCompressorNode.h" +#include "mozilla/dom/DynamicsCompressorNodeBinding.h" +#include "nsAutoPtr.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "WebAudioUtils.h" +#include "blink/DynamicsCompressor.h" + +using WebCore::DynamicsCompressor; + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(DynamicsCompressorNode, AudioNode, + mThreshold, + mKnee, + mRatio, + mAttack, + mRelease) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(DynamicsCompressorNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(DynamicsCompressorNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(DynamicsCompressorNode, AudioNode) + +class DynamicsCompressorNodeEngine final : public AudioNodeEngine +{ +public: + explicit DynamicsCompressorNodeEngine(AudioNode* aNode, + AudioDestinationNode* aDestination) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + // Keep the default value in sync with the default value in + // DynamicsCompressorNode::DynamicsCompressorNode. 
+ , mThreshold(-24.f) + , mKnee(30.f) + , mRatio(12.f) + , mAttack(0.003f) + , mRelease(0.25f) + , mCompressor(new DynamicsCompressor(mDestination->SampleRate(), 2)) + { + } + + enum Parameters { + THRESHOLD, + KNEE, + RATIO, + ATTACK, + RELEASE + }; + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case THRESHOLD: + mThreshold.InsertEvent<int64_t>(aEvent); + break; + case KNEE: + mKnee.InsertEvent<int64_t>(aEvent); + break; + case RATIO: + mRatio.InsertEvent<int64_t>(aEvent); + break; + case ATTACK: + mAttack.InsertEvent<int64_t>(aEvent); + break; + case RELEASE: + mRelease.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad DynamicsCompresssorNodeEngine TimelineParameter"); + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + if (aInput.IsNull()) { + // Just output silence + *aOutput = aInput; + return; + } + + const uint32_t channelCount = aInput.ChannelCount(); + if (mCompressor->numberOfChannels() != channelCount) { + // Create a new compressor object with a new channel count + mCompressor = new WebCore::DynamicsCompressor(aStream->SampleRate(), + aInput.ChannelCount()); + } + + StreamTime pos = mDestination->GraphTimeToStreamTime(aFrom); + mCompressor->setParameterValue(DynamicsCompressor::ParamThreshold, + mThreshold.GetValueAtTime(pos)); + mCompressor->setParameterValue(DynamicsCompressor::ParamKnee, + mKnee.GetValueAtTime(pos)); + mCompressor->setParameterValue(DynamicsCompressor::ParamRatio, + mRatio.GetValueAtTime(pos)); + mCompressor->setParameterValue(DynamicsCompressor::ParamAttack, + mAttack.GetValueAtTime(pos)); + mCompressor->setParameterValue(DynamicsCompressor::ParamRelease, + mRelease.GetValueAtTime(pos)); + + aOutput->AllocateChannels(channelCount); + 
mCompressor->process(&aInput, aOutput, aInput.GetDuration()); + + SendReductionParamToMainThread(aStream, + mCompressor->parameterValue(DynamicsCompressor::ParamReduction)); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Not owned: + // - mDestination (probably) + // - Don't count the AudioParamTimelines, their inner refs are owned by the + // AudioNode. + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + amount += mCompressor->sizeOfIncludingThis(aMallocSizeOf); + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + void SendReductionParamToMainThread(AudioNodeStream* aStream, float aReduction) + { + MOZ_ASSERT(!NS_IsMainThread()); + + class Command final : public Runnable + { + public: + Command(AudioNodeStream* aStream, float aReduction) + : mStream(aStream) + , mReduction(aReduction) + { + } + + NS_IMETHOD Run() override + { + RefPtr<DynamicsCompressorNode> node = + static_cast<DynamicsCompressorNode*> + (mStream->Engine()->NodeMainThread()); + if (node) { + node->SetReduction(mReduction); + } + return NS_OK; + } + + private: + RefPtr<AudioNodeStream> mStream; + float mReduction; + }; + + NS_DispatchToMainThread(new Command(aStream, aReduction)); + } + +private: + AudioNodeStream* mDestination; + AudioParamTimeline mThreshold; + AudioParamTimeline mKnee; + AudioParamTimeline mRatio; + AudioParamTimeline mAttack; + AudioParamTimeline mRelease; + nsAutoPtr<DynamicsCompressor> mCompressor; +}; + +DynamicsCompressorNode::DynamicsCompressorNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Explicit, + ChannelInterpretation::Speakers) + , mThreshold(new AudioParam(this, DynamicsCompressorNodeEngine::THRESHOLD, + -24.f, "threshold")) + , mKnee(new AudioParam(this, DynamicsCompressorNodeEngine::KNEE, + 30.f, "knee")) + , mRatio(new AudioParam(this, 
DynamicsCompressorNodeEngine::RATIO, + 12.f, "ratio")) + , mReduction(0) + , mAttack(new AudioParam(this, DynamicsCompressorNodeEngine::ATTACK, + 0.003f, "attack")) + , mRelease(new AudioParam(this, DynamicsCompressorNodeEngine::RELEASE, + 0.25f, "release")) +{ + DynamicsCompressorNodeEngine* engine = new DynamicsCompressorNodeEngine(this, aContext->Destination()); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +DynamicsCompressorNode::~DynamicsCompressorNode() +{ +} + +size_t +DynamicsCompressorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mThreshold->SizeOfIncludingThis(aMallocSizeOf); + amount += mKnee->SizeOfIncludingThis(aMallocSizeOf); + amount += mRatio->SizeOfIncludingThis(aMallocSizeOf); + amount += mAttack->SizeOfIncludingThis(aMallocSizeOf); + amount += mRelease->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +DynamicsCompressorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +DynamicsCompressorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return DynamicsCompressorNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/DynamicsCompressorNode.h b/dom/media/webaudio/DynamicsCompressorNode.h new file mode 100644 index 000000000..5bdd5f2d0 --- /dev/null +++ b/dom/media/webaudio/DynamicsCompressorNode.h @@ -0,0 +1,89 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef DynamicsCompressorNode_h_ +#define DynamicsCompressorNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class DynamicsCompressorNode final : public AudioNode +{ +public: + explicit DynamicsCompressorNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(DynamicsCompressorNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + AudioParam* Threshold() const + { + return mThreshold; + } + + AudioParam* Knee() const + { + return mKnee; + } + + AudioParam* Ratio() const + { + return mRatio; + } + + AudioParam* Attack() const + { + return mAttack; + } + + // Called GetRelease to prevent clashing with the nsISupports::Release name + AudioParam* GetRelease() const + { + return mRelease; + } + + float Reduction() const + { + return mReduction; + } + + const char* NodeType() const override + { + return "DynamicsCompressorNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + + void SetReduction(float aReduction) + { + MOZ_ASSERT(NS_IsMainThread()); + mReduction = aReduction; + } + +protected: + virtual ~DynamicsCompressorNode(); + +private: + RefPtr<AudioParam> mThreshold; + RefPtr<AudioParam> mKnee; + RefPtr<AudioParam> mRatio; + float mReduction; + RefPtr<AudioParam> mAttack; + RefPtr<AudioParam> mRelease; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/FFTBlock.cpp b/dom/media/webaudio/FFTBlock.cpp new file mode 100644 index 000000000..f517ef283 --- /dev/null +++ b/dom/media/webaudio/FFTBlock.cpp @@ -0,0 +1,226 @@ +/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */ +/* vim:set ts=4 sw=4 sts=4 et cindent: */ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "FFTBlock.h" + +#include <complex> + +namespace mozilla { + +typedef std::complex<double> Complex; + +FFTBlock* FFTBlock::CreateInterpolatedBlock(const FFTBlock& block0, const FFTBlock& block1, double interp) +{ + FFTBlock* newBlock = new FFTBlock(block0.FFTSize()); + + newBlock->InterpolateFrequencyComponents(block0, block1, interp); + + // In the time-domain, the 2nd half of the response must be zero, to avoid circular convolution aliasing... 
+ int fftSize = newBlock->FFTSize(); + AlignedTArray<float> buffer(fftSize); + newBlock->GetInverseWithoutScaling(buffer.Elements()); + AudioBufferInPlaceScale(buffer.Elements(), 1.0f / fftSize, fftSize / 2); + PodZero(buffer.Elements() + fftSize / 2, fftSize / 2); + + // Put back into frequency domain. + newBlock->PerformFFT(buffer.Elements()); + + return newBlock; +} + +void FFTBlock::InterpolateFrequencyComponents(const FFTBlock& block0, const FFTBlock& block1, double interp) +{ + // FIXME : with some work, this method could be optimized + + ComplexU* dft = mOutputBuffer.Elements(); + + const ComplexU* dft1 = block0.mOutputBuffer.Elements(); + const ComplexU* dft2 = block1.mOutputBuffer.Elements(); + + MOZ_ASSERT(mFFTSize == block0.FFTSize()); + MOZ_ASSERT(mFFTSize == block1.FFTSize()); + double s1base = (1.0 - interp); + double s2base = interp; + + double phaseAccum = 0.0; + double lastPhase1 = 0.0; + double lastPhase2 = 0.0; + + int n = mFFTSize / 2; + + dft[0].r = static_cast<float>(s1base * dft1[0].r + s2base * dft2[0].r); + dft[n].r = static_cast<float>(s1base * dft1[n].r + s2base * dft2[n].r); + + for (int i = 1; i < n; ++i) { + Complex c1(dft1[i].r, dft1[i].i); + Complex c2(dft2[i].r, dft2[i].i); + + double mag1 = abs(c1); + double mag2 = abs(c2); + + // Interpolate magnitudes in decibels + double mag1db = 20.0 * log10(mag1); + double mag2db = 20.0 * log10(mag2); + + double s1 = s1base; + double s2 = s2base; + + double magdbdiff = mag1db - mag2db; + + // Empirical tweak to retain higher-frequency zeroes + double threshold = (i > 16) ? 
5.0 : 2.0; + + if (magdbdiff < -threshold && mag1db < 0.0) { + s1 = pow(s1, 0.75); + s2 = 1.0 - s1; + } else if (magdbdiff > threshold && mag2db < 0.0) { + s2 = pow(s2, 0.75); + s1 = 1.0 - s2; + } + + // Average magnitude by decibels instead of linearly + double magdb = s1 * mag1db + s2 * mag2db; + double mag = pow(10.0, 0.05 * magdb); + + // Now, deal with phase + double phase1 = arg(c1); + double phase2 = arg(c2); + + double deltaPhase1 = phase1 - lastPhase1; + double deltaPhase2 = phase2 - lastPhase2; + lastPhase1 = phase1; + lastPhase2 = phase2; + + // Unwrap phase deltas + if (deltaPhase1 > M_PI) + deltaPhase1 -= 2.0 * M_PI; + if (deltaPhase1 < -M_PI) + deltaPhase1 += 2.0 * M_PI; + if (deltaPhase2 > M_PI) + deltaPhase2 -= 2.0 * M_PI; + if (deltaPhase2 < -M_PI) + deltaPhase2 += 2.0 * M_PI; + + // Blend group-delays + double deltaPhaseBlend; + + if (deltaPhase1 - deltaPhase2 > M_PI) + deltaPhaseBlend = s1 * deltaPhase1 + s2 * (2.0 * M_PI + deltaPhase2); + else if (deltaPhase2 - deltaPhase1 > M_PI) + deltaPhaseBlend = s1 * (2.0 * M_PI + deltaPhase1) + s2 * deltaPhase2; + else + deltaPhaseBlend = s1 * deltaPhase1 + s2 * deltaPhase2; + + phaseAccum += deltaPhaseBlend; + + // Unwrap + if (phaseAccum > M_PI) + phaseAccum -= 2.0 * M_PI; + if (phaseAccum < -M_PI) + phaseAccum += 2.0 * M_PI; + + dft[i].r = static_cast<float>(mag * cos(phaseAccum)); + dft[i].i = static_cast<float>(mag * sin(phaseAccum)); + } +} + +double FFTBlock::ExtractAverageGroupDelay() +{ + ComplexU* dft = mOutputBuffer.Elements(); + + double aveSum = 0.0; + double weightSum = 0.0; + double lastPhase = 0.0; + + int halfSize = FFTSize() / 2; + + const double kSamplePhaseDelay = (2.0 * M_PI) / double(FFTSize()); + + // Remove DC offset + dft[0].r = 0.0f; + + // Calculate weighted average group delay + for (int i = 1; i < halfSize; i++) { + Complex c(dft[i].r, dft[i].i); + double mag = abs(c); + double phase = arg(c); + + double deltaPhase = phase - lastPhase; + lastPhase = phase; + + // Unwrap + if 
(deltaPhase < -M_PI) + deltaPhase += 2.0 * M_PI; + if (deltaPhase > M_PI) + deltaPhase -= 2.0 * M_PI; + + aveSum += mag * deltaPhase; + weightSum += mag; + } + + // Note how we invert the phase delta wrt frequency since this is how group delay is defined + double ave = aveSum / weightSum; + double aveSampleDelay = -ave / kSamplePhaseDelay; + + // Leave 20 sample headroom (for leading edge of impulse) + aveSampleDelay -= 20.0; + if (aveSampleDelay <= 0.0) + return 0.0; + + // Remove average group delay (minus 20 samples for headroom) + AddConstantGroupDelay(-aveSampleDelay); + + return aveSampleDelay; +} + +void FFTBlock::AddConstantGroupDelay(double sampleFrameDelay) +{ + int halfSize = FFTSize() / 2; + + ComplexU* dft = mOutputBuffer.Elements(); + + const double kSamplePhaseDelay = (2.0 * M_PI) / double(FFTSize()); + + double phaseAdj = -sampleFrameDelay * kSamplePhaseDelay; + + // Add constant group delay + for (int i = 1; i < halfSize; i++) { + Complex c(dft[i].r, dft[i].i); + double mag = abs(c); + double phase = arg(c); + + phase += i * phaseAdj; + + dft[i].r = static_cast<float>(mag * cos(phase)); + dft[i].i = static_cast<float>(mag * sin(phase)); + } +} + +} // namespace mozilla diff --git a/dom/media/webaudio/FFTBlock.h b/dom/media/webaudio/FFTBlock.h new file mode 100644 index 000000000..84b9f38aa --- /dev/null +++ b/dom/media/webaudio/FFTBlock.h @@ -0,0 +1,319 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef FFTBlock_h_ +#define FFTBlock_h_ + +#ifdef BUILD_ARM_NEON +#include <cmath> +#include "mozilla/arm.h" +#include "dl/sp/api/omxSP.h" +#endif + +#include "AlignedTArray.h" +#include "AudioNodeEngine.h" +#if defined(MOZ_LIBAV_FFT) +#ifdef __cplusplus +extern "C" { +#endif +#include "libavcodec/avfft.h" +#ifdef __cplusplus +} +#endif +#else +#include "kiss_fft/kiss_fftr.h" +#endif + +namespace mozilla { + +// This class defines an FFT block, loosely modeled after Blink's FFTFrame +// class to make sharing code with Blink easy. +// Currently it's implemented on top of KissFFT on all platforms. +class FFTBlock final +{ + union ComplexU { +#if !defined(MOZ_LIBAV_FFT) + kiss_fft_cpx c; +#endif + float f[2]; + struct { + float r; + float i; + }; + }; + +public: + explicit FFTBlock(uint32_t aFFTSize) +#if defined(MOZ_LIBAV_FFT) + : mAvRDFT(nullptr) + , mAvIRDFT(nullptr) +#else + : mKissFFT(nullptr) + , mKissIFFT(nullptr) +#ifdef BUILD_ARM_NEON + , mOmxFFT(nullptr) + , mOmxIFFT(nullptr) +#endif +#endif + { + MOZ_COUNT_CTOR(FFTBlock); + SetFFTSize(aFFTSize); + } + ~FFTBlock() + { + MOZ_COUNT_DTOR(FFTBlock); + Clear(); + } + + // Return a new FFTBlock with frequency components interpolated between + // |block0| and |block1| with |interp| between 0.0 and 1.0. + static FFTBlock* + CreateInterpolatedBlock(const FFTBlock& block0, + const FFTBlock& block1, double interp); + + // Transform FFTSize() points of aData and store the result internally. + void PerformFFT(const float* aData) + { + EnsureFFT(); +#if defined(MOZ_LIBAV_FFT) + PodCopy(mOutputBuffer.Elements()->f, aData, mFFTSize); + av_rdft_calc(mAvRDFT, mOutputBuffer.Elements()->f); + // Recover packed Nyquist. 
+ mOutputBuffer[mFFTSize / 2].r = mOutputBuffer[0].i; + mOutputBuffer[0].i = 0.0f; +#else +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + omxSP_FFTFwd_RToCCS_F32_Sfs(aData, mOutputBuffer.Elements()->f, mOmxFFT); + } else +#endif + { + kiss_fftr(mKissFFT, aData, &(mOutputBuffer.Elements()->c)); + } +#endif + } + // Inverse-transform internal data and store the resulting FFTSize() + // points in aDataOut. + void GetInverse(float* aDataOut) + { + GetInverseWithoutScaling(aDataOut); + AudioBufferInPlaceScale(aDataOut, 1.0f / mFFTSize, mFFTSize); + } + // Inverse-transform internal frequency data and store the resulting + // FFTSize() points in |aDataOut|. If frequency data has not already been + // scaled, then the output will need scaling by 1/FFTSize(). + void GetInverseWithoutScaling(float* aDataOut) + { + EnsureIFFT(); +#if defined(MOZ_LIBAV_FFT) + { + // Even though this function doesn't scale, the libav forward transform + // gives a value that needs scaling by 2 in order for things to turn out + // similar to how we expect from kissfft/openmax. + AudioBufferCopyWithScale(mOutputBuffer.Elements()->f, 2.0f, + aDataOut, mFFTSize); + aDataOut[1] = 2.0f * mOutputBuffer[mFFTSize/2].r; // Packed Nyquist + av_rdft_calc(mAvIRDFT, aDataOut); + } +#else +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + omxSP_FFTInv_CCSToR_F32_Sfs_unscaled(mOutputBuffer.Elements()->f, aDataOut, mOmxIFFT); + } else +#endif + { + kiss_fftri(mKissIFFT, &(mOutputBuffer.Elements()->c), aDataOut); + } +#endif + } + + void Multiply(const FFTBlock& aFrame) + { + uint32_t halfSize = mFFTSize / 2; + // DFTs are not packed. + MOZ_ASSERT(mOutputBuffer[0].i == 0); + MOZ_ASSERT(aFrame.mOutputBuffer[0].i == 0); + + BufferComplexMultiply(mOutputBuffer.Elements()->f, + aFrame.mOutputBuffer.Elements()->f, + mOutputBuffer.Elements()->f, + halfSize); + mOutputBuffer[halfSize].r *= aFrame.mOutputBuffer[halfSize].r; + // This would have been set to NaN if either real component was NaN. 
+ mOutputBuffer[0].i = 0.0f; + } + + // Perform a forward FFT on |aData|, assuming zeros after dataSize samples, + // and pre-scale the generated internal frequency domain coefficients so + // that GetInverseWithoutScaling() can be used to transform to the time + // domain. This is useful for convolution kernels. + void PadAndMakeScaledDFT(const float* aData, size_t dataSize) + { + MOZ_ASSERT(dataSize <= FFTSize()); + AlignedTArray<float> paddedData; + paddedData.SetLength(FFTSize()); + AudioBufferCopyWithScale(aData, 1.0f / FFTSize(), + paddedData.Elements(), dataSize); + PodZero(paddedData.Elements() + dataSize, mFFTSize - dataSize); + PerformFFT(paddedData.Elements()); + } + + void SetFFTSize(uint32_t aSize) + { + mFFTSize = aSize; + mOutputBuffer.SetLength(aSize / 2 + 1); + PodZero(mOutputBuffer.Elements(), aSize / 2 + 1); + Clear(); + } + + // Return the average group delay and removes this from the frequency data. + double ExtractAverageGroupDelay(); + + uint32_t FFTSize() const + { + return mFFTSize; + } + float RealData(uint32_t aIndex) const + { + return mOutputBuffer[aIndex].r; + } + float& RealData(uint32_t aIndex) + { + return mOutputBuffer[aIndex].r; + } + float ImagData(uint32_t aIndex) const + { + return mOutputBuffer[aIndex].i; + } + float& ImagData(uint32_t aIndex) + { + return mOutputBuffer[aIndex].i; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + size_t amount = 0; +#if defined(MOZ_LIBAV_FFT) + amount += aMallocSizeOf(mAvRDFT); + amount += aMallocSizeOf(mAvIRDFT); +#else + amount += aMallocSizeOf(mKissFFT); + amount += aMallocSizeOf(mKissIFFT); +#endif + amount += mOutputBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + FFTBlock(const FFTBlock& other) = delete; + void operator=(const FFTBlock& other) = delete; + + void EnsureFFT() + { +#if 
defined(MOZ_LIBAV_FFT) + if (!mAvRDFT) { + mAvRDFT = av_rdft_init(log((double)mFFTSize)/M_LN2, DFT_R2C); + } +#else +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + if (!mOmxFFT) { + mOmxFFT = createOmxFFT(mFFTSize); + } + } else +#endif + { + if (!mKissFFT) { + mKissFFT = kiss_fftr_alloc(mFFTSize, 0, nullptr, nullptr); + } + } +#endif + } + void EnsureIFFT() + { +#if defined(MOZ_LIBAV_FFT) + if (!mAvIRDFT) { + mAvIRDFT = av_rdft_init(log((double)mFFTSize)/M_LN2, IDFT_C2R); + } +#else +#ifdef BUILD_ARM_NEON + if (mozilla::supports_neon()) { + if (!mOmxIFFT) { + mOmxIFFT = createOmxFFT(mFFTSize); + } + } else +#endif + { + if (!mKissIFFT) { + mKissIFFT = kiss_fftr_alloc(mFFTSize, 1, nullptr, nullptr); + } + } +#endif + } + +#ifdef BUILD_ARM_NEON + static OMXFFTSpec_R_F32* createOmxFFT(uint32_t aFFTSize) + { + MOZ_ASSERT((aFFTSize & (aFFTSize-1)) == 0); + OMX_INT bufSize; + OMX_INT order = log((double)aFFTSize)/M_LN2; + MOZ_ASSERT(aFFTSize>>order == 1); + OMXResult status = omxSP_FFTGetBufSize_R_F32(order, &bufSize); + if (status == OMX_Sts_NoErr) { + OMXFFTSpec_R_F32* context = static_cast<OMXFFTSpec_R_F32*>(malloc(bufSize)); + if (omxSP_FFTInit_R_F32(context, order) != OMX_Sts_NoErr) { + return nullptr; + } + return context; + } + return nullptr; + } +#endif + + void Clear() + { +#if defined(MOZ_LIBAV_FFT) + av_rdft_end(mAvRDFT); + av_rdft_end(mAvIRDFT); + mAvRDFT = mAvIRDFT = nullptr; +#else +#ifdef BUILD_ARM_NEON + free(mOmxFFT); + free(mOmxIFFT); + mOmxFFT = mOmxIFFT = nullptr; +#endif + free(mKissFFT); + free(mKissIFFT); + mKissFFT = mKissIFFT = nullptr; +#endif + } + void AddConstantGroupDelay(double sampleFrameDelay); + void InterpolateFrequencyComponents(const FFTBlock& block0, + const FFTBlock& block1, double interp); +#if defined(MOZ_LIBAV_FFT) + RDFTContext *mAvRDFT; + RDFTContext *mAvIRDFT; +#else + kiss_fftr_cfg mKissFFT; + kiss_fftr_cfg mKissIFFT; +#ifdef BUILD_ARM_NEON + OMXFFTSpec_R_F32* mOmxFFT; + OMXFFTSpec_R_F32* mOmxIFFT; +#endif 
+#endif + AlignedTArray<ComplexU> mOutputBuffer; + uint32_t mFFTSize; +}; + +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/GainNode.cpp b/dom/media/webaudio/GainNode.cpp new file mode 100644 index 000000000..46ac99763 --- /dev/null +++ b/dom/media/webaudio/GainNode.cpp @@ -0,0 +1,156 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "GainNode.h" +#include "mozilla/dom/GainNodeBinding.h" +#include "AlignmentUtils.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "WebAudioUtils.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(GainNode, AudioNode, + mGain) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(GainNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(GainNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(GainNode, AudioNode) + +class GainNodeEngine final : public AudioNodeEngine +{ +public: + GainNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + // Keep the default value in sync with the default value in GainNode::GainNode. 
+ , mGain(1.f) + { + } + + enum Parameters { + GAIN + }; + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case GAIN: + mGain.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad GainNodeEngine TimelineParameter"); + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + if (aInput.IsNull()) { + // If input is silent, so is the output + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + } else if (mGain.HasSimpleValue()) { + // Optimize the case where we only have a single value set as the volume + float gain = mGain.GetValue(); + if (gain == 0.0f) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + } else { + *aOutput = aInput; + aOutput->mVolume *= gain; + } + } else { + // First, compute a vector of gains for each track tick based on the + // timeline at hand, and then for each channel, multiply the values + // in the buffer with the gain vector. 
+ aOutput->AllocateChannels(aInput.ChannelCount()); + + // Compute the gain values for the duration of the input AudioChunk + StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom); + float computedGain[WEBAUDIO_BLOCK_SIZE + 4]; + float* alignedComputedGain = ALIGNED16(computedGain); + ASSERT_ALIGNED16(alignedComputedGain); + mGain.GetValuesAtTime(tick, alignedComputedGain, WEBAUDIO_BLOCK_SIZE); + + for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) { + alignedComputedGain[counter] *= aInput.mVolume; + } + + // Apply the gain to the output buffer + for (size_t channel = 0; channel < aOutput->ChannelCount(); ++channel) { + const float* inputBuffer = static_cast<const float*> (aInput.mChannelData[channel]); + float* buffer = aOutput->ChannelFloatsForWrite(channel); + AudioBlockCopyChannelWithScale(inputBuffer, alignedComputedGain, buffer); + } + } + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Not owned: + // - mDestination (probably) + // - mGain - Internal ref owned by AudioNode + return AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + AudioNodeStream* mDestination; + AudioParamTimeline mGain; +}; + +GainNode::GainNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mGain(new AudioParam(this, GainNodeEngine::GAIN, 1.0f, "gain")) +{ + GainNodeEngine* engine = new GainNodeEngine(this, aContext->Destination()); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +GainNode::~GainNode() +{ +} + +size_t +GainNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mGain->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t 
+GainNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +GainNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return GainNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/GainNode.h b/dom/media/webaudio/GainNode.h new file mode 100644 index 000000000..aab22ad65 --- /dev/null +++ b/dom/media/webaudio/GainNode.h @@ -0,0 +1,52 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef GainNode_h_ +#define GainNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class GainNode final : public AudioNode +{ +public: + explicit GainNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(GainNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + AudioParam* Gain() const + { + return mGain; + } + + const char* NodeType() const override + { + return "GainNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~GainNode(); + +private: + RefPtr<AudioParam> mGain; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/IIRFilterNode.cpp b/dom/media/webaudio/IIRFilterNode.cpp new file mode 100644 index 000000000..3a69a94c8 --- /dev/null +++ b/dom/media/webaudio/IIRFilterNode.cpp @@ -0,0 +1,228 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ 
+/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "IIRFilterNode.h" +#include "AudioNodeEngine.h" + +#include "blink/IIRFilter.h" + +#include "nsGkAtoms.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_ISUPPORTS_INHERITED0(IIRFilterNode, AudioNode) + +class IIRFilterNodeEngine final : public AudioNodeEngine +{ +public: + IIRFilterNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination, + const AudioDoubleArray &aFeedforward, + const AudioDoubleArray &aFeedback, + uint64_t aWindowID) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + , mFeedforward(aFeedforward) + , mFeedback(aFeedback) + , mWindowID(aWindowID) + { + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + float inputBuffer[WEBAUDIO_BLOCK_SIZE + 4]; + float* alignedInputBuffer = ALIGNED16(inputBuffer); + ASSERT_ALIGNED16(alignedInputBuffer); + + if (aInput.IsNull()) { + if (!mIIRFilters.IsEmpty()) { + bool allZero = true; + for (uint32_t i = 0; i < mIIRFilters.Length(); ++i) { + allZero &= mIIRFilters[i]->buffersAreZero(); + } + + // all filter buffer values are zero, so the output will be zero + // as well. 
+ if (allZero) { + mIIRFilters.Clear(); + aStream->ScheduleCheckForInactive(); + + RefPtr<PlayingRefChangeHandler> refchanged = + new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + PodZero(alignedInputBuffer, WEBAUDIO_BLOCK_SIZE); + } + } else if(mIIRFilters.Length() != aInput.ChannelCount()){ + if (mIIRFilters.IsEmpty()) { + RefPtr<PlayingRefChangeHandler> refchanged = + new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } else { + WebAudioUtils::LogToDeveloperConsole(mWindowID, + "IIRFilterChannelCountChangeWarning"); + } + + // Adjust the number of filters based on the number of channels + mIIRFilters.SetLength(aInput.ChannelCount()); + for (size_t i = 0; i < aInput.ChannelCount(); ++i) { + mIIRFilters[i] = new blink::IIRFilter(&mFeedforward, &mFeedback); + } + } + + uint32_t numberOfChannels = mIIRFilters.Length(); + aOutput->AllocateChannels(numberOfChannels); + + for (uint32_t i = 0; i < numberOfChannels; ++i) { + const float* input; + if (aInput.IsNull()) { + input = alignedInputBuffer; + } else { + input = static_cast<const float*>(aInput.mChannelData[i]); + if (aInput.mVolume != 1.0) { + AudioBlockCopyChannelWithScale(input, aInput.mVolume, alignedInputBuffer); + input = alignedInputBuffer; + } + } + + mIIRFilters[i]->process(input, + aOutput->ChannelFloatsForWrite(i), + aInput.GetDuration()); + } + } + + bool IsActive() const override + { + return !mIIRFilters.IsEmpty(); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Not owned: + // - mDestination - probably not owned + // - AudioParamTimelines - counted in the AudioNode + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + amount += 
mIIRFilters.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + AudioNodeStream* mDestination; + nsTArray<nsAutoPtr<blink::IIRFilter>> mIIRFilters; + AudioDoubleArray mFeedforward; + AudioDoubleArray mFeedback; + uint64_t mWindowID; +}; + +IIRFilterNode::IIRFilterNode(AudioContext* aContext, + const mozilla::dom::binding_detail::AutoSequence<double>& aFeedforward, + const mozilla::dom::binding_detail::AutoSequence<double>& aFeedback) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) +{ + mFeedforward.SetLength(aFeedforward.Length()); + PodCopy(mFeedforward.Elements(), aFeedforward.Elements(), aFeedforward.Length()); + mFeedback.SetLength(aFeedback.Length()); + PodCopy(mFeedback.Elements(), aFeedback.Elements(), aFeedback.Length()); + + // Scale coefficients -- we guarantee that mFeedback != 0 when creating + // the IIRFilterNode. 
+ double scale = mFeedback[0]; + double* elements = mFeedforward.Elements(); + for (size_t i = 0; i < mFeedforward.Length(); ++i) { + elements[i] /= scale; + } + + elements = mFeedback.Elements(); + for (size_t i = 0; i < mFeedback.Length(); ++i) { + elements[i] /= scale; + } + + // We check that this is exactly equal to one later in blink/IIRFilter.cpp + elements[0] = 1.0; + + uint64_t windowID = aContext->GetParentObject()->WindowID(); + IIRFilterNodeEngine* engine = new IIRFilterNodeEngine(this, aContext->Destination(), mFeedforward, mFeedback, windowID); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +IIRFilterNode::~IIRFilterNode() +{ +} + +size_t +IIRFilterNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +size_t +IIRFilterNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +IIRFilterNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return IIRFilterNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +IIRFilterNode::GetFrequencyResponse(const Float32Array& aFrequencyHz, + const Float32Array& aMagResponse, + const Float32Array& aPhaseResponse) +{ + aFrequencyHz.ComputeLengthAndData(); + aMagResponse.ComputeLengthAndData(); + aPhaseResponse.ComputeLengthAndData(); + + uint32_t length = std::min(std::min(aFrequencyHz.Length(), + aMagResponse.Length()), + aPhaseResponse.Length()); + if (!length) { + return; + } + + auto frequencies = MakeUnique<float[]>(length); + float* frequencyHz = aFrequencyHz.Data(); + const double nyquist = Context()->SampleRate() * 0.5; + + // Normalize the frequencies + for (uint32_t i = 0; i < length; ++i) { + if (frequencyHz[i] >= 0 && frequencyHz[i] <= nyquist) { + frequencies[i] = static_cast<float>(frequencyHz[i] / nyquist); + } else { + 
frequencies[i] = std::numeric_limits<float>::quiet_NaN(); + } + } + + blink::IIRFilter filter(&mFeedforward, &mFeedback); + filter.getFrequencyResponse(int(length), frequencies.get(), aMagResponse.Data(), aPhaseResponse.Data()); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/IIRFilterNode.h b/dom/media/webaudio/IIRFilterNode.h new file mode 100644 index 000000000..78546c3e5 --- /dev/null +++ b/dom/media/webaudio/IIRFilterNode.h @@ -0,0 +1,55 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef IIRFilterNode_h_ +#define IIRFilterNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" +#include "mozilla/dom/IIRFilterNodeBinding.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class IIRFilterNode final : public AudioNode +{ +public: + explicit IIRFilterNode(AudioContext* aContext, + const mozilla::dom::binding_detail::AutoSequence<double>& aFeedforward, + const mozilla::dom::binding_detail::AutoSequence<double>& aFeedback); + + NS_DECL_ISUPPORTS_INHERITED + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + + void GetFrequencyResponse(const Float32Array& aFrequencyHz, + const Float32Array& aMagResponse, + const Float32Array& aPhaseResponse); + + const char* NodeType() const override + { + return "IIRFilterNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~IIRFilterNode(); + +private: + nsTArray<double> mFeedback; + nsTArray<double> mFeedforward; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git 
a/dom/media/webaudio/MediaBufferDecoder.cpp b/dom/media/webaudio/MediaBufferDecoder.cpp new file mode 100644 index 000000000..e9f1d5a47 --- /dev/null +++ b/dom/media/webaudio/MediaBufferDecoder.cpp @@ -0,0 +1,649 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaBufferDecoder.h" +#include "BufferDecoder.h" +#include "mozilla/dom/AudioContextBinding.h" +#include "mozilla/dom/ScriptSettings.h" +#include <speex/speex_resampler.h> +#include "nsXPCOMCIDInternal.h" +#include "nsComponentManagerUtils.h" +#include "MediaDecoderReader.h" +#include "BufferMediaResource.h" +#include "DecoderTraits.h" +#include "AudioContext.h" +#include "AudioBuffer.h" +#include "nsContentUtils.h" +#include "nsIScriptObjectPrincipal.h" +#include "nsIScriptError.h" +#include "nsMimeTypes.h" +#include "VideoUtils.h" +#include "WebAudioUtils.h" +#include "mozilla/dom/Promise.h" +#include "mozilla/Telemetry.h" +#include "nsPrintfCString.h" +#include "GMPService.h" + +namespace mozilla { + +extern LazyLogModule gMediaDecoderLog; + +NS_IMPL_CYCLE_COLLECTION_CLASS(WebAudioDecodeJob) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(WebAudioDecodeJob) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mContext) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mOutput) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mSuccessCallback) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mFailureCallback) +NS_IMPL_CYCLE_COLLECTION_UNLINK_END + +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(WebAudioDecodeJob) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mContext) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mOutput) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSuccessCallback) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mFailureCallback) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS 
+NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WebAudioDecodeJob) +NS_IMPL_CYCLE_COLLECTION_TRACE_END +NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebAudioDecodeJob, AddRef) +NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebAudioDecodeJob, Release) + +using namespace dom; + +class ReportResultTask final : public Runnable +{ +public: + ReportResultTask(WebAudioDecodeJob& aDecodeJob, + WebAudioDecodeJob::ResultFn aFunction, + WebAudioDecodeJob::ErrorCode aErrorCode) + : mDecodeJob(aDecodeJob) + , mFunction(aFunction) + , mErrorCode(aErrorCode) + { + MOZ_ASSERT(aFunction); + } + + NS_IMETHOD Run() override + { + MOZ_ASSERT(NS_IsMainThread()); + + (mDecodeJob.*mFunction)(mErrorCode); + + return NS_OK; + } + +private: + // Note that the mDecodeJob member will probably die when mFunction is run. + // Therefore, it is not safe to do anything fancy with it in this class. + // Really, this class is only used because nsRunnableMethod doesn't support + // methods accepting arguments. 
+ WebAudioDecodeJob& mDecodeJob; + WebAudioDecodeJob::ResultFn mFunction; + WebAudioDecodeJob::ErrorCode mErrorCode; +}; + +enum class PhaseEnum : int +{ + Decode, + AllocateBuffer, + Done +}; + +class MediaDecodeTask final : public Runnable +{ +public: + MediaDecodeTask(const char* aContentType, uint8_t* aBuffer, + uint32_t aLength, + WebAudioDecodeJob& aDecodeJob) + : mContentType(aContentType) + , mBuffer(aBuffer) + , mLength(aLength) + , mDecodeJob(aDecodeJob) + , mPhase(PhaseEnum::Decode) + , mFirstFrameDecoded(false) + { + MOZ_ASSERT(aBuffer); + MOZ_ASSERT(NS_IsMainThread()); + } + + NS_IMETHOD Run(); + bool CreateReader(); + MediaDecoderReader* Reader() { MOZ_ASSERT(mDecoderReader); return mDecoderReader; } + +private: + void ReportFailureOnMainThread(WebAudioDecodeJob::ErrorCode aErrorCode) { + if (NS_IsMainThread()) { + Cleanup(); + mDecodeJob.OnFailure(aErrorCode); + } else { + // Take extra care to cleanup on the main thread + NS_DispatchToMainThread(NewRunnableMethod(this, &MediaDecodeTask::Cleanup)); + + nsCOMPtr<nsIRunnable> event = + new ReportResultTask(mDecodeJob, &WebAudioDecodeJob::OnFailure, aErrorCode); + NS_DispatchToMainThread(event); + } + } + + void Decode(); + void OnMetadataRead(MetadataHolder* aMetadata); + void OnMetadataNotRead(const MediaResult& aError); + void RequestSample(); + void SampleDecoded(MediaData* aData); + void SampleNotDecoded(const MediaResult& aError); + void FinishDecode(); + void AllocateBuffer(); + void CallbackTheResult(); + + void Cleanup() + { + MOZ_ASSERT(NS_IsMainThread()); + // MediaDecoderReader expects that BufferDecoder is alive. + // Destruct MediaDecoderReader first. 
+ mDecoderReader = nullptr; + mBufferDecoder = nullptr; + JS_free(nullptr, mBuffer); + } + +private: + nsCString mContentType; + uint8_t* mBuffer; + uint32_t mLength; + WebAudioDecodeJob& mDecodeJob; + PhaseEnum mPhase; + RefPtr<BufferDecoder> mBufferDecoder; + RefPtr<MediaDecoderReader> mDecoderReader; + MediaInfo mMediaInfo; + MediaQueue<MediaData> mAudioQueue; + bool mFirstFrameDecoded; +}; + +NS_IMETHODIMP +MediaDecodeTask::Run() +{ + MOZ_ASSERT(mBufferDecoder); + MOZ_ASSERT(mDecoderReader); + switch (mPhase) { + case PhaseEnum::Decode: + Decode(); + break; + case PhaseEnum::AllocateBuffer: + AllocateBuffer(); + break; + case PhaseEnum::Done: + break; + } + + return NS_OK; +} + +class BufferDecoderGMPCrashHelper : public GMPCrashHelper +{ +public: + explicit BufferDecoderGMPCrashHelper(nsPIDOMWindowInner* aParent) + : mParent(do_GetWeakReference(aParent)) + { + MOZ_ASSERT(NS_IsMainThread()); + } + already_AddRefed<nsPIDOMWindowInner> GetPluginCrashedEventTarget() override + { + MOZ_ASSERT(NS_IsMainThread()); + nsCOMPtr<nsPIDOMWindowInner> window = do_QueryReferent(mParent); + return window.forget(); + } +private: + nsWeakPtr mParent; +}; + +bool +MediaDecodeTask::CreateReader() +{ + MOZ_ASSERT(NS_IsMainThread()); + + + nsCOMPtr<nsIPrincipal> principal; + nsCOMPtr<nsIScriptObjectPrincipal> sop = do_QueryInterface(mDecodeJob.mContext->GetParentObject()); + if (sop) { + principal = sop->GetPrincipal(); + } + + RefPtr<BufferMediaResource> resource = + new BufferMediaResource(static_cast<uint8_t*> (mBuffer), + mLength, principal, mContentType); + + MOZ_ASSERT(!mBufferDecoder); + mBufferDecoder = new BufferDecoder(resource, + new BufferDecoderGMPCrashHelper(mDecodeJob.mContext->GetParentObject())); + + // If you change this list to add support for new decoders, please consider + // updating HTMLMediaElement::CreateDecoder as well. 
+ + mDecoderReader = DecoderTraits::CreateReader(mContentType, mBufferDecoder); + + if (!mDecoderReader) { + return false; + } + + nsresult rv = mDecoderReader->Init(); + if (NS_FAILED(rv)) { + return false; + } + + return true; +} + +class AutoResampler final +{ +public: + AutoResampler() + : mResampler(nullptr) + {} + ~AutoResampler() + { + if (mResampler) { + speex_resampler_destroy(mResampler); + } + } + operator SpeexResamplerState*() const + { + MOZ_ASSERT(mResampler); + return mResampler; + } + void operator=(SpeexResamplerState* aResampler) + { + mResampler = aResampler; + } + +private: + SpeexResamplerState* mResampler; +}; + +void +MediaDecodeTask::Decode() +{ + MOZ_ASSERT(!NS_IsMainThread()); + + mBufferDecoder->BeginDecoding(mDecoderReader->OwnerThread()); + + // Tell the decoder reader that we are not going to play the data directly, + // and that we should not reject files with more channels than the audio + // backend support. + mDecoderReader->SetIgnoreAudioOutputFormat(); + + mDecoderReader->AsyncReadMetadata()->Then(mDecoderReader->OwnerThread(), __func__, this, + &MediaDecodeTask::OnMetadataRead, + &MediaDecodeTask::OnMetadataNotRead); +} + +void +MediaDecodeTask::OnMetadataRead(MetadataHolder* aMetadata) +{ + mMediaInfo = aMetadata->mInfo; + if (!mMediaInfo.HasAudio()) { + mDecoderReader->Shutdown(); + ReportFailureOnMainThread(WebAudioDecodeJob::NoAudio); + return; + } + + nsCString codec; + if (!mMediaInfo.mAudio.GetAsAudioInfo()->mMimeType.IsEmpty()) { + codec = nsPrintfCString("webaudio; %s", mMediaInfo.mAudio.GetAsAudioInfo()->mMimeType.get()); + } else { + codec = nsPrintfCString("webaudio;resource; %s", mContentType.get()); + } + + nsCOMPtr<nsIRunnable> task = NS_NewRunnableFunction([codec]() -> void { + MOZ_ASSERT(!codec.IsEmpty()); + MOZ_LOG(gMediaDecoderLog, + LogLevel::Debug, + ("Telemetry (WebAudio) MEDIA_CODEC_USED= '%s'", codec.get())); + Telemetry::Accumulate(Telemetry::ID::MEDIA_CODEC_USED, codec); + }); + 
AbstractThread::MainThread()->Dispatch(task.forget()); + + RequestSample(); +} + +void +MediaDecodeTask::OnMetadataNotRead(const MediaResult& aReason) +{ + mDecoderReader->Shutdown(); + ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent); +} + +void +MediaDecodeTask::RequestSample() +{ + mDecoderReader->RequestAudioData()->Then(mDecoderReader->OwnerThread(), __func__, this, + &MediaDecodeTask::SampleDecoded, + &MediaDecodeTask::SampleNotDecoded); +} + +void +MediaDecodeTask::SampleDecoded(MediaData* aData) +{ + MOZ_ASSERT(!NS_IsMainThread()); + mAudioQueue.Push(aData); + if (!mFirstFrameDecoded) { + mDecoderReader->ReadUpdatedMetadata(&mMediaInfo); + mFirstFrameDecoded = true; + } + RequestSample(); +} + +void +MediaDecodeTask::SampleNotDecoded(const MediaResult& aError) +{ + MOZ_ASSERT(!NS_IsMainThread()); + if (aError == NS_ERROR_DOM_MEDIA_END_OF_STREAM) { + FinishDecode(); + } else { + mDecoderReader->Shutdown(); + ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent); + } +} + +void +MediaDecodeTask::FinishDecode() +{ + mDecoderReader->Shutdown(); + + uint32_t frameCount = mAudioQueue.FrameCount(); + uint32_t channelCount = mMediaInfo.mAudio.mChannels; + uint32_t sampleRate = mMediaInfo.mAudio.mRate; + + if (!frameCount || !channelCount || !sampleRate) { + ReportFailureOnMainThread(WebAudioDecodeJob::InvalidContent); + return; + } + + const uint32_t destSampleRate = mDecodeJob.mContext->SampleRate(); + AutoResampler resampler; + + uint32_t resampledFrames = frameCount; + if (sampleRate != destSampleRate) { + resampledFrames = static_cast<uint32_t>( + static_cast<uint64_t>(destSampleRate) * + static_cast<uint64_t>(frameCount) / + static_cast<uint64_t>(sampleRate) + ); + + resampler = speex_resampler_init(channelCount, + sampleRate, + destSampleRate, + SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr); + speex_resampler_skip_zeros(resampler); + resampledFrames += speex_resampler_get_output_latency(resampler); + } + + // Allocate the channel buffers. 
Note that if we end up resampling, we may + // write fewer bytes than mResampledFrames to the output buffer, in which + // case mWriteIndex will tell us how many valid samples we have. + mDecodeJob.mBuffer = ThreadSharedFloatArrayBufferList:: + Create(channelCount, resampledFrames, fallible); + if (!mDecodeJob.mBuffer) { + ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError); + return; + } + + RefPtr<MediaData> mediaData; + while ((mediaData = mAudioQueue.PopFront())) { + RefPtr<AudioData> audioData = mediaData->As<AudioData>(); + audioData->EnsureAudioBuffer(); // could lead to a copy :( + AudioDataValue* bufferData = static_cast<AudioDataValue*> + (audioData->mAudioBuffer->Data()); + + if (sampleRate != destSampleRate) { + const uint32_t maxOutSamples = resampledFrames - mDecodeJob.mWriteIndex; + + for (uint32_t i = 0; i < audioData->mChannels; ++i) { + uint32_t inSamples = audioData->mFrames; + uint32_t outSamples = maxOutSamples; + float* outData = + mDecodeJob.mBuffer->GetDataForWrite(i) + mDecodeJob.mWriteIndex; + + WebAudioUtils::SpeexResamplerProcess( + resampler, i, &bufferData[i * audioData->mFrames], &inSamples, + outData, &outSamples); + + if (i == audioData->mChannels - 1) { + mDecodeJob.mWriteIndex += outSamples; + MOZ_ASSERT(mDecodeJob.mWriteIndex <= resampledFrames); + MOZ_ASSERT(inSamples == audioData->mFrames); + } + } + } else { + for (uint32_t i = 0; i < audioData->mChannels; ++i) { + float* outData = + mDecodeJob.mBuffer->GetDataForWrite(i) + mDecodeJob.mWriteIndex; + ConvertAudioSamples(&bufferData[i * audioData->mFrames], + outData, audioData->mFrames); + + if (i == audioData->mChannels - 1) { + mDecodeJob.mWriteIndex += audioData->mFrames; + } + } + } + } + + if (sampleRate != destSampleRate) { + uint32_t inputLatency = speex_resampler_get_input_latency(resampler); + const uint32_t maxOutSamples = resampledFrames - mDecodeJob.mWriteIndex; + for (uint32_t i = 0; i < channelCount; ++i) { + uint32_t inSamples = inputLatency; + uint32_t 
outSamples = maxOutSamples; + float* outData = + mDecodeJob.mBuffer->GetDataForWrite(i) + mDecodeJob.mWriteIndex; + + WebAudioUtils::SpeexResamplerProcess( + resampler, i, (AudioDataValue*)nullptr, &inSamples, + outData, &outSamples); + + if (i == channelCount - 1) { + mDecodeJob.mWriteIndex += outSamples; + MOZ_ASSERT(mDecodeJob.mWriteIndex <= resampledFrames); + MOZ_ASSERT(inSamples == inputLatency); + } + } + } + + mPhase = PhaseEnum::AllocateBuffer; + NS_DispatchToMainThread(this); +} + +void +MediaDecodeTask::AllocateBuffer() +{ + MOZ_ASSERT(NS_IsMainThread()); + + if (!mDecodeJob.AllocateBuffer()) { + ReportFailureOnMainThread(WebAudioDecodeJob::UnknownError); + return; + } + + mPhase = PhaseEnum::Done; + CallbackTheResult(); +} + +void +MediaDecodeTask::CallbackTheResult() +{ + MOZ_ASSERT(NS_IsMainThread()); + + Cleanup(); + + // Now, we're ready to call the script back with the resulting buffer + mDecodeJob.OnSuccess(WebAudioDecodeJob::NoError); +} + +bool +WebAudioDecodeJob::AllocateBuffer() +{ + MOZ_ASSERT(!mOutput); + MOZ_ASSERT(NS_IsMainThread()); + + // Now create the AudioBuffer + ErrorResult rv; + uint32_t channelCount = mBuffer->GetChannels(); + mOutput = AudioBuffer::Create(mContext, channelCount, + mWriteIndex, mContext->SampleRate(), + mBuffer.forget(), rv); + return !rv.Failed(); +} + +void +AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer, + uint32_t aLength, WebAudioDecodeJob& aDecodeJob) +{ + // Do not attempt to decode the media if we were not successful at sniffing + // the content type. 
+ if (!*aContentType || + strcmp(aContentType, APPLICATION_OCTET_STREAM) == 0) { + nsCOMPtr<nsIRunnable> event = + new ReportResultTask(aDecodeJob, + &WebAudioDecodeJob::OnFailure, + WebAudioDecodeJob::UnknownContent); + JS_free(nullptr, aBuffer); + NS_DispatchToMainThread(event); + return; + } + + RefPtr<MediaDecodeTask> task = + new MediaDecodeTask(aContentType, aBuffer, aLength, aDecodeJob); + if (!task->CreateReader()) { + nsCOMPtr<nsIRunnable> event = + new ReportResultTask(aDecodeJob, + &WebAudioDecodeJob::OnFailure, + WebAudioDecodeJob::UnknownError); + NS_DispatchToMainThread(event); + } else { + // If we did this without a temporary: + // task->Reader()->OwnerThread()->Dispatch(task.forget()) + // we might evaluate the task.forget() before calling Reader(). Enforce + // a non-crashy order-of-operations. + TaskQueue* taskQueue = task->Reader()->OwnerThread(); + taskQueue->Dispatch(task.forget()); + } +} + +WebAudioDecodeJob::WebAudioDecodeJob(const nsACString& aContentType, + AudioContext* aContext, + Promise* aPromise, + DecodeSuccessCallback* aSuccessCallback, + DecodeErrorCallback* aFailureCallback) + : mContentType(aContentType) + , mWriteIndex(0) + , mContext(aContext) + , mPromise(aPromise) + , mSuccessCallback(aSuccessCallback) + , mFailureCallback(aFailureCallback) +{ + MOZ_ASSERT(aContext); + MOZ_ASSERT(NS_IsMainThread()); + MOZ_COUNT_CTOR(WebAudioDecodeJob); +} + +WebAudioDecodeJob::~WebAudioDecodeJob() +{ + MOZ_ASSERT(NS_IsMainThread()); + MOZ_COUNT_DTOR(WebAudioDecodeJob); +} + +void +WebAudioDecodeJob::OnSuccess(ErrorCode aErrorCode) +{ + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(aErrorCode == NoError); + + if (mSuccessCallback) { + ErrorResult rv; + mSuccessCallback->Call(*mOutput, rv); + // Ignore errors in calling the callback, since there is not much that we can + // do about it here. 
+ rv.SuppressException(); + } + mPromise->MaybeResolve(mOutput); + + mContext->RemoveFromDecodeQueue(this); + +} + +void +WebAudioDecodeJob::OnFailure(ErrorCode aErrorCode) +{ + MOZ_ASSERT(NS_IsMainThread()); + + const char* errorMessage; + switch (aErrorCode) { + case NoError: + MOZ_FALLTHROUGH_ASSERT("Who passed NoError to OnFailure?"); + // Fall through to get some sort of a sane error message if this actually + // happens at runtime. + case UnknownError: + errorMessage = "MediaDecodeAudioDataUnknownError"; + break; + case UnknownContent: + errorMessage = "MediaDecodeAudioDataUnknownContentType"; + break; + case InvalidContent: + errorMessage = "MediaDecodeAudioDataInvalidContent"; + break; + case NoAudio: + errorMessage = "MediaDecodeAudioDataNoAudio"; + break; + } + + nsIDocument* doc = nullptr; + if (nsPIDOMWindowInner* pWindow = mContext->GetParentObject()) { + doc = pWindow->GetExtantDoc(); + } + nsContentUtils::ReportToConsole(nsIScriptError::errorFlag, + NS_LITERAL_CSTRING("Media"), + doc, + nsContentUtils::eDOM_PROPERTIES, + errorMessage); + + // Ignore errors in calling the callback, since there is not much that we can + // do about it here. 
+ if (mFailureCallback) { + mFailureCallback->Call(); + } + + mPromise->MaybeReject(NS_ERROR_DOM_ENCODING_NOT_SUPPORTED_ERR); + + mContext->RemoveFromDecodeQueue(this); +} + +size_t +WebAudioDecodeJob::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = 0; + amount += mContentType.SizeOfExcludingThisIfUnshared(aMallocSizeOf); + if (mSuccessCallback) { + amount += mSuccessCallback->SizeOfIncludingThis(aMallocSizeOf); + } + if (mFailureCallback) { + amount += mFailureCallback->SizeOfIncludingThis(aMallocSizeOf); + } + if (mOutput) { + amount += mOutput->SizeOfIncludingThis(aMallocSizeOf); + } + if (mBuffer) { + amount += mBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + return amount; +} + +size_t +WebAudioDecodeJob::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +} // namespace mozilla + diff --git a/dom/media/webaudio/MediaBufferDecoder.h b/dom/media/webaudio/MediaBufferDecoder.h new file mode 100644 index 000000000..3e79b37ff --- /dev/null +++ b/dom/media/webaudio/MediaBufferDecoder.h @@ -0,0 +1,79 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef MediaBufferDecoder_h_ +#define MediaBufferDecoder_h_ + +#include "nsWrapperCache.h" +#include "nsCOMPtr.h" +#include "nsString.h" +#include "nsTArray.h" +#include "mozilla/dom/TypedArray.h" +#include "mozilla/MemoryReporting.h" + +namespace mozilla { + +class ThreadSharedFloatArrayBufferList; + +namespace dom { +class AudioBuffer; +class AudioContext; +class DecodeErrorCallback; +class DecodeSuccessCallback; +class Promise; +} // namespace dom + +struct WebAudioDecodeJob final +{ + // You may omit both the success and failure callback, or you must pass both. + // The callbacks are only necessary for asynchronous operation. + WebAudioDecodeJob(const nsACString& aContentType, + dom::AudioContext* aContext, + dom::Promise* aPromise, + dom::DecodeSuccessCallback* aSuccessCallback = nullptr, + dom::DecodeErrorCallback* aFailureCallback = nullptr); + + NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(WebAudioDecodeJob) + NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(WebAudioDecodeJob) + + enum ErrorCode { + NoError, + UnknownContent, + UnknownError, + InvalidContent, + NoAudio + }; + + typedef void (WebAudioDecodeJob::*ResultFn)(ErrorCode); + + void OnSuccess(ErrorCode /* ignored */); + void OnFailure(ErrorCode aErrorCode); + + bool AllocateBuffer(); + + size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + nsCString mContentType; + uint32_t mWriteIndex; + RefPtr<dom::AudioContext> mContext; + RefPtr<dom::Promise> mPromise; + RefPtr<dom::DecodeSuccessCallback> mSuccessCallback; + RefPtr<dom::DecodeErrorCallback> mFailureCallback; // can be null + RefPtr<dom::AudioBuffer> mOutput; + RefPtr<ThreadSharedFloatArrayBufferList> mBuffer; + +private: + ~WebAudioDecodeJob(); +}; + +void AsyncDecodeWebAudio(const char* aContentType, uint8_t* aBuffer, + uint32_t aLength, WebAudioDecodeJob& aDecodeJob); + +} // namespace mozilla + +#endif + diff --git 
a/dom/media/webaudio/MediaElementAudioSourceNode.cpp b/dom/media/webaudio/MediaElementAudioSourceNode.cpp new file mode 100644 index 000000000..ebf7dc44f --- /dev/null +++ b/dom/media/webaudio/MediaElementAudioSourceNode.cpp @@ -0,0 +1,40 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaElementAudioSourceNode.h" +#include "mozilla/dom/MediaElementAudioSourceNodeBinding.h" + +namespace mozilla { +namespace dom { + +MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext* aContext) + : MediaStreamAudioSourceNode(aContext) +{ +} + +/* static */ already_AddRefed<MediaElementAudioSourceNode> +MediaElementAudioSourceNode::Create(AudioContext* aContext, + DOMMediaStream* aStream, ErrorResult& aRv) +{ + RefPtr<MediaElementAudioSourceNode> node = + new MediaElementAudioSourceNode(aContext); + + node->Init(aStream, aRv); + if (aRv.Failed()) { + return nullptr; + } + + return node.forget(); +} + +JSObject* +MediaElementAudioSourceNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return MediaElementAudioSourceNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/MediaElementAudioSourceNode.h b/dom/media/webaudio/MediaElementAudioSourceNode.h new file mode 100644 index 000000000..f6791f355 --- /dev/null +++ b/dom/media/webaudio/MediaElementAudioSourceNode.h @@ -0,0 +1,44 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaElementAudioSourceNode_h_ +#define MediaElementAudioSourceNode_h_ + +#include "MediaStreamAudioSourceNode.h" + +namespace mozilla { +namespace dom { + +class MediaElementAudioSourceNode final : public MediaStreamAudioSourceNode +{ +public: + static already_AddRefed<MediaElementAudioSourceNode> + Create(AudioContext* aContext, DOMMediaStream* aStream, ErrorResult& aRv); + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + const char* NodeType() const override + { + return "MediaElementAudioSourceNode"; + } + + const char* CrossOriginErrorString() const override + { + return "MediaElementAudioSourceNodeCrossOrigin"; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } +private: + explicit MediaElementAudioSourceNode(AudioContext* aContext); +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/MediaStreamAudioDestinationNode.cpp b/dom/media/webaudio/MediaStreamAudioDestinationNode.cpp new file mode 100644 index 000000000..d8c732e47 --- /dev/null +++ b/dom/media/webaudio/MediaStreamAudioDestinationNode.cpp @@ -0,0 +1,142 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "MediaStreamAudioDestinationNode.h" +#include "nsIDocument.h" +#include "mozilla/dom/MediaStreamAudioDestinationNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "DOMMediaStream.h" +#include "MediaStreamTrack.h" +#include "TrackUnionStream.h" + +namespace mozilla { +namespace dom { + +class AudioDestinationTrackSource : + public MediaStreamTrackSource +{ +public: + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioDestinationTrackSource, + MediaStreamTrackSource) + + AudioDestinationTrackSource(MediaStreamAudioDestinationNode* aNode, + nsIPrincipal* aPrincipal) + : MediaStreamTrackSource(aPrincipal, nsString()) + , mNode(aNode) + { + } + + void Destroy() override + { + if (mNode) { + mNode->DestroyMediaStream(); + mNode = nullptr; + } + } + + MediaSourceEnum GetMediaSource() const override + { + return MediaSourceEnum::AudioCapture; + } + + void Stop() override + { + Destroy(); + } + +private: + virtual ~AudioDestinationTrackSource() {} + + RefPtr<MediaStreamAudioDestinationNode> mNode; +}; + +NS_IMPL_ADDREF_INHERITED(AudioDestinationTrackSource, + MediaStreamTrackSource) +NS_IMPL_RELEASE_INHERITED(AudioDestinationTrackSource, + MediaStreamTrackSource) +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(AudioDestinationTrackSource) +NS_INTERFACE_MAP_END_INHERITING(MediaStreamTrackSource) +NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioDestinationTrackSource, + MediaStreamTrackSource, + mNode) + +NS_IMPL_CYCLE_COLLECTION_INHERITED(MediaStreamAudioDestinationNode, AudioNode, mDOMStream) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(MediaStreamAudioDestinationNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(MediaStreamAudioDestinationNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(MediaStreamAudioDestinationNode, AudioNode) + +MediaStreamAudioDestinationNode::MediaStreamAudioDestinationNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + 
ChannelCountMode::Explicit, + ChannelInterpretation::Speakers) + , mDOMStream( + DOMAudioNodeMediaStream::CreateTrackUnionStreamAsInput(GetOwner(), + this, + aContext->Graph())) +{ + // Ensure an audio track with the correct ID is exposed to JS + nsIDocument* doc = aContext->GetParentObject()->GetExtantDoc(); + RefPtr<MediaStreamTrackSource> source = + new AudioDestinationTrackSource(this, doc->NodePrincipal()); + RefPtr<MediaStreamTrack> track = + mDOMStream->CreateDOMTrack(AudioNodeStream::AUDIO_TRACK, + MediaSegment::AUDIO, source, + MediaTrackConstraints()); + mDOMStream->AddTrackInternal(track); + + ProcessedMediaStream* outputStream = mDOMStream->GetInputStream()->AsProcessedStream(); + MOZ_ASSERT(!!outputStream); + AudioNodeEngine* engine = new AudioNodeEngine(this); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::EXTERNAL_OUTPUT, + aContext->Graph()); + mPort = outputStream->AllocateInputPort(mStream, AudioNodeStream::AUDIO_TRACK); +} + +MediaStreamAudioDestinationNode::~MediaStreamAudioDestinationNode() +{ +} + +size_t +MediaStreamAudioDestinationNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + // Future: + // - mDOMStream + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mPort->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +MediaStreamAudioDestinationNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +void +MediaStreamAudioDestinationNode::DestroyMediaStream() +{ + AudioNode::DestroyMediaStream(); + if (mPort) { + mPort->Destroy(); + mPort = nullptr; + } +} + +JSObject* +MediaStreamAudioDestinationNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return MediaStreamAudioDestinationNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/MediaStreamAudioDestinationNode.h 
b/dom/media/webaudio/MediaStreamAudioDestinationNode.h new file mode 100644 index 000000000..6c033b466 --- /dev/null +++ b/dom/media/webaudio/MediaStreamAudioDestinationNode.h @@ -0,0 +1,56 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaStreamAudioDestinationNode_h_ +#define MediaStreamAudioDestinationNode_h_ + +#include "AudioNode.h" + +namespace mozilla { +namespace dom { + +class MediaStreamAudioDestinationNode final : public AudioNode +{ +public: + explicit MediaStreamAudioDestinationNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaStreamAudioDestinationNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + uint16_t NumberOfOutputs() const final override + { + return 0; + } + + void DestroyMediaStream() override; + + DOMMediaStream* DOMStream() const + { + return mDOMStream; + } + + const char* NodeType() const override + { + return "MediaStreamAudioDestinationNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~MediaStreamAudioDestinationNode(); + +private: + RefPtr<DOMMediaStream> mDOMStream; + RefPtr<MediaInputPort> mPort; +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/MediaStreamAudioSourceNode.cpp b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp new file mode 100644 index 000000000..beedd5300 --- /dev/null +++ b/dom/media/webaudio/MediaStreamAudioSourceNode.cpp @@ -0,0 +1,254 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set 
ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "MediaStreamAudioSourceNode.h" +#include "mozilla/dom/MediaStreamAudioSourceNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeExternalInputStream.h" +#include "AudioStreamTrack.h" +#include "nsIDocument.h" +#include "mozilla/CORSMode.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_CLASS(MediaStreamAudioSourceNode) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(MediaStreamAudioSourceNode) + tmp->Destroy(); + NS_IMPL_CYCLE_COLLECTION_UNLINK(mInputStream) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mInputTrack) +NS_IMPL_CYCLE_COLLECTION_UNLINK_END_INHERITED(AudioNode) + +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(MediaStreamAudioSourceNode, AudioNode) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mInputStream) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mInputTrack) +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(MediaStreamAudioSourceNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(MediaStreamAudioSourceNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(MediaStreamAudioSourceNode, AudioNode) + +MediaStreamAudioSourceNode::MediaStreamAudioSourceNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) +{ +} + +/* static */ already_AddRefed<MediaStreamAudioSourceNode> +MediaStreamAudioSourceNode::Create(AudioContext* aContext, + DOMMediaStream* aStream, ErrorResult& aRv) +{ + RefPtr<MediaStreamAudioSourceNode> node = + new MediaStreamAudioSourceNode(aContext); + + node->Init(aStream, aRv); + if (aRv.Failed()) { + return nullptr; + } + + return node.forget(); +} + +void +MediaStreamAudioSourceNode::Init(DOMMediaStream* aMediaStream, ErrorResult& aRv) +{ + if (!aMediaStream) { + 
aRv.Throw(NS_ERROR_FAILURE); + return; + } + + MediaStream* inputStream = aMediaStream->GetPlaybackStream(); + MediaStreamGraph* graph = Context()->Graph(); + if (NS_WARN_IF(graph != inputStream->Graph())) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + mInputStream = aMediaStream; + AudioNodeEngine* engine = new MediaStreamAudioSourceNodeEngine(this); + mStream = AudioNodeExternalInputStream::Create(graph, engine); + mInputStream->AddConsumerToKeepAlive(static_cast<nsIDOMEventTarget*>(this)); + + mInputStream->RegisterTrackListener(this); + AttachToFirstTrack(mInputStream); +} + +void +MediaStreamAudioSourceNode::Destroy() +{ + if (mInputStream) { + mInputStream->UnregisterTrackListener(this); + mInputStream = nullptr; + } + DetachFromTrack(); +} + +MediaStreamAudioSourceNode::~MediaStreamAudioSourceNode() +{ + Destroy(); +} + +void +MediaStreamAudioSourceNode::AttachToTrack(const RefPtr<MediaStreamTrack>& aTrack) +{ + MOZ_ASSERT(!mInputTrack); + MOZ_ASSERT(aTrack->AsAudioStreamTrack()); + + if (!mStream) { + return; + } + + mInputTrack = aTrack; + ProcessedMediaStream* outputStream = + static_cast<ProcessedMediaStream*>(mStream.get()); + mInputPort = mInputTrack->ForwardTrackContentsTo(outputStream); + PrincipalChanged(mInputTrack); // trigger enabling/disabling of the connector + mInputTrack->AddPrincipalChangeObserver(this); +} + +void +MediaStreamAudioSourceNode::DetachFromTrack() +{ + if (mInputTrack) { + mInputTrack->RemovePrincipalChangeObserver(this); + mInputTrack = nullptr; + } + if (mInputPort) { + mInputPort->Destroy(); + mInputPort = nullptr; + } +} + +void +MediaStreamAudioSourceNode::AttachToFirstTrack(const RefPtr<DOMMediaStream>& aMediaStream) +{ + nsTArray<RefPtr<AudioStreamTrack>> tracks; + aMediaStream->GetAudioTracks(tracks); + + for (const RefPtr<AudioStreamTrack>& track : tracks) { + if (track->Ended()) { + continue; + } + + AttachToTrack(track); + MarkActive(); + return; + } + + // There was no track available. 
We'll allow the node to be garbage collected. + MarkInactive(); +} + +void +MediaStreamAudioSourceNode::NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) +{ + if (mInputTrack) { + return; + } + + if (!aTrack->AsAudioStreamTrack()) { + return; + } + + AttachToTrack(aTrack); +} + +void +MediaStreamAudioSourceNode::NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) +{ + if (aTrack != mInputTrack) { + return; + } + + DetachFromTrack(); + AttachToFirstTrack(mInputStream); +} + +/** + * Changes the principal. Note that this will be called on the main thread, but + * changes will be enacted on the MediaStreamGraph thread. If the principal + * change results in the document principal losing access to the stream, then + * there needs to be other measures in place to ensure that any media that is + * governed by the new stream principal is not available to the MediaStreamGraph + * before this change completes. Otherwise, a site could get access to + * media that they are not authorized to receive. + * + * One solution is to block the altered content, call this method, then dispatch + * another change request to the MediaStreamGraph thread that allows the content + * under the new principal to flow. This might be unnecessary if the principal + * change is changing to be the document principal. 
+ */ +void +MediaStreamAudioSourceNode::PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) +{ + MOZ_ASSERT(aMediaStreamTrack == mInputTrack); + + bool subsumes = false; + nsIDocument* doc = nullptr; + if (nsPIDOMWindowInner* parent = Context()->GetParentObject()) { + doc = parent->GetExtantDoc(); + if (doc) { + nsIPrincipal* docPrincipal = doc->NodePrincipal(); + nsIPrincipal* trackPrincipal = aMediaStreamTrack->GetPrincipal(); + if (!trackPrincipal || NS_FAILED(docPrincipal->Subsumes(trackPrincipal, &subsumes))) { + subsumes = false; + } + } + } + auto stream = static_cast<AudioNodeExternalInputStream*>(mStream.get()); + bool enabled = subsumes || aMediaStreamTrack->GetCORSMode() != CORS_NONE; + stream->SetInt32Parameter(MediaStreamAudioSourceNodeEngine::ENABLE, enabled); + + if (!enabled && doc) { + nsContentUtils::ReportToConsole(nsIScriptError::warningFlag, + NS_LITERAL_CSTRING("Web Audio"), + doc, + nsContentUtils::eDOM_PROPERTIES, + CrossOriginErrorString()); + } +} + +size_t +MediaStreamAudioSourceNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + // Future: + // - mInputStream + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + if (mInputPort) { + amount += mInputPort->SizeOfIncludingThis(aMallocSizeOf); + } + return amount; +} + +size_t +MediaStreamAudioSourceNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +void +MediaStreamAudioSourceNode::DestroyMediaStream() +{ + if (mInputPort) { + mInputPort->Destroy(); + mInputPort = nullptr; + } + AudioNode::DestroyMediaStream(); +} + +JSObject* +MediaStreamAudioSourceNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return MediaStreamAudioSourceNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/MediaStreamAudioSourceNode.h b/dom/media/webaudio/MediaStreamAudioSourceNode.h new file mode 100644 index 
000000000..5383eb2c6 --- /dev/null +++ b/dom/media/webaudio/MediaStreamAudioSourceNode.h @@ -0,0 +1,106 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef MediaStreamAudioSourceNode_h_ +#define MediaStreamAudioSourceNode_h_ + +#include "AudioNode.h" +#include "DOMMediaStream.h" +#include "AudioNodeEngine.h" + +namespace mozilla { + +namespace dom { + +class MediaStreamAudioSourceNodeEngine final : public AudioNodeEngine +{ +public: + explicit MediaStreamAudioSourceNodeEngine(AudioNode* aNode) + : AudioNodeEngine(aNode), mEnabled(false) {} + + bool IsEnabled() const { return mEnabled; } + enum Parameters { + ENABLE + }; + void SetInt32Parameter(uint32_t aIndex, int32_t aValue) override + { + switch (aIndex) { + case ENABLE: + mEnabled = !!aValue; + break; + default: + NS_ERROR("MediaStreamAudioSourceNodeEngine bad parameter index"); + } + } + +private: + bool mEnabled; +}; + +class MediaStreamAudioSourceNode : public AudioNode, + public DOMMediaStream::TrackListener, + public PrincipalChangeObserver<MediaStreamTrack> +{ +public: + static already_AddRefed<MediaStreamAudioSourceNode> + Create(AudioContext* aContext, DOMMediaStream* aStream, ErrorResult& aRv); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaStreamAudioSourceNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + void DestroyMediaStream() override; + + uint16_t NumberOfInputs() const override { return 0; } + + const char* NodeType() const override + { + return "MediaStreamAudioSourceNode"; + } + + virtual const char* CrossOriginErrorString() const + { + return "MediaStreamAudioSourceNodeCrossOrigin"; + } + + size_t 
SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + + // Attaches to aTrack so that its audio content will be used as input. + void AttachToTrack(const RefPtr<MediaStreamTrack>& aTrack); + + // Detaches from the currently attached track if there is one. + void DetachFromTrack(); + + // Attaches to the first available audio track in aMediaStream. + void AttachToFirstTrack(const RefPtr<DOMMediaStream>& aMediaStream); + + // From DOMMediaStream::TrackListener. + void NotifyTrackAdded(const RefPtr<MediaStreamTrack>& aTrack) override; + void NotifyTrackRemoved(const RefPtr<MediaStreamTrack>& aTrack) override; + + // From PrincipalChangeObserver<MediaStreamTrack>. + void PrincipalChanged(MediaStreamTrack* aMediaStreamTrack) override; + +protected: + explicit MediaStreamAudioSourceNode(AudioContext* aContext); + void Init(DOMMediaStream* aMediaStream, ErrorResult& aRv); + void Destroy(); + virtual ~MediaStreamAudioSourceNode(); + +private: + RefPtr<MediaInputPort> mInputPort; + RefPtr<DOMMediaStream> mInputStream; + + // On construction we set this to the first audio track of mInputStream. + RefPtr<MediaStreamTrack> mInputTrack; +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/OfflineAudioCompletionEvent.cpp b/dom/media/webaudio/OfflineAudioCompletionEvent.cpp new file mode 100644 index 000000000..30a571719 --- /dev/null +++ b/dom/media/webaudio/OfflineAudioCompletionEvent.cpp @@ -0,0 +1,42 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "OfflineAudioCompletionEvent.h" +#include "mozilla/dom/OfflineAudioCompletionEventBinding.h" +#include "AudioContext.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(OfflineAudioCompletionEvent, Event, + mRenderedBuffer) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(OfflineAudioCompletionEvent) +NS_INTERFACE_MAP_END_INHERITING(Event) + +NS_IMPL_ADDREF_INHERITED(OfflineAudioCompletionEvent, Event) +NS_IMPL_RELEASE_INHERITED(OfflineAudioCompletionEvent, Event) + +OfflineAudioCompletionEvent::OfflineAudioCompletionEvent(AudioContext* aOwner, + nsPresContext* aPresContext, + WidgetEvent* aEvent) + : Event(aOwner, aPresContext, aEvent) +{ +} + +OfflineAudioCompletionEvent::~OfflineAudioCompletionEvent() +{ +} + +JSObject* +OfflineAudioCompletionEvent::WrapObjectInternal(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return OfflineAudioCompletionEventBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/OfflineAudioCompletionEvent.h b/dom/media/webaudio/OfflineAudioCompletionEvent.h new file mode 100644 index 000000000..bc21fdec3 --- /dev/null +++ b/dom/media/webaudio/OfflineAudioCompletionEvent.h @@ -0,0 +1,53 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef OfflineAudioCompletionEvent_h_ +#define OfflineAudioCompletionEvent_h_ + +#include "AudioBuffer.h" +#include "mozilla/dom/Event.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class OfflineAudioCompletionEvent final : public Event +{ +public: + OfflineAudioCompletionEvent(AudioContext* aOwner, + nsPresContext* aPresContext, + WidgetEvent* aEvent); + + NS_DECL_ISUPPORTS_INHERITED + NS_FORWARD_TO_EVENT + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(OfflineAudioCompletionEvent, Event) + + JSObject* WrapObjectInternal(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + void InitEvent(AudioBuffer* aRenderedBuffer) + { + InitEvent(NS_LITERAL_STRING("complete"), false, false); + mRenderedBuffer = aRenderedBuffer; + } + + AudioBuffer* RenderedBuffer() const + { + return mRenderedBuffer; + } + +protected: + virtual ~OfflineAudioCompletionEvent(); + +private: + RefPtr<AudioBuffer> mRenderedBuffer; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/OscillatorNode.cpp b/dom/media/webaudio/OscillatorNode.cpp new file mode 100644 index 000000000..8e7c103a9 --- /dev/null +++ b/dom/media/webaudio/OscillatorNode.cpp @@ -0,0 +1,580 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "OscillatorNode.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "nsContentUtils.h" +#include "WebAudioUtils.h" +#include "blink/PeriodicWave.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_INHERITED(OscillatorNode, AudioNode, + mPeriodicWave, mFrequency, mDetune) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(OscillatorNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(OscillatorNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(OscillatorNode, AudioNode) + +class OscillatorNodeEngine final : public AudioNodeEngine +{ +public: + OscillatorNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination) + : AudioNodeEngine(aNode) + , mSource(nullptr) + , mDestination(aDestination->Stream()) + , mStart(-1) + , mStop(STREAM_TIME_MAX) + // Keep the default values in sync with OscillatorNode::OscillatorNode. + , mFrequency(440.f) + , mDetune(0.f) + , mType(OscillatorType::Sine) + , mPhase(0.) + , mFinalFrequency(0.) + , mPhaseIncrement(0.) 
+ , mRecomputeParameters(true) + , mCustomLength(0) + , mCustomDisableNormalization(false) + { + MOZ_ASSERT(NS_IsMainThread()); + mBasicWaveFormCache = aDestination->Context()->GetBasicWaveFormCache(); + } + + void SetSourceStream(AudioNodeStream* aSource) + { + mSource = aSource; + } + + enum Parameters { + FREQUENCY, + DETUNE, + TYPE, + PERIODICWAVE_LENGTH, + DISABLE_NORMALIZATION, + START, + STOP, + }; + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + mRecomputeParameters = true; + + MOZ_ASSERT(mDestination); + + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case FREQUENCY: + mFrequency.InsertEvent<int64_t>(aEvent); + break; + case DETUNE: + mDetune.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad OscillatorNodeEngine TimelineParameter"); + } + } + + void SetStreamTimeParameter(uint32_t aIndex, StreamTime aParam) override + { + switch (aIndex) { + case START: + mStart = aParam; + mSource->SetActive(); + break; + case STOP: mStop = aParam; break; + default: + NS_ERROR("Bad OscillatorNodeEngine StreamTimeParameter"); + } + } + + void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override + { + switch (aIndex) { + case TYPE: + // Set the new type. + mType = static_cast<OscillatorType>(aParam); + if (mType == OscillatorType::Sine) { + // Forget any previous custom data. + mCustomLength = 0; + mCustomDisableNormalization = false; + mCustom = nullptr; + mPeriodicWave = nullptr; + mRecomputeParameters = true; + } + switch (mType) { + case OscillatorType::Sine: + mPhase = 0.0; + break; + case OscillatorType::Square: + case OscillatorType::Triangle: + case OscillatorType::Sawtooth: + mPeriodicWave = mBasicWaveFormCache->GetBasicWaveForm(mType); + break; + case OscillatorType::Custom: + break; + default: + NS_ERROR("Bad OscillatorNodeEngine type parameter."); + } + // End type switch. 
+ break; + case PERIODICWAVE_LENGTH: + MOZ_ASSERT(aParam >= 0, "negative custom array length"); + mCustomLength = static_cast<uint32_t>(aParam); + break; + case DISABLE_NORMALIZATION: + MOZ_ASSERT(aParam >= 0, "negative custom array length"); + mCustomDisableNormalization = static_cast<uint32_t>(aParam); + break; + default: + NS_ERROR("Bad OscillatorNodeEngine Int32Parameter."); + } + // End index switch. + } + + void SetBuffer(already_AddRefed<ThreadSharedFloatArrayBufferList> aBuffer) override + { + MOZ_ASSERT(mCustomLength, "Custom buffer sent before length"); + mCustom = aBuffer; + MOZ_ASSERT(mCustom->GetChannels() == 2, + "PeriodicWave should have sent two channels"); + mPeriodicWave = WebCore::PeriodicWave::create(mSource->SampleRate(), + mCustom->GetData(0), + mCustom->GetData(1), + mCustomLength, + mCustomDisableNormalization); + } + + void IncrementPhase() + { + const float twoPiFloat = float(2 * M_PI); + mPhase += mPhaseIncrement; + if (mPhase > twoPiFloat) { + mPhase -= twoPiFloat; + } else if (mPhase < -twoPiFloat) { + mPhase += twoPiFloat; + } + } + + // Returns true if the final frequency (and thus the phase increment) changed, + // false otherwise. This allow some optimizations at callsite. + bool UpdateParametersIfNeeded(StreamTime ticks, size_t count) + { + double frequency, detune; + + // Shortcut if frequency-related AudioParam are not automated, and we + // already have computed the frequency information and related parameters. 
+ if (!ParametersMayNeedUpdate()) { + return false; + } + + bool simpleFrequency = mFrequency.HasSimpleValue(); + bool simpleDetune = mDetune.HasSimpleValue(); + + if (simpleFrequency) { + frequency = mFrequency.GetValue(); + } else { + frequency = mFrequency.GetValueAtTime(ticks, count); + } + if (simpleDetune) { + detune = mDetune.GetValue(); + } else { + detune = mDetune.GetValueAtTime(ticks, count); + } + + float finalFrequency = frequency * pow(2., detune / 1200.); + float signalPeriod = mSource->SampleRate() / finalFrequency; + mRecomputeParameters = false; + + mPhaseIncrement = 2 * M_PI / signalPeriod; + + if (finalFrequency != mFinalFrequency) { + mFinalFrequency = finalFrequency; + return true; + } + return false; + } + + void FillBounds(float* output, StreamTime ticks, + uint32_t& start, uint32_t& end) + { + MOZ_ASSERT(output); + static_assert(StreamTime(WEBAUDIO_BLOCK_SIZE) < UINT_MAX, + "WEBAUDIO_BLOCK_SIZE overflows interator bounds."); + start = 0; + if (ticks < mStart) { + start = mStart - ticks; + for (uint32_t i = 0; i < start; ++i) { + output[i] = 0.0; + } + } + end = WEBAUDIO_BLOCK_SIZE; + if (ticks + end > mStop) { + end = mStop - ticks; + for (uint32_t i = end; i < WEBAUDIO_BLOCK_SIZE; ++i) { + output[i] = 0.0; + } + } + } + + void ComputeSine(float * aOutput, StreamTime ticks, uint32_t aStart, uint32_t aEnd) + { + for (uint32_t i = aStart; i < aEnd; ++i) { + // We ignore the return value, changing the frequency has no impact on + // performances here. 
+ UpdateParametersIfNeeded(ticks, i); + + aOutput[i] = sin(mPhase); + + IncrementPhase(); + } + } + + bool ParametersMayNeedUpdate() + { + return !mDetune.HasSimpleValue() || + !mFrequency.HasSimpleValue() || + mRecomputeParameters; + } + + void ComputeCustom(float* aOutput, + StreamTime ticks, + uint32_t aStart, + uint32_t aEnd) + { + MOZ_ASSERT(mPeriodicWave, "No custom waveform data"); + + uint32_t periodicWaveSize = mPeriodicWave->periodicWaveSize(); + // Mask to wrap wave data indices into the range [0,periodicWaveSize). + uint32_t indexMask = periodicWaveSize - 1; + MOZ_ASSERT(periodicWaveSize && (periodicWaveSize & indexMask) == 0, + "periodicWaveSize must be power of 2"); + float* higherWaveData = nullptr; + float* lowerWaveData = nullptr; + float tableInterpolationFactor; + // Phase increment at frequency of 1 Hz. + // mPhase runs [0,periodicWaveSize) here instead of [0,2*M_PI). + float basePhaseIncrement = mPeriodicWave->rateScale(); + + bool needToFetchWaveData = UpdateParametersIfNeeded(ticks, aStart); + + bool parametersMayNeedUpdate = ParametersMayNeedUpdate(); + mPeriodicWave->waveDataForFundamentalFrequency(mFinalFrequency, + lowerWaveData, + higherWaveData, + tableInterpolationFactor); + + for (uint32_t i = aStart; i < aEnd; ++i) { + if (parametersMayNeedUpdate) { + if (needToFetchWaveData) { + mPeriodicWave->waveDataForFundamentalFrequency(mFinalFrequency, + lowerWaveData, + higherWaveData, + tableInterpolationFactor); + } + needToFetchWaveData = UpdateParametersIfNeeded(ticks, i); + } + // Bilinear interpolation between adjacent samples in each table. 
+ float floorPhase = floorf(mPhase); + int j1Signed = static_cast<int>(floorPhase); + uint32_t j1 = j1Signed & indexMask; + uint32_t j2 = j1 + 1; + j2 &= indexMask; + + float sampleInterpolationFactor = mPhase - floorPhase; + + float lower = (1.0f - sampleInterpolationFactor) * lowerWaveData[j1] + + sampleInterpolationFactor * lowerWaveData[j2]; + float higher = (1.0f - sampleInterpolationFactor) * higherWaveData[j1] + + sampleInterpolationFactor * higherWaveData[j2]; + aOutput[i] = (1.0f - tableInterpolationFactor) * lower + + tableInterpolationFactor * higher; + + // Calculate next phase position from wrapped value j1 to avoid loss of + // precision at large values. + mPhase = + j1 + sampleInterpolationFactor + basePhaseIncrement * mFinalFrequency; + } + } + + void ComputeSilence(AudioBlock *aOutput) + { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + MOZ_ASSERT(mSource == aStream, "Invalid source stream"); + + StreamTime ticks = mDestination->GraphTimeToStreamTime(aFrom); + if (mStart == -1) { + ComputeSilence(aOutput); + return; + } + + if (ticks + WEBAUDIO_BLOCK_SIZE <= mStart || ticks >= mStop) { + ComputeSilence(aOutput); + + } else { + aOutput->AllocateChannels(1); + float* output = aOutput->ChannelFloatsForWrite(0); + + uint32_t start, end; + FillBounds(output, ticks, start, end); + + // Synthesize the correct waveform. + switch(mType) { + case OscillatorType::Sine: + ComputeSine(output, ticks, start, end); + break; + case OscillatorType::Square: + case OscillatorType::Triangle: + case OscillatorType::Sawtooth: + case OscillatorType::Custom: + ComputeCustom(output, ticks, start, end); + break; + default: + ComputeSilence(aOutput); + }; + } + + if (ticks + WEBAUDIO_BLOCK_SIZE >= mStop) { + // We've finished playing. 
+ *aFinished = true; + } + } + + bool IsActive() const override + { + // start() has been called. + return mStart != -1; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + + // Not owned: + // - mSource + // - mDestination + // - mFrequency (internal ref owned by node) + // - mDetune (internal ref owned by node) + + if (mCustom) { + amount += mCustom->SizeOfIncludingThis(aMallocSizeOf); + } + + if (mPeriodicWave) { + amount += mPeriodicWave->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + AudioNodeStream* mSource; + AudioNodeStream* mDestination; + StreamTime mStart; + StreamTime mStop; + AudioParamTimeline mFrequency; + AudioParamTimeline mDetune; + OscillatorType mType; + float mPhase; + float mFinalFrequency; + float mPhaseIncrement; + bool mRecomputeParameters; + RefPtr<ThreadSharedFloatArrayBufferList> mCustom; + RefPtr<BasicWaveFormCache> mBasicWaveFormCache; + uint32_t mCustomLength; + bool mCustomDisableNormalization; + RefPtr<WebCore::PeriodicWave> mPeriodicWave; +}; + +OscillatorNode::OscillatorNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mType(OscillatorType::Sine) + , mFrequency(new AudioParam(this, OscillatorNodeEngine::FREQUENCY, + 440.0f, "frequency")) + , mDetune(new AudioParam(this, OscillatorNodeEngine::DETUNE, 0.0f, "detune")) + , mStartCalled(false) +{ + OscillatorNodeEngine* engine = new OscillatorNodeEngine(this, aContext->Destination()); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NEED_MAIN_THREAD_FINISHED, + aContext->Graph()); + engine->SetSourceStream(mStream); + mStream->AddMainThreadListener(this); +} + +OscillatorNode::~OscillatorNode() +{ +} + +size_t 
+OscillatorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + + // For now only report if we know for sure that it's not shared. + if (mPeriodicWave) { + amount += mPeriodicWave->SizeOfIncludingThisIfNotShared(aMallocSizeOf); + } + amount += mFrequency->SizeOfIncludingThis(aMallocSizeOf); + amount += mDetune->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +OscillatorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +OscillatorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return OscillatorNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +OscillatorNode::DestroyMediaStream() +{ + if (mStream) { + mStream->RemoveMainThreadListener(this); + } + AudioNode::DestroyMediaStream(); +} + +void +OscillatorNode::SendTypeToStream() +{ + if (!mStream) { + return; + } + if (mType == OscillatorType::Custom) { + // The engine assumes we'll send the custom data before updating the type. 
+ SendPeriodicWaveToStream(); + } + SendInt32ParameterToStream(OscillatorNodeEngine::TYPE, static_cast<int32_t>(mType)); +} + +void OscillatorNode::SendPeriodicWaveToStream() +{ + NS_ASSERTION(mType == OscillatorType::Custom, + "Sending custom waveform to engine thread with non-custom type"); + MOZ_ASSERT(mStream, "Missing node stream."); + MOZ_ASSERT(mPeriodicWave, "Send called without PeriodicWave object."); + SendInt32ParameterToStream(OscillatorNodeEngine::PERIODICWAVE_LENGTH, + mPeriodicWave->DataLength()); + SendInt32ParameterToStream(OscillatorNodeEngine::DISABLE_NORMALIZATION, + mPeriodicWave->DisableNormalization()); + RefPtr<ThreadSharedFloatArrayBufferList> data = + mPeriodicWave->GetThreadSharedBuffer(); + mStream->SetBuffer(data.forget()); +} + +void +OscillatorNode::Start(double aWhen, ErrorResult& aRv) +{ + if (!WebAudioUtils::IsTimeValid(aWhen)) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + if (mStartCalled) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + mStartCalled = true; + + if (!mStream) { + // Nothing to play, or we're already dead for some reason + return; + } + + // TODO: Perhaps we need to do more here. + mStream->SetStreamTimeParameter(OscillatorNodeEngine::START, + Context(), aWhen); + + MarkActive(); +} + +void +OscillatorNode::Stop(double aWhen, ErrorResult& aRv) +{ + if (!WebAudioUtils::IsTimeValid(aWhen)) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + + if (!mStartCalled) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + + if (!mStream || !Context()) { + // We've already stopped and had our stream shut down + return; + } + + // TODO: Perhaps we need to do more here. 
+ mStream->SetStreamTimeParameter(OscillatorNodeEngine::STOP, + Context(), std::max(0.0, aWhen)); +} + +void +OscillatorNode::NotifyMainThreadStreamFinished() +{ + MOZ_ASSERT(mStream->IsFinished()); + + class EndedEventDispatcher final : public Runnable + { + public: + explicit EndedEventDispatcher(OscillatorNode* aNode) + : mNode(aNode) {} + NS_IMETHOD Run() override + { + // If it's not safe to run scripts right now, schedule this to run later + if (!nsContentUtils::IsSafeToRunScript()) { + nsContentUtils::AddScriptRunner(this); + return NS_OK; + } + + mNode->DispatchTrustedEvent(NS_LITERAL_STRING("ended")); + // Release stream resources. + mNode->DestroyMediaStream(); + return NS_OK; + } + private: + RefPtr<OscillatorNode> mNode; + }; + + NS_DispatchToMainThread(new EndedEventDispatcher(this)); + + // Drop the playing reference + // Warning: The below line might delete this. + MarkInactive(); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/OscillatorNode.h b/dom/media/webaudio/OscillatorNode.h new file mode 100644 index 000000000..1e17e319e --- /dev/null +++ b/dom/media/webaudio/OscillatorNode.h @@ -0,0 +1,104 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef OscillatorNode_h_ +#define OscillatorNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" +#include "PeriodicWave.h" +#include "mozilla/dom/OscillatorNodeBinding.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class OscillatorNode final : public AudioNode, + public MainThreadMediaStreamListener +{ +public: + explicit OscillatorNode(AudioContext* aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(OscillatorNode, AudioNode) + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + void DestroyMediaStream() override; + + uint16_t NumberOfInputs() const final override + { + return 0; + } + + OscillatorType Type() const + { + return mType; + } + void SetType(OscillatorType aType, ErrorResult& aRv) + { + if (aType == OscillatorType::Custom) { + // ::Custom can only be set by setPeriodicWave(). + // https://github.com/WebAudio/web-audio-api/issues/105 for exception. + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + mType = aType; + SendTypeToStream(); + } + + AudioParam* Frequency() const + { + return mFrequency; + } + AudioParam* Detune() const + { + return mDetune; + } + + void Start(double aWhen, ErrorResult& aRv); + void Stop(double aWhen, ErrorResult& aRv); + void SetPeriodicWave(PeriodicWave& aPeriodicWave) + { + mPeriodicWave = &aPeriodicWave; + // SendTypeToStream will call SendPeriodicWaveToStream for us. 
+ mType = OscillatorType::Custom; + SendTypeToStream(); + } + + IMPL_EVENT_HANDLER(ended) + + void NotifyMainThreadStreamFinished() override; + + const char* NodeType() const override + { + return "OscillatorNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~OscillatorNode(); + +private: + void SendTypeToStream(); + void SendPeriodicWaveToStream(); + +private: + OscillatorType mType; + RefPtr<PeriodicWave> mPeriodicWave; + RefPtr<AudioParam> mFrequency; + RefPtr<AudioParam> mDetune; + bool mStartCalled; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/PannerNode.cpp b/dom/media/webaudio/PannerNode.cpp new file mode 100644 index 000000000..7696e984e --- /dev/null +++ b/dom/media/webaudio/PannerNode.cpp @@ -0,0 +1,786 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "PannerNode.h" +#include "AlignmentUtils.h" +#include "AudioDestinationNode.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioListener.h" +#include "PanningUtils.h" +#include "AudioBufferSourceNode.h" +#include "PlayingRefChangeHandler.h" +#include "blink/HRTFPanner.h" +#include "blink/HRTFDatabaseLoader.h" +#include "nsAutoPtr.h" + +using WebCore::HRTFDatabaseLoader; +using WebCore::HRTFPanner; + +namespace mozilla { +namespace dom { + +using namespace std; + +NS_IMPL_CYCLE_COLLECTION_CLASS(PannerNode) +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(PannerNode, AudioNode) + if (tmp->Context()) { + tmp->Context()->UnregisterPannerNode(tmp); + } +NS_IMPL_CYCLE_COLLECTION_UNLINK(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ) +NS_IMPL_CYCLE_COLLECTION_UNLINK_END +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(PannerNode, AudioNode) +NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPositionX, mPositionY, mPositionZ, mOrientationX, mOrientationY, mOrientationZ) +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(PannerNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(PannerNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(PannerNode, AudioNode) + +class PannerNodeEngine final : public AudioNodeEngine +{ +public: + explicit PannerNodeEngine(AudioNode* aNode, AudioDestinationNode* aDestination) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + // Please keep these default values consistent with PannerNode::PannerNode below. + , mPanningModelFunction(&PannerNodeEngine::EqualPowerPanningFunction) + , mDistanceModelFunction(&PannerNodeEngine::InverseGainFunction) + , mPositionX(0.) + , mPositionY(0.) + , mPositionZ(0.) + , mOrientationX(1.) + , mOrientationY(0.) + , mOrientationZ(0.) + , mVelocity() + , mRefDistance(1.) + , mMaxDistance(10000.) + , mRolloffFactor(1.) + , mConeInnerAngle(360.) + , mConeOuterAngle(360.) 
+ , mConeOuterGain(0.) + // These will be initialized when a PannerNode is created, so just initialize them + // to some dummy values here. + , mListenerDopplerFactor(0.) + , mListenerSpeedOfSound(0.) + , mLeftOverData(INT_MIN) + { + } + + void RecvTimelineEvent(uint32_t aIndex, AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + switch (aIndex) { + case PannerNode::POSITIONX: + mPositionX.InsertEvent<int64_t>(aEvent); + break; + case PannerNode::POSITIONY: + mPositionY.InsertEvent<int64_t>(aEvent); + break; + case PannerNode::POSITIONZ: + mPositionZ.InsertEvent<int64_t>(aEvent); + break; + case PannerNode::ORIENTATIONX: + mOrientationX.InsertEvent<int64_t>(aEvent); + break; + case PannerNode::ORIENTATIONY: + mOrientationY.InsertEvent<int64_t>(aEvent); + break; + case PannerNode::ORIENTATIONZ: + mOrientationZ.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad PannerNode TimelineParameter"); + } + } + + void CreateHRTFPanner() + { + MOZ_ASSERT(NS_IsMainThread()); + if (mHRTFPanner) { + return; + } + // HRTFDatabaseLoader needs to be fetched on the main thread. 
+ already_AddRefed<HRTFDatabaseLoader> loader = + HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(NodeMainThread()->Context()->SampleRate()); + mHRTFPanner = new HRTFPanner(NodeMainThread()->Context()->SampleRate(), Move(loader)); + } + + void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override + { + switch (aIndex) { + case PannerNode::PANNING_MODEL: + switch (PanningModelType(aParam)) { + case PanningModelType::Equalpower: + mPanningModelFunction = &PannerNodeEngine::EqualPowerPanningFunction; + break; + case PanningModelType::HRTF: + mPanningModelFunction = &PannerNodeEngine::HRTFPanningFunction; + break; + default: + NS_NOTREACHED("We should never see the alternate names here"); + break; + } + break; + case PannerNode::DISTANCE_MODEL: + switch (DistanceModelType(aParam)) { + case DistanceModelType::Inverse: + mDistanceModelFunction = &PannerNodeEngine::InverseGainFunction; + break; + case DistanceModelType::Linear: + mDistanceModelFunction = &PannerNodeEngine::LinearGainFunction; + break; + case DistanceModelType::Exponential: + mDistanceModelFunction = &PannerNodeEngine::ExponentialGainFunction; + break; + default: + NS_NOTREACHED("We should never see the alternate names here"); + break; + } + break; + default: + NS_ERROR("Bad PannerNodeEngine Int32Parameter"); + } + } + void SetThreeDPointParameter(uint32_t aIndex, const ThreeDPoint& aParam) override + { + switch (aIndex) { + case PannerNode::LISTENER_POSITION: mListenerPosition = aParam; break; + case PannerNode::LISTENER_FRONT_VECTOR: mListenerFrontVector = aParam; break; + case PannerNode::LISTENER_RIGHT_VECTOR: mListenerRightVector = aParam; break; + case PannerNode::LISTENER_VELOCITY: mListenerVelocity = aParam; break; + case PannerNode::POSITION: + mPositionX.SetValue(aParam.x); + mPositionY.SetValue(aParam.y); + mPositionZ.SetValue(aParam.z); + break; + case PannerNode::ORIENTATION: + mOrientationX.SetValue(aParam.x); + mOrientationY.SetValue(aParam.y); + 
mOrientationZ.SetValue(aParam.z); + break; + case PannerNode::VELOCITY: mVelocity = aParam; break; + default: + NS_ERROR("Bad PannerNodeEngine ThreeDPointParameter"); + } + } + void SetDoubleParameter(uint32_t aIndex, double aParam) override + { + switch (aIndex) { + case PannerNode::LISTENER_DOPPLER_FACTOR: mListenerDopplerFactor = aParam; break; + case PannerNode::LISTENER_SPEED_OF_SOUND: mListenerSpeedOfSound = aParam; break; + case PannerNode::REF_DISTANCE: mRefDistance = aParam; break; + case PannerNode::MAX_DISTANCE: mMaxDistance = aParam; break; + case PannerNode::ROLLOFF_FACTOR: mRolloffFactor = aParam; break; + case PannerNode::CONE_INNER_ANGLE: mConeInnerAngle = aParam; break; + case PannerNode::CONE_OUTER_ANGLE: mConeOuterAngle = aParam; break; + case PannerNode::CONE_OUTER_GAIN: mConeOuterGain = aParam; break; + default: + NS_ERROR("Bad PannerNodeEngine DoubleParameter"); + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool *aFinished) override + { + if (aInput.IsNull()) { + // mLeftOverData != INT_MIN means that the panning model was HRTF and a + // tail-time reference was added. Even if the model is now equalpower, + // the reference will need to be removed. 
+ if (mLeftOverData > 0 && + mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) { + mLeftOverData -= WEBAUDIO_BLOCK_SIZE; + } else { + if (mLeftOverData != INT_MIN) { + mLeftOverData = INT_MIN; + aStream->ScheduleCheckForInactive(); + mHRTFPanner->reset(); + + RefPtr<PlayingRefChangeHandler> refchanged = + new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::RELEASE); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + } else if (mPanningModelFunction == &PannerNodeEngine::HRTFPanningFunction) { + if (mLeftOverData == INT_MIN) { + RefPtr<PlayingRefChangeHandler> refchanged = + new PlayingRefChangeHandler(aStream, PlayingRefChangeHandler::ADDREF); + aStream->Graph()-> + DispatchToMainThreadAfterStreamStateUpdate(refchanged.forget()); + } + mLeftOverData = mHRTFPanner->maxTailFrames(); + } + + StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom); + (this->*mPanningModelFunction)(aInput, aOutput, tick); + } + + bool IsActive() const override + { + return mLeftOverData != INT_MIN; + } + + void ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation); + float ComputeConeGain(const ThreeDPoint& position, const ThreeDPoint& orientation); + // Compute how much the distance contributes to the gain reduction. 
+ double ComputeDistanceGain(const ThreeDPoint& position); + + void EqualPowerPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick); + void HRTFPanningFunction(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick); + + float LinearGainFunction(double aDistance); + float InverseGainFunction(double aDistance); + float ExponentialGainFunction(double aDistance); + + ThreeDPoint ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime& tick); + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + if (mHRTFPanner) { + amount += mHRTFPanner->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + AudioNodeStream* mDestination; + // This member is set on the main thread, but is not accessed on the rendering + // thread untile mPanningModelFunction has changed, and this happens strictly + // later, via a MediaStreamGraph ControlMessage. 
+ nsAutoPtr<HRTFPanner> mHRTFPanner; + typedef void (PannerNodeEngine::*PanningModelFunction)(const AudioBlock& aInput, AudioBlock* aOutput, StreamTime tick); + PanningModelFunction mPanningModelFunction; + typedef float (PannerNodeEngine::*DistanceModelFunction)(double aDistance); + DistanceModelFunction mDistanceModelFunction; + AudioParamTimeline mPositionX; + AudioParamTimeline mPositionY; + AudioParamTimeline mPositionZ; + AudioParamTimeline mOrientationX; + AudioParamTimeline mOrientationY; + AudioParamTimeline mOrientationZ; + ThreeDPoint mVelocity; + double mRefDistance; + double mMaxDistance; + double mRolloffFactor; + double mConeInnerAngle; + double mConeOuterAngle; + double mConeOuterGain; + ThreeDPoint mListenerPosition; + ThreeDPoint mListenerFrontVector; + ThreeDPoint mListenerRightVector; + ThreeDPoint mListenerVelocity; + double mListenerDopplerFactor; + double mListenerSpeedOfSound; + int mLeftOverData; +}; + +PannerNode::PannerNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Clamped_max, + ChannelInterpretation::Speakers) + // Please keep these default values consistent with PannerNodeEngine::PannerNodeEngine above. + , mPanningModel(PanningModelType::Equalpower) + , mDistanceModel(DistanceModelType::Inverse) + , mPositionX(new AudioParam(this, PannerNode::POSITIONX, 0., this->NodeType())) + , mPositionY(new AudioParam(this, PannerNode::POSITIONY, 0., this->NodeType())) + , mPositionZ(new AudioParam(this, PannerNode::POSITIONZ, 0., this->NodeType())) + , mOrientationX(new AudioParam(this, PannerNode::ORIENTATIONX, 1., this->NodeType())) + , mOrientationY(new AudioParam(this, PannerNode::ORIENTATIONY, 0., this->NodeType())) + , mOrientationZ(new AudioParam(this, PannerNode::ORIENTATIONZ, 0., this->NodeType())) + , mVelocity() + , mRefDistance(1.) + , mMaxDistance(10000.) + , mRolloffFactor(1.) + , mConeInnerAngle(360.) + , mConeOuterAngle(360.) + , mConeOuterGain(0.) 
+{ + mStream = AudioNodeStream::Create(aContext, + new PannerNodeEngine(this, aContext->Destination()), + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); + // We should register once we have set up our stream and engine. + Context()->Listener()->RegisterPannerNode(this); +} + +PannerNode::~PannerNode() +{ + if (Context()) { + Context()->UnregisterPannerNode(this); + } +} + +void PannerNode::SetPanningModel(PanningModelType aPanningModel) +{ + mPanningModel = aPanningModel; + if (mPanningModel == PanningModelType::HRTF) { + // We can set the engine's `mHRTFPanner` member here from the main thread, + // because the engine will not touch it from the MediaStreamGraph + // thread until the PANNING_MODEL message sent below is received. + static_cast<PannerNodeEngine*>(mStream->Engine())->CreateHRTFPanner(); + } + SendInt32ParameterToStream(PANNING_MODEL, int32_t(mPanningModel)); +} + +size_t +PannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mSources.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +size_t +PannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +PannerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return PannerNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void PannerNode::DestroyMediaStream() +{ + if (Context()) { + Context()->UnregisterPannerNode(this); + } + AudioNode::DestroyMediaStream(); +} + +// Those three functions are described in the spec. 
+float +PannerNodeEngine::LinearGainFunction(double aDistance) +{ + return 1 - mRolloffFactor * (std::max(std::min(aDistance, mMaxDistance), mRefDistance) - mRefDistance) / (mMaxDistance - mRefDistance); +} + +float +PannerNodeEngine::InverseGainFunction(double aDistance) +{ + return mRefDistance / (mRefDistance + mRolloffFactor * (std::max(aDistance, mRefDistance) - mRefDistance)); +} + +float +PannerNodeEngine::ExponentialGainFunction(double aDistance) +{ + return pow(std::max(aDistance, mRefDistance) / mRefDistance, -mRolloffFactor); +} + +void +PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput, + AudioBlock* aOutput, + StreamTime tick) +{ + // The output of this node is always stereo, no matter what the inputs are. + aOutput->AllocateChannels(2); + + float azimuth, elevation; + + ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick); + ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick); + if (!orientation.IsZero()) { + orientation.Normalize(); + } + ComputeAzimuthAndElevation(position, azimuth, elevation); + + AudioBlock input = aInput; + // Gain is applied before the delay and convolution of the HRTF. 
+ input.mVolume *= ComputeConeGain(position, orientation) * ComputeDistanceGain(position); + + mHRTFPanner->pan(azimuth, elevation, &input, aOutput); +} + +ThreeDPoint +PannerNodeEngine::ConvertAudioParamTimelineTo3DP(AudioParamTimeline& aX, AudioParamTimeline& aY, AudioParamTimeline& aZ, StreamTime &tick) +{ + return ThreeDPoint(aX.GetValueAtTime(tick), + aY.GetValueAtTime(tick), + aZ.GetValueAtTime(tick)); +} + +void +PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput, + AudioBlock* aOutput, + StreamTime tick) +{ + float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain; + int inputChannels = aInput.ChannelCount(); + + // Optimize the case where the position and orientation is constant for this + // processing block: we can just apply a constant gain on the left and right + // channel + if (mPositionX.HasSimpleValue() && + mPositionY.HasSimpleValue() && + mPositionZ.HasSimpleValue() && + mOrientationX.HasSimpleValue() && + mOrientationY.HasSimpleValue() && + mOrientationZ.HasSimpleValue()) { + + ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick); + ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick); + if (!orientation.IsZero()) { + orientation.Normalize(); + } + + // If both the listener are in the same spot, and no cone gain is specified, + // this node is noop. + if (mListenerPosition == position && + mConeInnerAngle == 360 && + mConeOuterAngle == 360) { + *aOutput = aInput; + return; + } + + // The output of this node is always stereo, no matter what the inputs are. + aOutput->AllocateChannels(2); + + ComputeAzimuthAndElevation(position, azimuth, elevation); + coneGain = ComputeConeGain(position, orientation); + + // The following algorithm is described in the spec. + // Clamp azimuth in the [-90, 90] range. 
+ azimuth = min(180.f, max(-180.f, azimuth)); + + // Wrap around + if (azimuth < -90.f) { + azimuth = -180.f - azimuth; + } else if (azimuth > 90) { + azimuth = 180.f - azimuth; + } + + // Normalize the value in the [0, 1] range. + if (inputChannels == 1) { + normalizedAzimuth = (azimuth + 90.f) / 180.f; + } else { + if (azimuth <= 0) { + normalizedAzimuth = (azimuth + 90.f) / 90.f; + } else { + normalizedAzimuth = azimuth / 90.f; + } + } + + distanceGain = ComputeDistanceGain(position); + + // Actually compute the left and right gain. + gainL = cos(0.5 * M_PI * normalizedAzimuth); + gainR = sin(0.5 * M_PI * normalizedAzimuth); + + // Compute the output. + ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0); + + aOutput->mVolume = aInput.mVolume * distanceGain * coneGain; + } else { + float positionX[WEBAUDIO_BLOCK_SIZE]; + float positionY[WEBAUDIO_BLOCK_SIZE]; + float positionZ[WEBAUDIO_BLOCK_SIZE]; + float orientationX[WEBAUDIO_BLOCK_SIZE]; + float orientationY[WEBAUDIO_BLOCK_SIZE]; + float orientationZ[WEBAUDIO_BLOCK_SIZE]; + + // The output of this node is always stereo, no matter what the inputs are. 
+ aOutput->AllocateChannels(2); + + if (!mPositionX.HasSimpleValue()) { + mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE); + } else { + positionX[0] = mPositionX.GetValueAtTime(tick); + } + if (!mPositionY.HasSimpleValue()) { + mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE); + } else { + positionY[0] = mPositionY.GetValueAtTime(tick); + } + if (!mPositionZ.HasSimpleValue()) { + mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE); + } else { + positionZ[0] = mPositionZ.GetValueAtTime(tick); + } + if (!mOrientationX.HasSimpleValue()) { + mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE); + } else { + orientationX[0] = mOrientationX.GetValueAtTime(tick); + } + if (!mOrientationY.HasSimpleValue()) { + mOrientationY.GetValuesAtTime(tick, orientationY, WEBAUDIO_BLOCK_SIZE); + } else { + orientationY[0] = mOrientationY.GetValueAtTime(tick); + } + if (!mOrientationZ.HasSimpleValue()) { + mOrientationZ.GetValuesAtTime(tick, orientationZ, WEBAUDIO_BLOCK_SIZE); + } else { + orientationZ[0] = mOrientationZ.GetValueAtTime(tick); + } + + float computedGain[2*WEBAUDIO_BLOCK_SIZE + 4]; + bool onLeft[WEBAUDIO_BLOCK_SIZE]; + + float* alignedComputedGain = ALIGNED16(computedGain); + ASSERT_ALIGNED16(alignedComputedGain); + for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) { + ThreeDPoint position(mPositionX.HasSimpleValue() ? positionX[0] : positionX[counter], + mPositionY.HasSimpleValue() ? positionY[0] : positionY[counter], + mPositionZ.HasSimpleValue() ? positionZ[0] : positionZ[counter]); + ThreeDPoint orientation(mOrientationX.HasSimpleValue() ? orientationX[0] : orientationX[counter], + mOrientationY.HasSimpleValue() ? orientationY[0] : orientationY[counter], + mOrientationZ.HasSimpleValue() ? 
orientationZ[0] : orientationZ[counter]); + if (!orientation.IsZero()) { + orientation.Normalize(); + } + + ComputeAzimuthAndElevation(position, azimuth, elevation); + coneGain = ComputeConeGain(position, orientation); + + // The following algorithm is described in the spec. + // Clamp azimuth in the [-90, 90] range. + azimuth = min(180.f, max(-180.f, azimuth)); + + // Wrap around + if (azimuth < -90.f) { + azimuth = -180.f - azimuth; + } else if (azimuth > 90) { + azimuth = 180.f - azimuth; + } + + // Normalize the value in the [0, 1] range. + if (inputChannels == 1) { + normalizedAzimuth = (azimuth + 90.f) / 180.f; + } else { + if (azimuth <= 0) { + normalizedAzimuth = (azimuth + 90.f) / 90.f; + } else { + normalizedAzimuth = azimuth / 90.f; + } + } + + distanceGain = ComputeDistanceGain(position); + + // Actually compute the left and right gain. + float gainL = cos(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume * distanceGain * coneGain; + float gainR = sin(0.5 * M_PI * normalizedAzimuth) * aInput.mVolume * distanceGain * coneGain; + + alignedComputedGain[counter] = gainL; + alignedComputedGain[WEBAUDIO_BLOCK_SIZE + counter] = gainR; + onLeft[counter] = azimuth <= 0; + } + + // Apply the gain to the output buffer + ApplyStereoPanning(aInput, aOutput, alignedComputedGain, &alignedComputedGain[WEBAUDIO_BLOCK_SIZE], onLeft); + + } +} + +// This algorithm is specified in the webaudio spec. +void +PannerNodeEngine::ComputeAzimuthAndElevation(const ThreeDPoint& position, float& aAzimuth, float& aElevation) +{ + ThreeDPoint sourceListener = position - mListenerPosition; + if (sourceListener.IsZero()) { + aAzimuth = 0.0; + aElevation = 0.0; + return; + } + + sourceListener.Normalize(); + + // Project the source-listener vector on the x-z plane. 
+ const ThreeDPoint& listenerFront = mListenerFrontVector; + const ThreeDPoint& listenerRight = mListenerRightVector; + ThreeDPoint up = listenerRight.CrossProduct(listenerFront); + + double upProjection = sourceListener.DotProduct(up); + aElevation = 90 - 180 * acos(upProjection) / M_PI; + + if (aElevation > 90) { + aElevation = 180 - aElevation; + } else if (aElevation < -90) { + aElevation = -180 - aElevation; + } + + ThreeDPoint projectedSource = sourceListener - up * upProjection; + if (projectedSource.IsZero()) { + // source - listener direction is up or down. + aAzimuth = 0.0; + return; + } + projectedSource.Normalize(); + + // Actually compute the angle, and convert to degrees + double projection = projectedSource.DotProduct(listenerRight); + aAzimuth = 180 * acos(projection) / M_PI; + + // Compute whether the source is in front or behind the listener. + double frontBack = projectedSource.DotProduct(listenerFront); + if (frontBack < 0) { + aAzimuth = 360 - aAzimuth; + } + // Rotate the azimuth so it is relative to the listener front vector instead + // of the right vector. + if ((aAzimuth >= 0) && (aAzimuth <= 270)) { + aAzimuth = 90 - aAzimuth; + } else { + aAzimuth = 450 - aAzimuth; + } +} + +// This algorithm is described in the WebAudio spec. 
+float +PannerNodeEngine::ComputeConeGain(const ThreeDPoint& position, + const ThreeDPoint& orientation) +{ + // Omnidirectional source + if (orientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) { + return 1; + } + + // Normalized source-listener vector + ThreeDPoint sourceToListener = mListenerPosition - position; + sourceToListener.Normalize(); + + // Angle between the source orientation vector and the source-listener vector + double dotProduct = sourceToListener.DotProduct(orientation); + double angle = 180 * acos(dotProduct) / M_PI; + double absAngle = fabs(angle); + + // Divide by 2 here since API is entire angle (not half-angle) + double absInnerAngle = fabs(mConeInnerAngle) / 2; + double absOuterAngle = fabs(mConeOuterAngle) / 2; + double gain = 1; + + if (absAngle <= absInnerAngle) { + // No attenuation + gain = 1; + } else if (absAngle >= absOuterAngle) { + // Max attenuation + gain = mConeOuterGain; + } else { + // Between inner and outer cones + // inner -> outer, x goes from 0 -> 1 + double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle); + gain = (1 - x) + mConeOuterGain * x; + } + + return gain; +} + +double +PannerNodeEngine::ComputeDistanceGain(const ThreeDPoint& position) +{ + ThreeDPoint distanceVec = position - mListenerPosition; + float distance = sqrt(distanceVec.DotProduct(distanceVec)); + return std::max(0.0f, (this->*mDistanceModelFunction)(distance)); +} + +float +PannerNode::ComputeDopplerShift() +{ + double dopplerShift = 1.0; // Initialize to default value + + AudioListener* listener = Context()->Listener(); + + if (listener->DopplerFactor() > 0) { + // Don't bother if both source and listener have no velocity. + if (!mVelocity.IsZero() || !listener->Velocity().IsZero()) { + // Calculate the source to listener vector. 
+ ThreeDPoint sourceToListener = ConvertAudioParamTo3DP(mPositionX, mPositionY, mPositionZ) - listener->Velocity(); + + double sourceListenerMagnitude = sourceToListener.Magnitude(); + + double listenerProjection = sourceToListener.DotProduct(listener->Velocity()) / sourceListenerMagnitude; + double sourceProjection = sourceToListener.DotProduct(mVelocity) / sourceListenerMagnitude; + + listenerProjection = -listenerProjection; + sourceProjection = -sourceProjection; + + double scaledSpeedOfSound = listener->SpeedOfSound() / listener->DopplerFactor(); + listenerProjection = min(listenerProjection, scaledSpeedOfSound); + sourceProjection = min(sourceProjection, scaledSpeedOfSound); + + dopplerShift = ((listener->SpeedOfSound() - listener->DopplerFactor() * listenerProjection) / (listener->SpeedOfSound() - listener->DopplerFactor() * sourceProjection)); + + WebAudioUtils::FixNaN(dopplerShift); // Avoid illegal values + + // Limit the pitch shifting to 4 octaves up and 3 octaves down. + dopplerShift = min(dopplerShift, 16.); + dopplerShift = max(dopplerShift, 0.125); + } + } + + return dopplerShift; +} + +void +PannerNode::FindConnectedSources() +{ + mSources.Clear(); + std::set<AudioNode*> cycleSet; + FindConnectedSources(this, mSources, cycleSet); +} + +void +PannerNode::FindConnectedSources(AudioNode* aNode, + nsTArray<AudioBufferSourceNode*>& aSources, + std::set<AudioNode*>& aNodesSeen) +{ + if (!aNode) { + return; + } + + const nsTArray<InputNode>& inputNodes = aNode->InputNodes(); + + for(unsigned i = 0; i < inputNodes.Length(); i++) { + // Return if we find a node that we have seen already. + if (aNodesSeen.find(inputNodes[i].mInputNode) != aNodesSeen.end()) { + return; + } + aNodesSeen.insert(inputNodes[i].mInputNode); + // Recurse + FindConnectedSources(inputNodes[i].mInputNode, aSources, aNodesSeen); + + // Check if this node is an AudioBufferSourceNode that still have a stream, + // which means it has not finished playing. 
+ AudioBufferSourceNode* node = inputNodes[i].mInputNode->AsAudioBufferSourceNode(); + if (node && node->GetStream()) { + aSources.AppendElement(node); + } + } +} + +void +PannerNode::SendDopplerToSourcesIfNeeded() +{ + // Don't bother sending the doppler shift if both the source and the listener + // are not moving, because the doppler shift is going to be 1.0. + if (!(Context()->Listener()->Velocity().IsZero() && mVelocity.IsZero())) { + for(uint32_t i = 0; i < mSources.Length(); i++) { + mSources[i]->SendDopplerShiftToStream(ComputeDopplerShift()); + } + } +} + + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/PannerNode.h b/dom/media/webaudio/PannerNode.h new file mode 100644 index 000000000..184db4603 --- /dev/null +++ b/dom/media/webaudio/PannerNode.h @@ -0,0 +1,296 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef PannerNode_h_ +#define PannerNode_h_ + +#include "AudioNode.h" +#include "AudioParam.h" +#include "mozilla/dom/PannerNodeBinding.h" +#include "ThreeDPoint.h" +#include "mozilla/WeakPtr.h" +#include <limits> +#include <set> + +namespace mozilla { +namespace dom { + +class AudioContext; +class AudioBufferSourceNode; + +class PannerNode final : public AudioNode, + public SupportsWeakPtr<PannerNode> +{ +public: + MOZ_DECLARE_WEAKREFERENCE_TYPENAME(PannerNode) + explicit PannerNode(AudioContext* aContext); + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + void DestroyMediaStream() override; + + void SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv) override + { + if (aChannelCount > 2) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + AudioNode::SetChannelCount(aChannelCount, aRv); + } + void SetChannelCountModeValue(ChannelCountMode aMode, ErrorResult& aRv) override + { + if (aMode == ChannelCountMode::Max) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + AudioNode::SetChannelCountModeValue(aMode, aRv); + } + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(PannerNode, AudioNode) + + PanningModelType PanningModel() const + { + return mPanningModel; + } + void SetPanningModel(PanningModelType aPanningModel); + + DistanceModelType DistanceModel() const + { + return mDistanceModel; + } + void SetDistanceModel(DistanceModelType aDistanceModel) + { + mDistanceModel = aDistanceModel; + SendInt32ParameterToStream(DISTANCE_MODEL, int32_t(mDistanceModel)); + } + + void SetPosition(double aX, double aY, double aZ) + { + if (fabs(aX) > std::numeric_limits<float>::max() || + fabs(aY) > std::numeric_limits<float>::max() || + fabs(aZ) > std::numeric_limits<float>::max()) { + return; + } + mPositionX->SetValue(aX); + mPositionY->SetValue(aY); + mPositionZ->SetValue(aZ); + SendThreeDPointParameterToStream(POSITION, ConvertAudioParamTo3DP(mPositionX, mPositionY, 
mPositionZ)); + } + + void SetOrientation(double aX, double aY, double aZ) + { + if (fabs(aX) > std::numeric_limits<float>::max() || + fabs(aY) > std::numeric_limits<float>::max() || + fabs(aZ) > std::numeric_limits<float>::max()) { + return; + } + mOrientationX->SetValue(aX); + mOrientationY->SetValue(aY); + mOrientationZ->SetValue(aZ); + SendThreeDPointParameterToStream(ORIENTATION, ConvertAudioParamTo3DP(mOrientationX, mOrientationY, mOrientationZ)); + } + + void SetVelocity(double aX, double aY, double aZ) + { + if (WebAudioUtils::FuzzyEqual(mVelocity.x, aX) && + WebAudioUtils::FuzzyEqual(mVelocity.y, aY) && + WebAudioUtils::FuzzyEqual(mVelocity.z, aZ)) { + return; + } + mVelocity.x = aX; + mVelocity.y = aY; + mVelocity.z = aZ; + SendThreeDPointParameterToStream(VELOCITY, mVelocity); + SendDopplerToSourcesIfNeeded(); + } + + double RefDistance() const + { + return mRefDistance; + } + void SetRefDistance(double aRefDistance) + { + if (WebAudioUtils::FuzzyEqual(mRefDistance, aRefDistance)) { + return; + } + mRefDistance = aRefDistance; + SendDoubleParameterToStream(REF_DISTANCE, mRefDistance); + } + + double MaxDistance() const + { + return mMaxDistance; + } + void SetMaxDistance(double aMaxDistance) + { + if (WebAudioUtils::FuzzyEqual(mMaxDistance, aMaxDistance)) { + return; + } + mMaxDistance = aMaxDistance; + SendDoubleParameterToStream(MAX_DISTANCE, mMaxDistance); + } + + double RolloffFactor() const + { + return mRolloffFactor; + } + void SetRolloffFactor(double aRolloffFactor) + { + if (WebAudioUtils::FuzzyEqual(mRolloffFactor, aRolloffFactor)) { + return; + } + mRolloffFactor = aRolloffFactor; + SendDoubleParameterToStream(ROLLOFF_FACTOR, mRolloffFactor); + } + + double ConeInnerAngle() const + { + return mConeInnerAngle; + } + void SetConeInnerAngle(double aConeInnerAngle) + { + if (WebAudioUtils::FuzzyEqual(mConeInnerAngle, aConeInnerAngle)) { + return; + } + mConeInnerAngle = aConeInnerAngle; + SendDoubleParameterToStream(CONE_INNER_ANGLE, 
mConeInnerAngle); + } + + double ConeOuterAngle() const + { + return mConeOuterAngle; + } + void SetConeOuterAngle(double aConeOuterAngle) + { + if (WebAudioUtils::FuzzyEqual(mConeOuterAngle, aConeOuterAngle)) { + return; + } + mConeOuterAngle = aConeOuterAngle; + SendDoubleParameterToStream(CONE_OUTER_ANGLE, mConeOuterAngle); + } + + double ConeOuterGain() const + { + return mConeOuterGain; + } + void SetConeOuterGain(double aConeOuterGain) + { + if (WebAudioUtils::FuzzyEqual(mConeOuterGain, aConeOuterGain)) { + return; + } + mConeOuterGain = aConeOuterGain; + SendDoubleParameterToStream(CONE_OUTER_GAIN, mConeOuterGain); + } + + AudioParam* PositionX() + { + return mPositionX; + } + + AudioParam* PositionY() + { + return mPositionY; + } + + AudioParam* PositionZ() + { + return mPositionZ; + } + + AudioParam* OrientationX() + { + return mOrientationX; + } + + AudioParam* OrientationY() + { + return mOrientationY; + } + + AudioParam* OrientationZ() + { + return mOrientationZ; + } + + + float ComputeDopplerShift(); + void SendDopplerToSourcesIfNeeded(); + void FindConnectedSources(); + void FindConnectedSources(AudioNode* aNode, nsTArray<AudioBufferSourceNode*>& aSources, std::set<AudioNode*>& aSeenNodes); + + const char* NodeType() const override + { + return "PannerNode"; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~PannerNode(); + +private: + friend class AudioListener; + friend class PannerNodeEngine; + enum EngineParameters { + LISTENER_POSITION, + LISTENER_FRONT_VECTOR, // unit length + LISTENER_RIGHT_VECTOR, // unit length, orthogonal to LISTENER_FRONT_VECTOR + LISTENER_VELOCITY, + LISTENER_DOPPLER_FACTOR, + LISTENER_SPEED_OF_SOUND, + PANNING_MODEL, + DISTANCE_MODEL, + POSITION, + POSITIONX, + POSITIONY, + POSITIONZ, + ORIENTATION, // unit length or zero + ORIENTATIONX, + ORIENTATIONY, + ORIENTATIONZ, + VELOCITY, + REF_DISTANCE, + 
MAX_DISTANCE, + ROLLOFF_FACTOR, + CONE_INNER_ANGLE, + CONE_OUTER_ANGLE, + CONE_OUTER_GAIN + }; + + ThreeDPoint ConvertAudioParamTo3DP(RefPtr <AudioParam> aX, RefPtr <AudioParam> aY, RefPtr <AudioParam> aZ) + { + return ThreeDPoint(aX->GetValue(), aY->GetValue(), aZ->GetValue()); + } + + PanningModelType mPanningModel; + DistanceModelType mDistanceModel; + RefPtr<AudioParam> mPositionX; + RefPtr<AudioParam> mPositionY; + RefPtr<AudioParam> mPositionZ; + RefPtr<AudioParam> mOrientationX; + RefPtr<AudioParam> mOrientationY; + RefPtr<AudioParam> mOrientationZ; + ThreeDPoint mVelocity; + + double mRefDistance; + double mMaxDistance; + double mRolloffFactor; + double mConeInnerAngle; + double mConeOuterAngle; + double mConeOuterGain; + + // An array of all the AudioBufferSourceNode connected directly or indirectly + // to this AudioPannerNode. + nsTArray<AudioBufferSourceNode*> mSources; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/PanningUtils.h b/dom/media/webaudio/PanningUtils.h new file mode 100644 index 000000000..a3be3f45e --- /dev/null +++ b/dom/media/webaudio/PanningUtils.h @@ -0,0 +1,65 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef PANNING_UTILS_H +#define PANNING_UTILS_H + +#include "AudioSegment.h" +#include "AudioNodeEngine.h" + +namespace mozilla { +namespace dom { + +template<typename T> +void +GainMonoToStereo(const AudioBlock& aInput, AudioBlock* aOutput, + T aGainL, T aGainR) +{ + float* outputL = aOutput->ChannelFloatsForWrite(0); + float* outputR = aOutput->ChannelFloatsForWrite(1); + const float* input = static_cast<const float*>(aInput.mChannelData[0]); + + MOZ_ASSERT(aInput.ChannelCount() == 1); + MOZ_ASSERT(aOutput->ChannelCount() == 2); + + AudioBlockPanMonoToStereo(input, aGainL, aGainR, outputL, outputR); +} + +// T can be float or an array of float, and U can be bool or an array of bool, +// depending if the value of the parameters are constant for this block. +template<typename T, typename U> +void +GainStereoToStereo(const AudioBlock& aInput, AudioBlock* aOutput, + T aGainL, T aGainR, U aOnLeft) +{ + float* outputL = aOutput->ChannelFloatsForWrite(0); + float* outputR = aOutput->ChannelFloatsForWrite(1); + const float* inputL = static_cast<const float*>(aInput.mChannelData[0]); + const float* inputR = static_cast<const float*>(aInput.mChannelData[1]); + + MOZ_ASSERT(aInput.ChannelCount() == 2); + MOZ_ASSERT(aOutput->ChannelCount() == 2); + + AudioBlockPanStereoToStereo(inputL, inputR, aGainL, aGainR, aOnLeft, outputL, outputR); +} + +// T can be float or an array of float, and U can be bool or an array of bool, +// depending if the value of the parameters are constant for this block. 
+template<typename T, typename U> +void ApplyStereoPanning(const AudioBlock& aInput, AudioBlock* aOutput, + T aGainL, T aGainR, U aOnLeft) +{ + if (aInput.ChannelCount() == 1) { + GainMonoToStereo(aInput, aOutput, aGainL, aGainR); + } else { + GainStereoToStereo(aInput, aOutput, aGainL, aGainR, aOnLeft); + } +} + +} // namespace dom +} // namespace mozilla + +#endif // PANNING_UTILS_H diff --git a/dom/media/webaudio/PeriodicWave.cpp b/dom/media/webaudio/PeriodicWave.cpp new file mode 100644 index 000000000..396a93e13 --- /dev/null +++ b/dom/media/webaudio/PeriodicWave.cpp @@ -0,0 +1,74 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "PeriodicWave.h" +#include "AudioContext.h" +#include "mozilla/dom/PeriodicWaveBinding.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(PeriodicWave, mContext) + +NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(PeriodicWave, AddRef) +NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(PeriodicWave, Release) + +PeriodicWave::PeriodicWave(AudioContext* aContext, + const float* aRealData, + const float* aImagData, + const uint32_t aLength, + const bool aDisableNormalization, + ErrorResult& aRv) + : mContext(aContext) + , mDisableNormalization(aDisableNormalization) +{ + MOZ_ASSERT(aContext); + + // Caller should have checked this and thrown. + MOZ_ASSERT(aLength > 0); + mLength = aLength; + + // Copy coefficient data. The two arrays share an allocation. 
+ mCoefficients = new ThreadSharedFloatArrayBufferList(2); + float* buffer = static_cast<float*>(malloc(aLength*sizeof(float)*2)); + if (buffer == nullptr) { + aRv.Throw(NS_ERROR_OUT_OF_MEMORY); + return; + } + PodCopy(buffer, aRealData, aLength); + mCoefficients->SetData(0, buffer, free, buffer); + PodCopy(buffer+aLength, aImagData, aLength); + mCoefficients->SetData(1, nullptr, free, buffer+aLength); +} + +size_t +PeriodicWave::SizeOfExcludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const +{ + // Not owned: + // - mContext + size_t amount = 0; + if (!mCoefficients->IsShared()) { + amount += mCoefficients->SizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +size_t +PeriodicWave::SizeOfIncludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThisIfNotShared(aMallocSizeOf); +} + +JSObject* +PeriodicWave::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return PeriodicWaveBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/PeriodicWave.h b/dom/media/webaudio/PeriodicWave.h new file mode 100644 index 000000000..b67d597e4 --- /dev/null +++ b/dom/media/webaudio/PeriodicWave.h @@ -0,0 +1,70 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef PeriodicWave_h_ +#define PeriodicWave_h_ + +#include "nsWrapperCache.h" +#include "nsCycleCollectionParticipant.h" +#include "mozilla/Attributes.h" +#include "AudioContext.h" +#include "AudioNodeEngine.h" + +namespace mozilla { + +namespace dom { + +class PeriodicWave final : public nsWrapperCache +{ +public: + PeriodicWave(AudioContext* aContext, + const float* aRealData, + const float* aImagData, + const uint32_t aLength, + const bool aDisableNormalization, + ErrorResult& aRv); + + NS_INLINE_DECL_CYCLE_COLLECTING_NATIVE_REFCOUNTING(PeriodicWave) + NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(PeriodicWave) + + AudioContext* GetParentObject() const + { + return mContext; + } + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + uint32_t DataLength() const + { + return mLength; + } + + bool DisableNormalization() const + { + return mDisableNormalization; + } + + ThreadSharedFloatArrayBufferList* GetThreadSharedBuffer() const + { + return mCoefficients; + } + + size_t SizeOfExcludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const; + size_t SizeOfIncludingThisIfNotShared(MallocSizeOf aMallocSizeOf) const; + +private: + ~PeriodicWave() {} + + RefPtr<AudioContext> mContext; + RefPtr<ThreadSharedFloatArrayBufferList> mCoefficients; + uint32_t mLength; + bool mDisableNormalization; +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/PlayingRefChangeHandler.h b/dom/media/webaudio/PlayingRefChangeHandler.h new file mode 100644 index 000000000..6436d1dbc --- /dev/null +++ b/dom/media/webaudio/PlayingRefChangeHandler.h @@ -0,0 +1,48 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef PlayingRefChangeHandler_h__ +#define PlayingRefChangeHandler_h__ + +#include "nsThreadUtils.h" +#include "AudioNodeStream.h" + +namespace mozilla { +namespace dom { + +class PlayingRefChangeHandler final : public Runnable +{ +public: + enum ChangeType { ADDREF, RELEASE }; + PlayingRefChangeHandler(AudioNodeStream* aStream, ChangeType aChange) + : mStream(aStream) + , mChange(aChange) + { + } + + NS_IMETHOD Run() override + { + RefPtr<AudioNode> node = mStream->Engine()->NodeMainThread(); + if (node) { + if (mChange == ADDREF) { + node->MarkActive(); + } else if (mChange == RELEASE) { + node->MarkInactive(); + } + } + return NS_OK; + } + +private: + RefPtr<AudioNodeStream> mStream; + ChangeType mChange; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/ReportDecodeResultTask.h b/dom/media/webaudio/ReportDecodeResultTask.h new file mode 100644 index 000000000..5d34f3438 --- /dev/null +++ b/dom/media/webaudio/ReportDecodeResultTask.h @@ -0,0 +1,43 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef ReportDecodeResultTask_h_ +#define ReportDecodeResultTask_h_ + +#include "mozilla/Attributes.h" +#include "MediaBufferDecoder.h" + +namespace mozilla { + +class ReportDecodeResultTask final : public Runnable +{ +public: + ReportDecodeResultTask(DecodeJob& aDecodeJob, + DecodeJob::ResultFn aFunction) + : mDecodeJob(aDecodeJob) + , mFunction(aFunction) + { + MOZ_ASSERT(aFunction); + } + + NS_IMETHOD Run() override + { + MOZ_ASSERT(NS_IsMainThread()); + + (mDecodeJob.*mFunction)(); + + return NS_OK; + } + +private: + DecodeJob& mDecodeJob; + DecodeJob::ResultFn mFunction; +}; + +} + +#endif + diff --git a/dom/media/webaudio/ScriptProcessorNode.cpp b/dom/media/webaudio/ScriptProcessorNode.cpp new file mode 100644 index 000000000..3b5df51ef --- /dev/null +++ b/dom/media/webaudio/ScriptProcessorNode.cpp @@ -0,0 +1,573 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "ScriptProcessorNode.h" +#include "mozilla/dom/ScriptProcessorNodeBinding.h" +#include "AudioBuffer.h" +#include "AudioDestinationNode.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioProcessingEvent.h" +#include "WebAudioUtils.h" +#include "mozilla/dom/ScriptSettings.h" +#include "mozilla/Mutex.h" +#include "mozilla/PodOperations.h" +#include "nsAutoPtr.h" +#include <deque> + +namespace mozilla { +namespace dom { + +// The maximum latency, in seconds, that we can live with before dropping +// buffers. +static const float MAX_LATENCY_S = 0.5; + +NS_IMPL_ISUPPORTS_INHERITED0(ScriptProcessorNode, AudioNode) + +// This class manages a queue of output buffers shared between +// the main thread and the Media Stream Graph thread. 
+class SharedBuffers final +{ +private: + class OutputQueue final + { + public: + explicit OutputQueue(const char* aName) + : mMutex(aName) + {} + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + mMutex.AssertCurrentThreadOwns(); + + size_t amount = 0; + for (size_t i = 0; i < mBufferList.size(); i++) { + amount += mBufferList[i].SizeOfExcludingThis(aMallocSizeOf, false); + } + + return amount; + } + + Mutex& Lock() const { return const_cast<OutputQueue*>(this)->mMutex; } + + size_t ReadyToConsume() const + { + // Accessed on both main thread and media graph thread. + mMutex.AssertCurrentThreadOwns(); + return mBufferList.size(); + } + + // Produce one buffer + AudioChunk& Produce() + { + mMutex.AssertCurrentThreadOwns(); + MOZ_ASSERT(NS_IsMainThread()); + mBufferList.push_back(AudioChunk()); + return mBufferList.back(); + } + + // Consumes one buffer. + AudioChunk Consume() + { + mMutex.AssertCurrentThreadOwns(); + MOZ_ASSERT(!NS_IsMainThread()); + MOZ_ASSERT(ReadyToConsume() > 0); + AudioChunk front = mBufferList.front(); + mBufferList.pop_front(); + return front; + } + + // Empties the buffer queue. + void Clear() + { + mMutex.AssertCurrentThreadOwns(); + mBufferList.clear(); + } + + private: + typedef std::deque<AudioChunk> BufferList; + + // Synchronizes access to mBufferList. Note that it's the responsibility + // of the callers to perform the required locking, and we assert that every + // time we access mBufferList. + Mutex mMutex; + // The list representing the queue. 
+ BufferList mBufferList; + }; + +public: + explicit SharedBuffers(float aSampleRate) + : mOutputQueue("SharedBuffers::outputQueue") + , mDelaySoFar(STREAM_TIME_MAX) + , mSampleRate(aSampleRate) + , mLatency(0.0) + , mDroppingBuffers(false) + { + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const + { + size_t amount = aMallocSizeOf(this); + + { + MutexAutoLock lock(mOutputQueue.Lock()); + amount += mOutputQueue.SizeOfExcludingThis(aMallocSizeOf); + } + + return amount; + } + + // main thread + void FinishProducingOutputBuffer(ThreadSharedFloatArrayBufferList* aBuffer, + uint32_t aBufferSize) + { + MOZ_ASSERT(NS_IsMainThread()); + + TimeStamp now = TimeStamp::Now(); + + if (mLastEventTime.IsNull()) { + mLastEventTime = now; + } else { + // When main thread blocking has built up enough so + // |mLatency > MAX_LATENCY_S|, frame dropping starts. It continues until + // the output buffer is completely empty, at which point the accumulated + // latency is also reset to 0. + // It could happen that the output queue becomes empty before the input + // node has fully caught up. In this case there will be events where + // |(now - mLastEventTime)| is very short, making mLatency negative. + // As this happens and the size of |mLatency| becomes greater than + // MAX_LATENCY_S, frame dropping starts again to maintain an as short + // output queue as possible. 
+ float latency = (now - mLastEventTime).ToSeconds(); + float bufferDuration = aBufferSize / mSampleRate; + mLatency += latency - bufferDuration; + mLastEventTime = now; + if (fabs(mLatency) > MAX_LATENCY_S) { + mDroppingBuffers = true; + } + } + + MutexAutoLock lock(mOutputQueue.Lock()); + if (mDroppingBuffers) { + if (mOutputQueue.ReadyToConsume()) { + return; + } + mDroppingBuffers = false; + mLatency = 0; + } + + for (uint32_t offset = 0; offset < aBufferSize; offset += WEBAUDIO_BLOCK_SIZE) { + AudioChunk& chunk = mOutputQueue.Produce(); + if (aBuffer) { + chunk.mDuration = WEBAUDIO_BLOCK_SIZE; + chunk.mBuffer = aBuffer; + chunk.mChannelData.SetLength(aBuffer->GetChannels()); + for (uint32_t i = 0; i < aBuffer->GetChannels(); ++i) { + chunk.mChannelData[i] = aBuffer->GetData(i) + offset; + } + chunk.mVolume = 1.0f; + chunk.mBufferFormat = AUDIO_FORMAT_FLOAT32; + } else { + chunk.SetNull(WEBAUDIO_BLOCK_SIZE); + } + } + } + + // graph thread + AudioChunk GetOutputBuffer() + { + MOZ_ASSERT(!NS_IsMainThread()); + AudioChunk buffer; + + { + MutexAutoLock lock(mOutputQueue.Lock()); + if (mOutputQueue.ReadyToConsume() > 0) { + if (mDelaySoFar == STREAM_TIME_MAX) { + mDelaySoFar = 0; + } + buffer = mOutputQueue.Consume(); + } else { + // If we're out of buffers to consume, just output silence + buffer.SetNull(WEBAUDIO_BLOCK_SIZE); + if (mDelaySoFar != STREAM_TIME_MAX) { + // Remember the delay that we just hit + mDelaySoFar += WEBAUDIO_BLOCK_SIZE; + } + } + } + + return buffer; + } + + StreamTime DelaySoFar() const + { + MOZ_ASSERT(!NS_IsMainThread()); + return mDelaySoFar == STREAM_TIME_MAX ? 0 : mDelaySoFar; + } + + void Reset() + { + MOZ_ASSERT(!NS_IsMainThread()); + mDelaySoFar = STREAM_TIME_MAX; + mLatency = 0.0f; + { + MutexAutoLock lock(mOutputQueue.Lock()); + mOutputQueue.Clear(); + } + mLastEventTime = TimeStamp(); + } + +private: + OutputQueue mOutputQueue; + // How much delay we've seen so far. 
This measures the amount of delay + // caused by the main thread lagging behind in producing output buffers. + // STREAM_TIME_MAX means that we have not received our first buffer yet. + StreamTime mDelaySoFar; + // The samplerate of the context. + float mSampleRate; + // This is the latency caused by the buffering. If this grows too high, we + // will drop buffers until it is acceptable. + float mLatency; + // This is the time at which we last produced a buffer, to detect if the main + // thread has been blocked. + TimeStamp mLastEventTime; + // True if we should be dropping buffers. + bool mDroppingBuffers; +}; + +class ScriptProcessorNodeEngine final : public AudioNodeEngine +{ +public: + ScriptProcessorNodeEngine(ScriptProcessorNode* aNode, + AudioDestinationNode* aDestination, + uint32_t aBufferSize, + uint32_t aNumberOfInputChannels) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + , mSharedBuffers(new SharedBuffers(mDestination->SampleRate())) + , mBufferSize(aBufferSize) + , mInputChannelCount(aNumberOfInputChannels) + , mInputWriteIndex(0) + { + } + + SharedBuffers* GetSharedBuffers() const + { + return mSharedBuffers; + } + + enum { + IS_CONNECTED, + }; + + void SetInt32Parameter(uint32_t aIndex, int32_t aParam) override + { + switch (aIndex) { + case IS_CONNECTED: + mIsConnected = aParam; + break; + default: + NS_ERROR("Bad Int32Parameter"); + } // End index switch. + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + // This node is not connected to anything. Per spec, we don't fire the + // onaudioprocess event. We also want to clear out the input and output + // buffer queue, and output a null buffer. + if (!mIsConnected) { + aOutput->SetNull(WEBAUDIO_BLOCK_SIZE); + mSharedBuffers->Reset(); + mInputWriteIndex = 0; + return; + } + + // The input buffer is allocated lazily when non-null input is received. 
+ if (!aInput.IsNull() && !mInputBuffer) { + mInputBuffer = ThreadSharedFloatArrayBufferList:: + Create(mInputChannelCount, mBufferSize, fallible); + if (mInputBuffer && mInputWriteIndex) { + // Zero leading for null chunks that were skipped. + for (uint32_t i = 0; i < mInputChannelCount; ++i) { + float* channelData = mInputBuffer->GetDataForWrite(i); + PodZero(channelData, mInputWriteIndex); + } + } + } + + // First, record our input buffer, if its allocation succeeded. + uint32_t inputChannelCount = mInputBuffer ? mInputBuffer->GetChannels() : 0; + for (uint32_t i = 0; i < inputChannelCount; ++i) { + float* writeData = mInputBuffer->GetDataForWrite(i) + mInputWriteIndex; + if (aInput.IsNull()) { + PodZero(writeData, aInput.GetDuration()); + } else { + MOZ_ASSERT(aInput.GetDuration() == WEBAUDIO_BLOCK_SIZE, "sanity check"); + MOZ_ASSERT(aInput.ChannelCount() == inputChannelCount); + AudioBlockCopyChannelWithScale(static_cast<const float*>(aInput.mChannelData[i]), + aInput.mVolume, writeData); + } + } + mInputWriteIndex += aInput.GetDuration(); + + // Now, see if we have data to output + // Note that we need to do this before sending the buffer to the main + // thread so that our delay time is updated. + *aOutput = mSharedBuffers->GetOutputBuffer(); + + if (mInputWriteIndex >= mBufferSize) { + SendBuffersToMainThread(aStream, aFrom); + mInputWriteIndex -= mBufferSize; + } + } + + bool IsActive() const override + { + // Could return false when !mIsConnected after all output chunks produced + // by main thread events calling + // SharedBuffers::FinishProducingOutputBuffer() have been processed. 
+ return true; + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Not owned: + // - mDestination (probably) + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + amount += mSharedBuffers->SizeOfIncludingThis(aMallocSizeOf); + if (mInputBuffer) { + amount += mInputBuffer->SizeOfIncludingThis(aMallocSizeOf); + } + + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + void SendBuffersToMainThread(AudioNodeStream* aStream, GraphTime aFrom) + { + MOZ_ASSERT(!NS_IsMainThread()); + + // we now have a full input buffer ready to be sent to the main thread. + StreamTime playbackTick = mDestination->GraphTimeToStreamTime(aFrom); + // Add the duration of the current sample + playbackTick += WEBAUDIO_BLOCK_SIZE; + // Add the delay caused by the main thread + playbackTick += mSharedBuffers->DelaySoFar(); + // Compute the playback time in the coordinate system of the destination + double playbackTime = mDestination->StreamTimeToSeconds(playbackTick); + + class Command final : public Runnable + { + public: + Command(AudioNodeStream* aStream, + already_AddRefed<ThreadSharedFloatArrayBufferList> aInputBuffer, + double aPlaybackTime) + : mStream(aStream) + , mInputBuffer(aInputBuffer) + , mPlaybackTime(aPlaybackTime) + { + } + + NS_IMETHOD Run() override + { + RefPtr<ThreadSharedFloatArrayBufferList> output; + + auto engine = + static_cast<ScriptProcessorNodeEngine*>(mStream->Engine()); + { + auto node = static_cast<ScriptProcessorNode*> + (engine->NodeMainThread()); + if (!node) { + return NS_OK; + } + + if (node->HasListenersFor(nsGkAtoms::onaudioprocess)) { + output = DispatchAudioProcessEvent(node); + } + // The node may have been destroyed during event dispatch. 
+ } + + // Append it to our output buffer queue + engine->GetSharedBuffers()-> + FinishProducingOutputBuffer(output, engine->mBufferSize); + + return NS_OK; + } + + // Returns the output buffers if set in event handlers. + ThreadSharedFloatArrayBufferList* + DispatchAudioProcessEvent(ScriptProcessorNode* aNode) + { + AudioContext* context = aNode->Context(); + if (!context) { + return nullptr; + } + + AutoJSAPI jsapi; + if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) { + return nullptr; + } + JSContext* cx = jsapi.cx(); + uint32_t inputChannelCount = aNode->ChannelCount(); + + // Create the input buffer + RefPtr<AudioBuffer> inputBuffer; + if (mInputBuffer) { + ErrorResult rv; + inputBuffer = + AudioBuffer::Create(context, inputChannelCount, + aNode->BufferSize(), context->SampleRate(), + mInputBuffer.forget(), rv); + if (rv.Failed()) { + rv.SuppressException(); + return nullptr; + } + } + + // Ask content to produce data in the output buffer + // Note that we always avoid creating the output buffer here, and we try to + // avoid creating the input buffer as well. The AudioProcessingEvent class + // knows how to lazily create them if needed once the script tries to access + // them. Otherwise, we may be able to get away without creating them! + RefPtr<AudioProcessingEvent> event = + new AudioProcessingEvent(aNode, nullptr, nullptr); + event->InitEvent(inputBuffer, inputChannelCount, mPlaybackTime); + aNode->DispatchTrustedEvent(event); + + // Steal the output buffers if they have been set. + // Don't create a buffer if it hasn't been used to return output; + // FinishProducingOutputBuffer() will optimize output = null. + // GetThreadSharedChannelsForRate() may also return null after OOM. + if (event->HasOutputBuffer()) { + ErrorResult rv; + AudioBuffer* buffer = event->GetOutputBuffer(rv); + // HasOutputBuffer() returning true means that GetOutputBuffer() + // will not fail. 
+ MOZ_ASSERT(!rv.Failed()); + return buffer->GetThreadSharedChannelsForRate(cx); + } + + return nullptr; + } + private: + RefPtr<AudioNodeStream> mStream; + RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer; + double mPlaybackTime; + }; + + NS_DispatchToMainThread(new Command(aStream, mInputBuffer.forget(), + playbackTime)); + } + + friend class ScriptProcessorNode; + + AudioNodeStream* mDestination; + nsAutoPtr<SharedBuffers> mSharedBuffers; + RefPtr<ThreadSharedFloatArrayBufferList> mInputBuffer; + const uint32_t mBufferSize; + const uint32_t mInputChannelCount; + // The write index into the current input buffer + uint32_t mInputWriteIndex; + bool mIsConnected = false; +}; + +ScriptProcessorNode::ScriptProcessorNode(AudioContext* aContext, + uint32_t aBufferSize, + uint32_t aNumberOfInputChannels, + uint32_t aNumberOfOutputChannels) + : AudioNode(aContext, + aNumberOfInputChannels, + mozilla::dom::ChannelCountMode::Explicit, + mozilla::dom::ChannelInterpretation::Speakers) + , mBufferSize(aBufferSize ? 
+ aBufferSize : // respect what the web developer requested + 4096) // choose our own buffer size -- 4KB for now + , mNumberOfOutputChannels(aNumberOfOutputChannels) +{ + MOZ_ASSERT(BufferSize() % WEBAUDIO_BLOCK_SIZE == 0, "Invalid buffer size"); + ScriptProcessorNodeEngine* engine = + new ScriptProcessorNodeEngine(this, + aContext->Destination(), + BufferSize(), + aNumberOfInputChannels); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +ScriptProcessorNode::~ScriptProcessorNode() +{ +} + +size_t +ScriptProcessorNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +size_t +ScriptProcessorNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +void +ScriptProcessorNode::EventListenerAdded(nsIAtom* aType) +{ + AudioNode::EventListenerAdded(aType); + if (aType == nsGkAtoms::onaudioprocess) { + UpdateConnectedStatus(); + } +} + +void +ScriptProcessorNode::EventListenerRemoved(nsIAtom* aType) +{ + AudioNode::EventListenerRemoved(aType); + if (aType == nsGkAtoms::onaudioprocess) { + UpdateConnectedStatus(); + } +} + +JSObject* +ScriptProcessorNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return ScriptProcessorNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +ScriptProcessorNode::UpdateConnectedStatus() +{ + bool isConnected = mHasPhantomInput || + !(OutputNodes().IsEmpty() && OutputParams().IsEmpty() + && InputNodes().IsEmpty()); + + // Events are queued even when there is no listener because a listener + // may be added while events are in the queue. 
+ SendInt32ParameterToStream(ScriptProcessorNodeEngine::IS_CONNECTED, + isConnected); + + if (isConnected && HasListenersFor(nsGkAtoms::onaudioprocess)) { + MarkActive(); + } else { + MarkInactive(); + } +} + +} // namespace dom +} // namespace mozilla + diff --git a/dom/media/webaudio/ScriptProcessorNode.h b/dom/media/webaudio/ScriptProcessorNode.h new file mode 100644 index 000000000..bd1170e9c --- /dev/null +++ b/dom/media/webaudio/ScriptProcessorNode.h @@ -0,0 +1,147 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ScriptProcessorNode_h_ +#define ScriptProcessorNode_h_ + +#include "AudioNode.h" + +namespace mozilla { +namespace dom { + +class AudioContext; +class SharedBuffers; + +class ScriptProcessorNode final : public AudioNode +{ +public: + ScriptProcessorNode(AudioContext* aContext, + uint32_t aBufferSize, + uint32_t aNumberOfInputChannels, + uint32_t aNumberOfOutputChannels); + + NS_DECL_ISUPPORTS_INHERITED + + IMPL_EVENT_HANDLER(audioprocess) + + void EventListenerAdded(nsIAtom* aType) override; + void EventListenerRemoved(nsIAtom* aType) override; + + JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + AudioNode* Connect(AudioNode& aDestination, uint32_t aOutput, + uint32_t aInput, ErrorResult& aRv) override + { + AudioNode* node = AudioNode::Connect(aDestination, aOutput, aInput, aRv); + if (!aRv.Failed()) { + UpdateConnectedStatus(); + } + return node; + } + + void Connect(AudioParam& aDestination, uint32_t aOutput, + ErrorResult& aRv) override + { + AudioNode::Connect(aDestination, aOutput, aRv); + if (!aRv.Failed()) { + UpdateConnectedStatus(); + } + } + void Disconnect(ErrorResult& aRv) override + { + 
AudioNode::Disconnect(aRv); + UpdateConnectedStatus(); + } + void Disconnect(uint32_t aOutput, ErrorResult& aRv) override + { + AudioNode::Disconnect(aOutput, aRv); + UpdateConnectedStatus(); + } + void NotifyInputsChanged() override + { + UpdateConnectedStatus(); + } + void NotifyHasPhantomInput() override + { + mHasPhantomInput = true; + // No need to UpdateConnectedStatus() because there was previously an + // input in InputNodes(). + } + void Disconnect(AudioNode& aDestination, ErrorResult& aRv) override + { + AudioNode::Disconnect(aDestination, aRv); + UpdateConnectedStatus(); + } + void Disconnect(AudioNode& aDestination, uint32_t aOutput, ErrorResult& aRv) override + { + AudioNode::Disconnect(aDestination, aOutput, aRv); + UpdateConnectedStatus(); + } + void Disconnect(AudioNode& aDestination, uint32_t aOutput, uint32_t aInput, ErrorResult& aRv) override + { + AudioNode::Disconnect(aDestination, aOutput, aInput, aRv); + UpdateConnectedStatus(); + } + void Disconnect(AudioParam& aDestination, ErrorResult& aRv) override + { + AudioNode::Disconnect(aDestination, aRv); + UpdateConnectedStatus(); + } + void Disconnect(AudioParam& aDestination, uint32_t aOutput, ErrorResult& aRv) override + { + AudioNode::Disconnect(aDestination, aOutput, aRv); + UpdateConnectedStatus(); + } + void SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv) override + { + if (aChannelCount != ChannelCount()) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + } + return; + } + void SetChannelCountModeValue(ChannelCountMode aMode, ErrorResult& aRv) override + { + if (aMode != ChannelCountMode::Explicit) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + } + return; + } + + uint32_t BufferSize() const + { + return mBufferSize; + } + + uint32_t NumberOfOutputChannels() const + { + return mNumberOfOutputChannels; + } + + using DOMEventTargetHelper::DispatchTrustedEvent; + + const char* NodeType() const override + { + return "ScriptProcessorNode"; + } + + size_t 
SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +private: + virtual ~ScriptProcessorNode(); + + void UpdateConnectedStatus(); + + const uint32_t mBufferSize; + const uint32_t mNumberOfOutputChannels; + bool mHasPhantomInput = false; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/StereoPannerNode.cpp b/dom/media/webaudio/StereoPannerNode.cpp new file mode 100644 index 000000000..fc804e7b4 --- /dev/null +++ b/dom/media/webaudio/StereoPannerNode.cpp @@ -0,0 +1,211 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "StereoPannerNode.h" +#include "mozilla/dom/StereoPannerNodeBinding.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "AudioDestinationNode.h" +#include "AlignmentUtils.h" +#include "WebAudioUtils.h" +#include "PanningUtils.h" +#include "AudioParamTimeline.h" +#include "AudioParam.h" + +namespace mozilla { +namespace dom { + +using namespace std; + +NS_IMPL_CYCLE_COLLECTION_INHERITED(StereoPannerNode, AudioNode, mPan) + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(StereoPannerNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(StereoPannerNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(StereoPannerNode, AudioNode) + +class StereoPannerNodeEngine final : public AudioNodeEngine +{ +public: + StereoPannerNodeEngine(AudioNode* aNode, + AudioDestinationNode* aDestination) + : AudioNodeEngine(aNode) + , mDestination(aDestination->Stream()) + // Keep the default value in sync with the default value in + // StereoPannerNode::StereoPannerNode. 
+ , mPan(0.f) + { + } + + enum Parameters { + PAN + }; + void RecvTimelineEvent(uint32_t aIndex, + AudioTimelineEvent& aEvent) override + { + MOZ_ASSERT(mDestination); + WebAudioUtils::ConvertAudioTimelineEventToTicks(aEvent, + mDestination); + + switch (aIndex) { + case PAN: + mPan.InsertEvent<int64_t>(aEvent); + break; + default: + NS_ERROR("Bad StereoPannerNode TimelineParameter"); + } + } + + void GetGainValuesForPanning(float aPanning, + bool aMonoToStereo, + float& aLeftGain, + float& aRightGain) + { + // Clamp and normalize the panning in [0; 1] + aPanning = std::min(std::max(aPanning, -1.f), 1.f); + + if (aMonoToStereo) { + aPanning += 1; + aPanning /= 2; + } else if (aPanning <= 0) { + aPanning += 1; + } + + aLeftGain = cos(0.5 * M_PI * aPanning); + aRightGain = sin(0.5 * M_PI * aPanning); + } + + void SetToSilentStereoBlock(AudioBlock* aChunk) + { + for (uint32_t channel = 0; channel < 2; channel++) { + float* samples = aChunk->ChannelFloatsForWrite(channel); + for (uint32_t i = 0; i < WEBAUDIO_BLOCK_SIZE; i++) { + samples[i] = 0.f; + } + } + } + + void UpmixToStereoIfNeeded(const AudioBlock& aInput, AudioBlock* aOutput) + { + if (aInput.ChannelCount() == 2) { + const float* inputL = static_cast<const float*>(aInput.mChannelData[0]); + const float* inputR = static_cast<const float*>(aInput.mChannelData[1]); + float* outputL = aOutput->ChannelFloatsForWrite(0); + float* outputR = aOutput->ChannelFloatsForWrite(1); + + AudioBlockCopyChannelWithScale(inputL, aInput.mVolume, outputL); + AudioBlockCopyChannelWithScale(inputR, aInput.mVolume, outputR); + } else { + MOZ_ASSERT(aInput.ChannelCount() == 1); + GainMonoToStereo(aInput, aOutput, aInput.mVolume, aInput.mVolume); + } + } + + virtual void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool *aFinished) override + { + // The output of this node is always stereo, no matter what the inputs are. 
+ MOZ_ASSERT(aInput.ChannelCount() <= 2); + aOutput->AllocateChannels(2); + bool monoToStereo = aInput.ChannelCount() == 1; + + if (aInput.IsNull()) { + // If input is silent, so is the output + SetToSilentStereoBlock(aOutput); + } else if (mPan.HasSimpleValue()) { + float panning = mPan.GetValue(); + // If the panning is 0.0, we can simply copy the input to the + // output with gain applied, up-mixing to stereo if needed. + if (panning == 0.0f) { + UpmixToStereoIfNeeded(aInput, aOutput); + } else { + // Optimize the case where the panning is constant for this processing + // block: we can just apply a constant gain on the left and right + // channel + float gainL, gainR; + + GetGainValuesForPanning(panning, monoToStereo, gainL, gainR); + ApplyStereoPanning(aInput, aOutput, + gainL * aInput.mVolume, + gainR * aInput.mVolume, + panning <= 0); + } + } else { + float computedGain[2*WEBAUDIO_BLOCK_SIZE + 4]; + bool onLeft[WEBAUDIO_BLOCK_SIZE]; + + float values[WEBAUDIO_BLOCK_SIZE]; + StreamTime tick = mDestination->GraphTimeToStreamTime(aFrom); + mPan.GetValuesAtTime(tick, values, WEBAUDIO_BLOCK_SIZE); + + float* alignedComputedGain = ALIGNED16(computedGain); + ASSERT_ALIGNED16(alignedComputedGain); + for (size_t counter = 0; counter < WEBAUDIO_BLOCK_SIZE; ++counter) { + float left, right; + GetGainValuesForPanning(values[counter], monoToStereo, left, right); + + alignedComputedGain[counter] = left * aInput.mVolume; + alignedComputedGain[WEBAUDIO_BLOCK_SIZE + counter] = right * aInput.mVolume; + onLeft[counter] = values[counter] <= 0; + } + + // Apply the gain to the output buffer + ApplyStereoPanning(aInput, aOutput, alignedComputedGain, &alignedComputedGain[WEBAUDIO_BLOCK_SIZE], onLeft); + } + } + + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + + AudioNodeStream* mDestination; + AudioParamTimeline mPan; +}; + +StereoPannerNode::StereoPannerNode(AudioContext* 
aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Clamped_max, + ChannelInterpretation::Speakers) + , mPan(new AudioParam(this, StereoPannerNodeEngine::PAN, 0.f, "pan")) +{ + StereoPannerNodeEngine* engine = new StereoPannerNodeEngine(this, aContext->Destination()); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +StereoPannerNode::~StereoPannerNode() +{ +} + +size_t +StereoPannerNode::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const +{ + size_t amount = AudioNode::SizeOfExcludingThis(aMallocSizeOf); + amount += mPan->SizeOfIncludingThis(aMallocSizeOf); + return amount; +} + +size_t +StereoPannerNode::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); +} + +JSObject* +StereoPannerNode::WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) +{ + return StereoPannerNodeBinding::Wrap(aCx, this, aGivenProto); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/StereoPannerNode.h b/dom/media/webaudio/StereoPannerNode.h new file mode 100644 index 000000000..68204f757 --- /dev/null +++ b/dom/media/webaudio/StereoPannerNode.h @@ -0,0 +1,70 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef StereoPannerNode_h_ +#define StereoPannerNode_h_ + +#include "AudioNode.h" +#include "mozilla/dom/StereoPannerNodeBinding.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class StereoPannerNode final : public AudioNode +{ +public: + MOZ_DECLARE_REFCOUNTED_TYPENAME(StereoPannerNode) + explicit StereoPannerNode(AudioContext* aContext); + + virtual JSObject* WrapObject(JSContext* aCx, JS::Handle<JSObject*> aGivenProto) override; + + virtual void SetChannelCount(uint32_t aChannelCount, ErrorResult& aRv) override + { + if (aChannelCount > 2) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + AudioNode::SetChannelCount(aChannelCount, aRv); + } + virtual void SetChannelCountModeValue(ChannelCountMode aMode, ErrorResult& aRv) override + { + if (aMode == ChannelCountMode::Max) { + aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + return; + } + AudioNode::SetChannelCountModeValue(aMode, aRv); + } + + AudioParam* Pan() const + { + return mPan; + } + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(StereoPannerNode, AudioNode) + + virtual const char* NodeType() const override + { + return "StereoPannerNode"; + } + + virtual size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override; + virtual size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override; + +protected: + virtual ~StereoPannerNode(); + +private: + RefPtr<AudioParam> mPan; +}; + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/ThreeDPoint.cpp b/dom/media/webaudio/ThreeDPoint.cpp new file mode 100644 index 000000000..ad816eb89 --- /dev/null +++ b/dom/media/webaudio/ThreeDPoint.cpp @@ -0,0 +1,49 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * Other similar methods can be added if needed. + */ + +#include "ThreeDPoint.h" +#include "WebAudioUtils.h" + +namespace mozilla { + +namespace dom { + +bool +ThreeDPoint::FuzzyEqual(const ThreeDPoint& other) +{ + return WebAudioUtils::FuzzyEqual(x, other.x) && + WebAudioUtils::FuzzyEqual(y, other.y) && + WebAudioUtils::FuzzyEqual(z, other.z); +} + +ThreeDPoint operator-(const ThreeDPoint& lhs, const ThreeDPoint& rhs) +{ + return ThreeDPoint(lhs.x - rhs.x, lhs.y - rhs.y, lhs.z - rhs.z); +} + +ThreeDPoint operator*(const ThreeDPoint& lhs, const ThreeDPoint& rhs) +{ + return ThreeDPoint(lhs.x * rhs.x, lhs.y * rhs.y, lhs.z * rhs.z); +} + +ThreeDPoint operator*(const ThreeDPoint& lhs, const double rhs) +{ + return ThreeDPoint(lhs.x * rhs, lhs.y * rhs, lhs.z * rhs); +} + +bool operator==(const ThreeDPoint& lhs, const ThreeDPoint& rhs) +{ + return lhs.x == rhs.x && + lhs.y == rhs.y && + lhs.z == rhs.z; +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/ThreeDPoint.h b/dom/media/webaudio/ThreeDPoint.h new file mode 100644 index 000000000..b6d51e69a --- /dev/null +++ b/dom/media/webaudio/ThreeDPoint.h @@ -0,0 +1,89 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef ThreeDPoint_h_ +#define ThreeDPoint_h_ + +#include <cmath> +#include <algorithm> + +namespace mozilla { + +namespace dom { + +struct ThreeDPoint final +{ + ThreeDPoint() + : x(0.) + , y(0.) + , z(0.) 
+ { + } + ThreeDPoint(double aX, double aY, double aZ) + : x(aX) + , y(aY) + , z(aZ) + { + } + + double Magnitude() const + { + return sqrt(x * x + y * y + z * z); + } + + void Normalize() + { + // Normalize with the maximum norm first to avoid overflow and underflow. + double invMax = 1 / MaxNorm(); + x *= invMax; + y *= invMax; + z *= invMax; + + double invDistance = 1 / Magnitude(); + x *= invDistance; + y *= invDistance; + z *= invDistance; + } + + ThreeDPoint CrossProduct(const ThreeDPoint& rhs) const + { + return ThreeDPoint(y * rhs.z - z * rhs.y, + z * rhs.x - x * rhs.z, + x * rhs.y - y * rhs.x); + } + + double DotProduct(const ThreeDPoint& rhs) + { + return x * rhs.x + y * rhs.y + z * rhs.z; + } + + bool IsZero() const + { + return x == 0 && y == 0 && z == 0; + } + + // For comparing two vectors of close to unit magnitude. + bool FuzzyEqual(const ThreeDPoint& other); + + double x, y, z; + +private: + double MaxNorm() const + { + return std::max(fabs(x), std::max(fabs(y), fabs(z))); + } +}; + +ThreeDPoint operator-(const ThreeDPoint& lhs, const ThreeDPoint& rhs); +ThreeDPoint operator*(const ThreeDPoint& lhs, const ThreeDPoint& rhs); +ThreeDPoint operator*(const ThreeDPoint& lhs, const double rhs); +bool operator==(const ThreeDPoint& lhs, const ThreeDPoint& rhs); + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/WaveShaperNode.cpp b/dom/media/webaudio/WaveShaperNode.cpp new file mode 100644 index 000000000..d5c617dcd --- /dev/null +++ b/dom/media/webaudio/WaveShaperNode.cpp @@ -0,0 +1,392 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "WaveShaperNode.h" +#include "mozilla/dom/WaveShaperNodeBinding.h" +#include "AlignmentUtils.h" +#include "AudioNode.h" +#include "AudioNodeEngine.h" +#include "AudioNodeStream.h" +#include "mozilla/PodOperations.h" + +namespace mozilla { +namespace dom { + +NS_IMPL_CYCLE_COLLECTION_CLASS(WaveShaperNode) + +NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(WaveShaperNode, AudioNode) + NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER + tmp->ClearCurve(); +NS_IMPL_CYCLE_COLLECTION_UNLINK_END + +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(WaveShaperNode, AudioNode) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE_SCRIPT_OBJECTS +NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END + +NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(WaveShaperNode) + NS_IMPL_CYCLE_COLLECTION_TRACE_PRESERVED_WRAPPER + NS_IMPL_CYCLE_COLLECTION_TRACE_JS_MEMBER_CALLBACK(mCurve) +NS_IMPL_CYCLE_COLLECTION_TRACE_END + +NS_INTERFACE_MAP_BEGIN_CYCLE_COLLECTION_INHERITED(WaveShaperNode) +NS_INTERFACE_MAP_END_INHERITING(AudioNode) + +NS_IMPL_ADDREF_INHERITED(WaveShaperNode, AudioNode) +NS_IMPL_RELEASE_INHERITED(WaveShaperNode, AudioNode) + +static uint32_t ValueOf(OverSampleType aType) +{ + switch (aType) { + case OverSampleType::None: return 1; + case OverSampleType::_2x: return 2; + case OverSampleType::_4x: return 4; + default: + NS_NOTREACHED("We should never reach here"); + return 1; + } +} + +class Resampler final +{ +public: + Resampler() + : mType(OverSampleType::None) + , mUpSampler(nullptr) + , mDownSampler(nullptr) + , mChannels(0) + , mSampleRate(0) + { + } + + ~Resampler() + { + Destroy(); + } + + void Reset(uint32_t aChannels, TrackRate aSampleRate, OverSampleType aType) + { + if (aChannels == mChannels && + aSampleRate == mSampleRate && + aType == mType) { + return; + } + + mChannels = aChannels; + mSampleRate = aSampleRate; + mType = aType; + + Destroy(); + + if (aType == OverSampleType::None) { + mBuffer.Clear(); + return; + } + + mUpSampler = speex_resampler_init(aChannels, + aSampleRate, + 
aSampleRate * ValueOf(aType), + SPEEX_RESAMPLER_QUALITY_MIN, + nullptr); + mDownSampler = speex_resampler_init(aChannels, + aSampleRate * ValueOf(aType), + aSampleRate, + SPEEX_RESAMPLER_QUALITY_MIN, + nullptr); + mBuffer.SetLength(WEBAUDIO_BLOCK_SIZE*ValueOf(aType)); + } + + float* UpSample(uint32_t aChannel, const float* aInputData, uint32_t aBlocks) + { + uint32_t inSamples = WEBAUDIO_BLOCK_SIZE; + uint32_t outSamples = WEBAUDIO_BLOCK_SIZE*aBlocks; + float* outputData = mBuffer.Elements(); + + MOZ_ASSERT(mBuffer.Length() == outSamples); + + WebAudioUtils::SpeexResamplerProcess(mUpSampler, aChannel, + aInputData, &inSamples, + outputData, &outSamples); + + MOZ_ASSERT(inSamples == WEBAUDIO_BLOCK_SIZE && outSamples == WEBAUDIO_BLOCK_SIZE*aBlocks); + + return outputData; + } + + void DownSample(uint32_t aChannel, float* aOutputData, uint32_t aBlocks) + { + uint32_t inSamples = WEBAUDIO_BLOCK_SIZE*aBlocks; + uint32_t outSamples = WEBAUDIO_BLOCK_SIZE; + const float* inputData = mBuffer.Elements(); + + MOZ_ASSERT(mBuffer.Length() == inSamples); + + WebAudioUtils::SpeexResamplerProcess(mDownSampler, aChannel, + inputData, &inSamples, + aOutputData, &outSamples); + + MOZ_ASSERT(inSamples == WEBAUDIO_BLOCK_SIZE*aBlocks && outSamples == WEBAUDIO_BLOCK_SIZE); + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const + { + size_t amount = 0; + // Future: properly measure speex memory + amount += aMallocSizeOf(mUpSampler); + amount += aMallocSizeOf(mDownSampler); + amount += mBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; + } + +private: + void Destroy() + { + if (mUpSampler) { + speex_resampler_destroy(mUpSampler); + mUpSampler = nullptr; + } + if (mDownSampler) { + speex_resampler_destroy(mDownSampler); + mDownSampler = nullptr; + } + } + +private: + OverSampleType mType; + SpeexResamplerState* mUpSampler; + SpeexResamplerState* mDownSampler; + uint32_t mChannels; + TrackRate mSampleRate; + nsTArray<float> mBuffer; +}; + +class 
WaveShaperNodeEngine final : public AudioNodeEngine +{ +public: + explicit WaveShaperNodeEngine(AudioNode* aNode) + : AudioNodeEngine(aNode) + , mType(OverSampleType::None) + { + } + + enum Parameteres { + TYPE + }; + + void SetRawArrayData(nsTArray<float>& aCurve) override + { + mCurve.SwapElements(aCurve); + } + + void SetInt32Parameter(uint32_t aIndex, int32_t aValue) override + { + switch (aIndex) { + case TYPE: + mType = static_cast<OverSampleType>(aValue); + break; + default: + NS_ERROR("Bad WaveShaperNode Int32Parameter"); + } + } + + template <uint32_t blocks> + void ProcessCurve(const float* aInputBuffer, float* aOutputBuffer) + { + for (uint32_t j = 0; j < WEBAUDIO_BLOCK_SIZE*blocks; ++j) { + // Index into the curve array based on the amplitude of the + // incoming signal by using an amplitude range of [-1, 1] and + // performing a linear interpolation of the neighbor values. + float index = (mCurve.Length() - 1) * (aInputBuffer[j] + 1.0f) / 2.0f; + if (index < 0.0f) { + aOutputBuffer[j] = mCurve[0]; + } else { + int32_t indexLower = index; + if (static_cast<uint32_t>(indexLower) >= mCurve.Length() - 1) { + aOutputBuffer[j] = mCurve[mCurve.Length() - 1]; + } else { + uint32_t indexHigher = indexLower + 1; + float interpolationFactor = index - indexLower; + aOutputBuffer[j] = (1.0f - interpolationFactor) * mCurve[indexLower] + + interpolationFactor * mCurve[indexHigher]; + } + } + } + } + + void ProcessBlock(AudioNodeStream* aStream, + GraphTime aFrom, + const AudioBlock& aInput, + AudioBlock* aOutput, + bool* aFinished) override + { + uint32_t channelCount = aInput.ChannelCount(); + if (!mCurve.Length()) { + // Optimize the case where we don't have a curve buffer + *aOutput = aInput; + return; + } + + // If the input is null, check to see if non-null output will be produced + bool nullInput = false; + if (channelCount == 0) { + float index = (mCurve.Length() - 1) * 0.5; + uint32_t indexLower = index; + uint32_t indexHigher = indexLower + 1; + float 
interpolationFactor = index - indexLower; + if ((1.0f - interpolationFactor) * mCurve[indexLower] + + interpolationFactor * mCurve[indexHigher] == 0.0) { + *aOutput = aInput; + return; + } else { + nullInput = true; + channelCount = 1; + } + } + + aOutput->AllocateChannels(channelCount); + for (uint32_t i = 0; i < channelCount; ++i) { + const float* inputSamples; + float scaledInput[WEBAUDIO_BLOCK_SIZE + 4]; + float* alignedScaledInput = ALIGNED16(scaledInput); + ASSERT_ALIGNED16(alignedScaledInput); + if (!nullInput) { + if (aInput.mVolume != 1.0f) { + AudioBlockCopyChannelWithScale( + static_cast<const float*>(aInput.mChannelData[i]), + aInput.mVolume, + alignedScaledInput); + inputSamples = alignedScaledInput; + } else { + inputSamples = static_cast<const float*>(aInput.mChannelData[i]); + } + } else { + PodZero(alignedScaledInput, WEBAUDIO_BLOCK_SIZE); + inputSamples = alignedScaledInput; + } + float* outputBuffer = aOutput->ChannelFloatsForWrite(i); + float* sampleBuffer; + + switch (mType) { + case OverSampleType::None: + mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::None); + ProcessCurve<1>(inputSamples, outputBuffer); + break; + case OverSampleType::_2x: + mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_2x); + sampleBuffer = mResampler.UpSample(i, inputSamples, 2); + ProcessCurve<2>(sampleBuffer, sampleBuffer); + mResampler.DownSample(i, outputBuffer, 2); + break; + case OverSampleType::_4x: + mResampler.Reset(channelCount, aStream->SampleRate(), OverSampleType::_4x); + sampleBuffer = mResampler.UpSample(i, inputSamples, 4); + ProcessCurve<4>(sampleBuffer, sampleBuffer); + mResampler.DownSample(i, outputBuffer, 4); + break; + default: + NS_NOTREACHED("We should never reach here"); + } + } + } + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + size_t amount = AudioNodeEngine::SizeOfExcludingThis(aMallocSizeOf); + amount += mCurve.ShallowSizeOfExcludingThis(aMallocSizeOf); + amount += 
mResampler.SizeOfExcludingThis(aMallocSizeOf); + return amount; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +private: + nsTArray<float> mCurve; + OverSampleType mType; + Resampler mResampler; +}; + +WaveShaperNode::WaveShaperNode(AudioContext* aContext) + : AudioNode(aContext, + 2, + ChannelCountMode::Max, + ChannelInterpretation::Speakers) + , mCurve(nullptr) + , mType(OverSampleType::None) +{ + mozilla::HoldJSObjects(this); + + WaveShaperNodeEngine* engine = new WaveShaperNodeEngine(this); + mStream = AudioNodeStream::Create(aContext, engine, + AudioNodeStream::NO_STREAM_FLAGS, + aContext->Graph()); +} + +WaveShaperNode::~WaveShaperNode() +{ + ClearCurve(); +} + +void +WaveShaperNode::ClearCurve() +{ + mCurve = nullptr; + mozilla::DropJSObjects(this); +} + +JSObject* +WaveShaperNode::WrapObject(JSContext *aCx, JS::Handle<JSObject*> aGivenProto) +{ + return WaveShaperNodeBinding::Wrap(aCx, this, aGivenProto); +} + +void +WaveShaperNode::SetCurve(const Nullable<Float32Array>& aCurve, ErrorResult& aRv) +{ + nsTArray<float> curve; + if (!aCurve.IsNull()) { + const Float32Array& floats = aCurve.Value(); + + floats.ComputeLengthAndData(); + if (floats.IsShared()) { + // Throw if the object is mapping shared memory (must opt in). 
+ aRv.ThrowTypeError<MSG_TYPEDARRAY_IS_SHARED>(NS_LITERAL_STRING("Argument of WaveShaperNode.setCurve")); + return; + } + + uint32_t argLength = floats.Length(); + if (argLength < 2) { + aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + return; + } + + if (!curve.SetLength(argLength, fallible)) { + aRv.Throw(NS_ERROR_OUT_OF_MEMORY); + return; + } + + PodCopy(curve.Elements(), floats.Data(), floats.Length()); + + mCurve = floats.Obj(); + } else { + mCurve = nullptr; + } + + AudioNodeStream* ns = mStream; + MOZ_ASSERT(ns, "Why don't we have a stream here?"); + ns->SetRawArrayData(curve); +} + +void +WaveShaperNode::SetOversample(OverSampleType aType) +{ + mType = aType; + SendInt32ParameterToStream(WaveShaperNodeEngine::TYPE, static_cast<int32_t>(aType)); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/WaveShaperNode.h b/dom/media/webaudio/WaveShaperNode.h new file mode 100644 index 000000000..b58841ee6 --- /dev/null +++ b/dom/media/webaudio/WaveShaperNode.h @@ -0,0 +1,72 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef WaveShaperNode_h_ +#define WaveShaperNode_h_ + +#include "AudioNode.h" +#include "mozilla/dom/WaveShaperNodeBinding.h" +#include "mozilla/dom/TypedArray.h" + +namespace mozilla { +namespace dom { + +class AudioContext; + +class WaveShaperNode final : public AudioNode +{ +public: + explicit WaveShaperNode(AudioContext *aContext); + + NS_DECL_ISUPPORTS_INHERITED + NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_CLASS_INHERITED(WaveShaperNode, AudioNode) + + JSObject* WrapObject(JSContext *aCx, JS::Handle<JSObject*> aGivenProto) override; + + void GetCurve(JSContext* aCx, JS::MutableHandle<JSObject*> aRetval) const + { + aRetval.set(mCurve); + } + void SetCurve(const Nullable<Float32Array>& aData, ErrorResult& aRv); + + OverSampleType Oversample() const + { + return mType; + } + void SetOversample(OverSampleType aType); + + size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const override + { + // Possibly track in the future: + // - mCurve + return AudioNode::SizeOfExcludingThis(aMallocSizeOf); + } + + const char* NodeType() const override + { + return "WaveShaperNode"; + } + + size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const override + { + return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf); + } + +protected: + virtual ~WaveShaperNode(); + +private: + void ClearCurve(); + +private: + JS::Heap<JSObject*> mCurve; + OverSampleType mType; +}; + +} // namespace dom +} // namespace mozilla + +#endif diff --git a/dom/media/webaudio/WebAudioUtils.cpp b/dom/media/webaudio/WebAudioUtils.cpp new file mode 100644 index 000000000..6289f803b --- /dev/null +++ b/dom/media/webaudio/WebAudioUtils.cpp @@ -0,0 +1,151 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "WebAudioUtils.h" +#include "AudioNodeStream.h" +#include "blink/HRTFDatabaseLoader.h" + +#include "nsContentUtils.h" +#include "nsIConsoleService.h" +#include "nsIScriptError.h" + +namespace mozilla { + +LazyLogModule gWebAudioAPILog("WebAudioAPI"); + +namespace dom { + +void WebAudioUtils::ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent, + AudioNodeStream* aDest) +{ + aEvent.SetTimeInTicks( + aDest->SecondsToNearestStreamTime(aEvent.Time<double>())); + aEvent.mTimeConstant *= aDest->SampleRate(); + aEvent.mDuration *= aDest->SampleRate(); +} + +void +WebAudioUtils::Shutdown() +{ + WebCore::HRTFDatabaseLoader::shutdown(); +} + +int +WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const float* aIn, uint32_t* aInLen, + float* aOut, uint32_t* aOutLen) +{ +#ifdef MOZ_SAMPLE_TYPE_S16 + AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp1; + AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp2; + tmp1.SetLength(*aInLen); + tmp2.SetLength(*aOutLen); + ConvertAudioSamples(aIn, tmp1.Elements(), *aInLen); + int result = speex_resampler_process_int(aResampler, aChannel, tmp1.Elements(), aInLen, tmp2.Elements(), aOutLen); + ConvertAudioSamples(tmp2.Elements(), aOut, *aOutLen); + return result; +#else + return speex_resampler_process_float(aResampler, aChannel, aIn, aInLen, aOut, aOutLen); +#endif +} + +int +WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const int16_t* aIn, uint32_t* aInLen, + float* aOut, uint32_t* aOutLen) +{ + AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp; +#ifdef MOZ_SAMPLE_TYPE_S16 + tmp.SetLength(*aOutLen); + int result = speex_resampler_process_int(aResampler, aChannel, aIn, aInLen, tmp.Elements(), aOutLen); + ConvertAudioSamples(tmp.Elements(), aOut, *aOutLen); + return result; +#else + tmp.SetLength(*aInLen); + ConvertAudioSamples(aIn, tmp.Elements(), *aInLen); + int result = speex_resampler_process_float(aResampler, 
aChannel, tmp.Elements(), aInLen, aOut, aOutLen); + return result; +#endif +} + +int +WebAudioUtils::SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const int16_t* aIn, uint32_t* aInLen, + int16_t* aOut, uint32_t* aOutLen) +{ +#ifdef MOZ_SAMPLE_TYPE_S16 + return speex_resampler_process_int(aResampler, aChannel, aIn, aInLen, aOut, aOutLen); +#else + AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp1; + AutoTArray<AudioDataValue, WEBAUDIO_BLOCK_SIZE*4> tmp2; + tmp1.SetLength(*aInLen); + tmp2.SetLength(*aOutLen); + ConvertAudioSamples(aIn, tmp1.Elements(), *aInLen); + int result = speex_resampler_process_float(aResampler, aChannel, tmp1.Elements(), aInLen, tmp2.Elements(), aOutLen); + ConvertAudioSamples(tmp2.Elements(), aOut, *aOutLen); + return result; +#endif +} + +void +WebAudioUtils::LogToDeveloperConsole(uint64_t aWindowID, const char* aKey) +{ + // This implementation is derived from dom/media/VideoUtils.cpp, but we + // use a windowID so that the message is delivered to the developer console. + // It is similar to ContentUtils::ReportToConsole, but also works off main + // thread. 
+ if (!NS_IsMainThread()) { + nsCOMPtr<nsIRunnable> task = + NS_NewRunnableFunction([aWindowID, aKey]() { LogToDeveloperConsole(aWindowID, aKey); }); + NS_DispatchToMainThread(task.forget(), NS_DISPATCH_NORMAL); + return; + } + + nsCOMPtr<nsIConsoleService> console( + do_GetService("@mozilla.org/consoleservice;1")); + if (!console) { + NS_WARNING("Failed to log message to console."); + return; + } + + nsAutoCString spec; + uint32_t aLineNumber, aColumnNumber; + JSContext *cx = nsContentUtils::GetCurrentJSContext(); + if (cx) { + nsJSUtils::GetCallingLocation(cx, spec, &aLineNumber, &aColumnNumber); + } + + nsresult rv; + nsCOMPtr<nsIScriptError> errorObject = + do_CreateInstance(NS_SCRIPTERROR_CONTRACTID, &rv); + if (!errorObject) { + NS_WARNING("Failed to log message to console."); + return; + } + + nsXPIDLString result; + rv = nsContentUtils::GetLocalizedString(nsContentUtils::eDOM_PROPERTIES, + aKey, result); + + if (NS_FAILED(rv)) { + NS_WARNING("Failed to log message to console."); + return; + } + + errorObject->InitWithWindowID(result, + NS_ConvertUTF8toUTF16(spec), + EmptyString(), + aLineNumber, aColumnNumber, + nsIScriptError::warningFlag, "Web Audio", + aWindowID); + console->LogMessage(errorObject); +} + +} // namespace dom +} // namespace mozilla diff --git a/dom/media/webaudio/WebAudioUtils.h b/dom/media/webaudio/WebAudioUtils.h new file mode 100644 index 000000000..c0b27b837 --- /dev/null +++ b/dom/media/webaudio/WebAudioUtils.h @@ -0,0 +1,238 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef WebAudioUtils_h_ +#define WebAudioUtils_h_ + +#include <cmath> +#include <limits> +#include "mozilla/TypeTraits.h" +#include "mozilla/FloatingPoint.h" +#include "MediaSegment.h" + +// Forward declaration +typedef struct SpeexResamplerState_ SpeexResamplerState; + +namespace mozilla { + +class AudioNodeStream; + +extern LazyLogModule gWebAudioAPILog; +#define WEB_AUDIO_API_LOG(...) \ + MOZ_LOG(gWebAudioAPILog, LogLevel::Debug, (__VA_ARGS__)) + +namespace dom { + +struct AudioTimelineEvent; + +namespace WebAudioUtils { + // 32 is the minimum required by the spec for createBuffer() and + // createScriptProcessor() and matches what is used by Blink. The limit + // protects against large memory allocations. + const size_t MaxChannelCount = 32; + // AudioContext::CreateBuffer() "must support sample-rates in at least the + // range 22050 to 96000." + const uint32_t MinSampleRate = 8000; + const uint32_t MaxSampleRate = 192000; + + inline bool FuzzyEqual(float v1, float v2) + { + using namespace std; + return fabsf(v1 - v2) < 1e-7f; + } + inline bool FuzzyEqual(double v1, double v2) + { + using namespace std; + return fabs(v1 - v2) < 1e-7; + } + + /** + * Computes an exponential smoothing rate for a time based variable + * over aDuration seconds. + */ + inline double ComputeSmoothingRate(double aDuration, double aSampleRate) + { + return 1.0 - std::exp(-1.0 / (aDuration * aSampleRate)); + } + + /** + * Converts an AudioTimelineEvent's floating point time values to tick values + * with respect to a destination AudioNodeStream. + * + * This needs to be called for each AudioTimelineEvent that gets sent to an + * AudioNodeEngine, on the engine side where the AudioTimlineEvent is + * received. This means that such engines need to be aware of their + * destination streams as well. + */ + void ConvertAudioTimelineEventToTicks(AudioTimelineEvent& aEvent, + AudioNodeStream* aDest); + + /** + * Converts a linear value to decibels. 
Returns aMinDecibels if the linear + * value is 0. + */ + inline float ConvertLinearToDecibels(float aLinearValue, float aMinDecibels) + { + return aLinearValue ? 20.0f * std::log10(aLinearValue) : aMinDecibels; + } + + /** + * Converts a decibel value to a linear value. + */ + inline float ConvertDecibelsToLinear(float aDecibels) + { + return std::pow(10.0f, 0.05f * aDecibels); + } + + /** + * Converts a decibel to a linear value. + */ + inline float ConvertDecibelToLinear(float aDecibel) + { + return std::pow(10.0f, 0.05f * aDecibel); + } + + inline void FixNaN(double& aDouble) + { + if (IsNaN(aDouble) || IsInfinite(aDouble)) { + aDouble = 0.0; + } + } + + inline double DiscreteTimeConstantForSampleRate(double timeConstant, double sampleRate) + { + return 1.0 - std::exp(-1.0 / (sampleRate * timeConstant)); + } + + inline bool IsTimeValid(double aTime) + { + return aTime >= 0 && aTime <= (MEDIA_TIME_MAX >> TRACK_RATE_MAX_BITS); + } + + /** + * Converts a floating point value to an integral type in a safe and + * platform agnostic way. 
The following program demonstrates the kinds + * of ways things can go wrong depending on the CPU architecture you're + * compiling for: + * + * #include <stdio.h> + * volatile float r; + * int main() + * { + * unsigned int q; + * r = 1e100; + * q = r; + * printf("%f %d\n", r, q); + * r = -1e100; + * q = r; + * printf("%f %d\n", r, q); + * r = 1e15; + * q = r; + * printf("%f %x\n", r, q); + * r = 0/0.; + * q = r; + * printf("%f %d\n", r, q); + * } + * + * This program, when compiled for unsigned int, generates the following + * results depending on the architecture: + * + * x86 and x86-64 + * --- + * inf 0 + * -inf 0 + * 999999995904.000000 -727384064 d4a50000 + * nan 0 + * + * ARM + * --- + * inf -1 + * -inf 0 + * 999999995904.000000 -1 + * nan 0 + * + * When compiled for int, this program generates the following results: + * + * x86 and x86-64 + * --- + * inf -2147483648 + * -inf -2147483648 + * 999999995904.000000 -2147483648 + * nan -2147483648 + * + * ARM + * --- + * inf 2147483647 + * -inf -2147483648 + * 999999995904.000000 2147483647 + * nan 0 + * + * Note that the caller is responsible to make sure that the value + * passed to this function is not a NaN. This function will abort if + * it sees a NaN. + */ + template <typename IntType, typename FloatType> + IntType TruncateFloatToInt(FloatType f) + { + using namespace std; + + static_assert(mozilla::IsIntegral<IntType>::value == true, + "IntType must be an integral type"); + static_assert(mozilla::IsFloatingPoint<FloatType>::value == true, + "FloatType must be a floating point type"); + + if (mozilla::IsNaN(f)) { + // It is the responsibility of the caller to deal with NaN values. + // If we ever get to this point, we have a serious bug to fix. + NS_RUNTIMEABORT("We should never see a NaN here"); + } + + if (f > FloatType(numeric_limits<IntType>::max())) { + // If the floating point value is outside of the range of maximum + // integral value for this type, just clamp to the maximum value. 
+ return numeric_limits<IntType>::max(); + } + + if (f < FloatType(numeric_limits<IntType>::min())) { + // If the floating point value is outside of the range of minimum + // integral value for this type, just clamp to the minimum value. + return numeric_limits<IntType>::min(); + } + + // Otherwise, this conversion must be well defined. + return IntType(f); + } + + void Shutdown(); + + int + SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const float* aIn, uint32_t* aInLen, + float* aOut, uint32_t* aOutLen); + + int + SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const int16_t* aIn, uint32_t* aInLen, + float* aOut, uint32_t* aOutLen); + + int + SpeexResamplerProcess(SpeexResamplerState* aResampler, + uint32_t aChannel, + const int16_t* aIn, uint32_t* aInLen, + int16_t* aOut, uint32_t* aOutLen); + + void + LogToDeveloperConsole(uint64_t aWindowID, const char* aKey); + + } // namespace WebAudioUtils + +} // namespace dom +} // namespace mozilla + +#endif + diff --git a/dom/media/webaudio/blink/Biquad.cpp b/dom/media/webaudio/blink/Biquad.cpp new file mode 100644 index 000000000..3aa526072 --- /dev/null +++ b/dom/media/webaudio/blink/Biquad.cpp @@ -0,0 +1,469 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "Biquad.h" + +#include <float.h> +#include <algorithm> +#include <math.h> + +namespace WebCore { + +Biquad::Biquad() +{ + // Initialize as pass-thru (straight-wire, no filter effect) + setNormalizedCoefficients(1, 0, 0, 1, 0, 0); + + reset(); // clear filter memory +} + +Biquad::~Biquad() +{ +} + +void Biquad::process(const float* sourceP, float* destP, size_t framesToProcess) +{ + // Create local copies of member variables + double x1 = m_x1; + double x2 = m_x2; + double y1 = m_y1; + double y2 = m_y2; + + double b0 = m_b0; + double b1 = m_b1; + double b2 = m_b2; + double a1 = m_a1; + double a2 = m_a2; + + for (size_t i = 0; i < framesToProcess; ++i) { + // FIXME: this can be optimized by pipelining the multiply adds... + double x = sourceP[i]; + double y = b0*x + b1*x1 + b2*x2 - a1*y1 - a2*y2; + + destP[i] = y; + + // Update state variables + x2 = x1; + x1 = x; + y2 = y1; + y1 = y; + } + + // Avoid introducing a stream of subnormals when input is silent and the + // tail approaches zero. 
+ // TODO: Remove this code when Bug 1157635 is fixed. + if (x1 == 0.0 && x2 == 0.0 && (y1 != 0.0 || y2 != 0.0) && + fabs(y1) < FLT_MIN && fabs(y2) < FLT_MIN) { + // Flush future values to zero (until there is new input). + y1 = y2 = 0.0; + // Flush calculated values. + for (int i = framesToProcess; i-- && fabsf(destP[i]) < FLT_MIN; ) { + destP[i] = 0.0f; + } + } + // Local variables back to member. + m_x1 = x1; + m_x2 = x2; + m_y1 = y1; + m_y2 = y2; +} + +void Biquad::reset() +{ + m_x1 = m_x2 = m_y1 = m_y2 = 0; +} + +void Biquad::setLowpassParams(double cutoff, double resonance) +{ + // Limit cutoff to 0 to 1. + cutoff = std::max(0.0, std::min(cutoff, 1.0)); + + if (cutoff == 1) { + // When cutoff is 1, the z-transform is 1. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } else if (cutoff > 0) { + // Compute biquad coefficients for lowpass filter + resonance = std::max(0.0, resonance); // can't go negative + double g = pow(10.0, -0.05 * resonance); + double w0 = M_PI * cutoff; + double cos_w0 = cos(w0); + double alpha = 0.5 * sin(w0) * g; + + double b1 = 1.0 - cos_w0; + double b0 = 0.5 * b1; + double b2 = b0; + double a0 = 1.0 + alpha; + double a1 = -2.0 * cos_w0; + double a2 = 1.0 - alpha; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When cutoff is zero, nothing gets through the filter, so set + // coefficients up correctly. + setNormalizedCoefficients(0, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setHighpassParams(double cutoff, double resonance) +{ + // Limit cutoff to 0 to 1. + cutoff = std::max(0.0, std::min(cutoff, 1.0)); + + if (cutoff == 1) { + // The z-transform is 0. 
+ setNormalizedCoefficients(0, 0, 0, + 1, 0, 0); + } else if (cutoff > 0) { + // Compute biquad coefficients for highpass filter + resonance = std::max(0.0, resonance); // can't go negative + double g = pow(10.0, -0.05 * resonance); + double w0 = M_PI * cutoff; + double cos_w0 = cos(w0); + double alpha = 0.5 * sin(w0) * g; + + double b1 = -1.0 - cos_w0; + double b0 = -0.5 * b1; + double b2 = b0; + double a0 = 1.0 + alpha; + double a1 = -2.0 * cos_w0; + double a2 = 1.0 - alpha; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When cutoff is zero, we need to be careful because the above + // gives a quadratic divided by the same quadratic, with poles + // and zeros on the unit circle in the same place. When cutoff + // is zero, the z-transform is 1. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setNormalizedCoefficients(double b0, double b1, double b2, double a0, double a1, double a2) +{ + double a0Inverse = 1 / a0; + + m_b0 = b0 * a0Inverse; + m_b1 = b1 * a0Inverse; + m_b2 = b2 * a0Inverse; + m_a1 = a1 * a0Inverse; + m_a2 = a2 * a0Inverse; +} + +void Biquad::setLowShelfParams(double frequency, double dbGain) +{ + // Clip frequencies to between 0 and 1, inclusive. + frequency = std::max(0.0, std::min(frequency, 1.0)); + + double A = pow(10.0, dbGain / 40); + + if (frequency == 1) { + // The z-transform is a constant gain. 
+ setNormalizedCoefficients(A * A, 0, 0, + 1, 0, 0); + } else if (frequency > 0) { + double w0 = M_PI * frequency; + double S = 1; // filter slope (1 is max value) + double alpha = 0.5 * sin(w0) * sqrt((A + 1 / A) * (1 / S - 1) + 2); + double k = cos(w0); + double k2 = 2 * sqrt(A) * alpha; + double aPlusOne = A + 1; + double aMinusOne = A - 1; + + double b0 = A * (aPlusOne - aMinusOne * k + k2); + double b1 = 2 * A * (aMinusOne - aPlusOne * k); + double b2 = A * (aPlusOne - aMinusOne * k - k2); + double a0 = aPlusOne + aMinusOne * k + k2; + double a1 = -2 * (aMinusOne + aPlusOne * k); + double a2 = aPlusOne + aMinusOne * k - k2; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When frequency is 0, the z-transform is 1. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setHighShelfParams(double frequency, double dbGain) +{ + // Clip frequencies to between 0 and 1, inclusive. + frequency = std::max(0.0, std::min(frequency, 1.0)); + + double A = pow(10.0, dbGain / 40); + + if (frequency == 1) { + // The z-transform is 1. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } else if (frequency > 0) { + double w0 = M_PI * frequency; + double S = 1; // filter slope (1 is max value) + double alpha = 0.5 * sin(w0) * sqrt((A + 1 / A) * (1 / S - 1) + 2); + double k = cos(w0); + double k2 = 2 * sqrt(A) * alpha; + double aPlusOne = A + 1; + double aMinusOne = A - 1; + + double b0 = A * (aPlusOne + aMinusOne * k + k2); + double b1 = -2 * A * (aMinusOne + aPlusOne * k); + double b2 = A * (aPlusOne + aMinusOne * k - k2); + double a0 = aPlusOne - aMinusOne * k + k2; + double a1 = 2 * (aMinusOne - aPlusOne * k); + double a2 = aPlusOne - aMinusOne * k - k2; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When frequency = 0, the filter is just a gain, A^2. 
+ setNormalizedCoefficients(A * A, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setPeakingParams(double frequency, double Q, double dbGain) +{ + // Clip frequencies to between 0 and 1, inclusive. + frequency = std::max(0.0, std::min(frequency, 1.0)); + + // Don't let Q go negative, which causes an unstable filter. + Q = std::max(0.0, Q); + + double A = pow(10.0, dbGain / 40); + + if (frequency > 0 && frequency < 1) { + if (Q > 0) { + double w0 = M_PI * frequency; + double alpha = sin(w0) / (2 * Q); + double k = cos(w0); + + double b0 = 1 + alpha * A; + double b1 = -2 * k; + double b2 = 1 - alpha * A; + double a0 = 1 + alpha / A; + double a1 = -2 * k; + double a2 = 1 - alpha / A; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When Q = 0, the above formulas have problems. If we look at + // the z-transform, we can see that the limit as Q->0 is A^2, so + // set the filter that way. + setNormalizedCoefficients(A * A, 0, 0, + 1, 0, 0); + } + } else { + // When frequency is 0 or 1, the z-transform is 1. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setAllpassParams(double frequency, double Q) +{ + // Clip frequencies to between 0 and 1, inclusive. + frequency = std::max(0.0, std::min(frequency, 1.0)); + + // Don't let Q go negative, which causes an unstable filter. + Q = std::max(0.0, Q); + + if (frequency > 0 && frequency < 1) { + if (Q > 0) { + double w0 = M_PI * frequency; + double alpha = sin(w0) / (2 * Q); + double k = cos(w0); + + double b0 = 1 - alpha; + double b1 = -2 * k; + double b2 = 1 + alpha; + double a0 = 1 + alpha; + double a1 = -2 * k; + double a2 = 1 - alpha; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When Q = 0, the above formulas have problems. If we look at + // the z-transform, we can see that the limit as Q->0 is -1, so + // set the filter that way. + setNormalizedCoefficients(-1, 0, 0, + 1, 0, 0); + } + } else { + // When frequency is 0 or 1, the z-transform is 1. 
+ setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setNotchParams(double frequency, double Q) +{ + // Clip frequencies to between 0 and 1, inclusive. + frequency = std::max(0.0, std::min(frequency, 1.0)); + + // Don't let Q go negative, which causes an unstable filter. + Q = std::max(0.0, Q); + + if (frequency > 0 && frequency < 1) { + if (Q > 0) { + double w0 = M_PI * frequency; + double alpha = sin(w0) / (2 * Q); + double k = cos(w0); + + double b0 = 1; + double b1 = -2 * k; + double b2 = 1; + double a0 = 1 + alpha; + double a1 = -2 * k; + double a2 = 1 - alpha; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When Q = 0, the above formulas have problems. If we look at + // the z-transform, we can see that the limit as Q->0 is 0, so + // set the filter that way. + setNormalizedCoefficients(0, 0, 0, + 1, 0, 0); + } + } else { + // When frequency is 0 or 1, the z-transform is 1. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setBandpassParams(double frequency, double Q) +{ + // No negative frequencies allowed. + frequency = std::max(0.0, frequency); + + // Don't let Q go negative, which causes an unstable filter. + Q = std::max(0.0, Q); + + if (frequency > 0 && frequency < 1) { + double w0 = M_PI * frequency; + if (Q > 0) { + double alpha = sin(w0) / (2 * Q); + double k = cos(w0); + + double b0 = alpha; + double b1 = 0; + double b2 = -alpha; + double a0 = 1 + alpha; + double a1 = -2 * k; + double a2 = 1 - alpha; + + setNormalizedCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When Q = 0, the above formulas have problems. If we look at + // the z-transform, we can see that the limit as Q->0 is 1, so + // set the filter that way. + setNormalizedCoefficients(1, 0, 0, + 1, 0, 0); + } + } else { + // When the cutoff is zero, the z-transform approaches 0, if Q + // > 0. When both Q and cutoff are zero, the z-transform is + // pretty much undefined. What should we do in this case? 
+ // For now, just make the filter 0. When the cutoff is 1, the + // z-transform also approaches 0. + setNormalizedCoefficients(0, 0, 0, + 1, 0, 0); + } +} + +void Biquad::setZeroPolePairs(const Complex &zero, const Complex &pole) +{ + double b0 = 1; + double b1 = -2 * zero.real(); + + double zeroMag = abs(zero); + double b2 = zeroMag * zeroMag; + + double a1 = -2 * pole.real(); + + double poleMag = abs(pole); + double a2 = poleMag * poleMag; + setNormalizedCoefficients(b0, b1, b2, 1, a1, a2); +} + +void Biquad::setAllpassPole(const Complex &pole) +{ + Complex zero = Complex(1, 0) / pole; + setZeroPolePairs(zero, pole); +} + +void Biquad::getFrequencyResponse(int nFrequencies, + const float* frequency, + float* magResponse, + float* phaseResponse) +{ + // Evaluate the Z-transform of the filter at given normalized + // frequency from 0 to 1. (1 corresponds to the Nyquist + // frequency.) + // + // The z-transform of the filter is + // + // H(z) = (b0 + b1*z^(-1) + b2*z^(-2))/(1 + a1*z^(-1) + a2*z^(-2)) + // + // Evaluate as + // + // b0 + (b1 + b2*z1)*z1 + // -------------------- + // 1 + (a1 + a2*z1)*z1 + // + // with z1 = 1/z and z = exp(j*pi*frequency). Hence z1 = exp(-j*pi*frequency) + + // Make local copies of the coefficients as a micro-optimization. + double b0 = m_b0; + double b1 = m_b1; + double b2 = m_b2; + double a1 = m_a1; + double a2 = m_a2; + + for (int k = 0; k < nFrequencies; ++k) { + double omega = -M_PI * frequency[k]; + Complex z = Complex(cos(omega), sin(omega)); + Complex numerator = b0 + (b1 + b2 * z) * z; + Complex denominator = Complex(1, 0) + (a1 + a2 * z) * z; + // Strangely enough, using complex division: + // e.g. Complex response = numerator / denominator; + // fails on our test machines, yielding infinities and NaNs, so we do + // things the long way here. 
+ double n = norm(denominator); + double r = (real(numerator)*real(denominator) + imag(numerator)*imag(denominator)) / n; + double i = (imag(numerator)*real(denominator) - real(numerator)*imag(denominator)) / n; + std::complex<double> response = std::complex<double>(r, i); + + magResponse[k] = static_cast<float>(abs(response)); + phaseResponse[k] = static_cast<float>(atan2(imag(response), real(response))); + } +} + +} // namespace WebCore + diff --git a/dom/media/webaudio/blink/Biquad.h b/dom/media/webaudio/blink/Biquad.h new file mode 100644 index 000000000..f266af441 --- /dev/null +++ b/dom/media/webaudio/blink/Biquad.h @@ -0,0 +1,108 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef Biquad_h +#define Biquad_h + +#include <complex> + +namespace WebCore { + +typedef std::complex<double> Complex; + +// A basic biquad (two-zero / two-pole digital filter) +// +// It can be configured to a number of common and very useful filters: +// lowpass, highpass, shelving, parameteric, notch, allpass, ... + +class Biquad { +public: + Biquad(); + ~Biquad(); + + void process(const float* sourceP, float* destP, size_t framesToProcess); + + // frequency is 0 - 1 normalized, resonance and dbGain are in decibels. + // Q is a unitless quality factor. 
+ void setLowpassParams(double frequency, double resonance); + void setHighpassParams(double frequency, double resonance); + void setBandpassParams(double frequency, double Q); + void setLowShelfParams(double frequency, double dbGain); + void setHighShelfParams(double frequency, double dbGain); + void setPeakingParams(double frequency, double Q, double dbGain); + void setAllpassParams(double frequency, double Q); + void setNotchParams(double frequency, double Q); + + // Set the biquad coefficients given a single zero (other zero will be conjugate) + // and a single pole (other pole will be conjugate) + void setZeroPolePairs(const Complex& zero, const Complex& pole); + + // Set the biquad coefficients given a single pole (other pole will be conjugate) + // (The zeroes will be the inverse of the poles) + void setAllpassPole(const Complex& pole); + + // Return true iff the next output block will contain sound even with + // silent input. + bool hasTail() const { return m_y1 || m_y2 || m_x1 || m_x2; } + + // Resets filter state + void reset(); + + // Filter response at a set of n frequencies. The magnitude and + // phase response are returned in magResponse and phaseResponse. + // The phase response is in radians. + void getFrequencyResponse(int nFrequencies, + const float* frequency, + float* magResponse, + float* phaseResponse); +private: + void setNormalizedCoefficients(double b0, double b1, double b2, double a0, double a1, double a2); + + // Filter coefficients. The filter is defined as + // + // y[n] + m_a1*y[n-1] + m_a2*y[n-2] = m_b0*x[n] + m_b1*x[n-1] + m_b2*x[n-2]. + double m_b0; + double m_b1; + double m_b2; + double m_a1; + double m_a2; + + // Filter memory + // + // Double precision for the output values is valuable because errors can + // accumulate. Input values are also stored as double so they need not be + // converted again for computation. 
+ double m_x1; // input delayed by 1 sample + double m_x2; // input delayed by 2 samples + double m_y1; // output delayed by 1 sample + double m_y2; // output delayed by 2 samples +}; + +} // namespace WebCore + +#endif // Biquad_h diff --git a/dom/media/webaudio/blink/DenormalDisabler.h b/dom/media/webaudio/blink/DenormalDisabler.h new file mode 100644 index 000000000..241220732 --- /dev/null +++ b/dom/media/webaudio/blink/DenormalDisabler.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2011, Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DenormalDisabler_h +#define DenormalDisabler_h + +#include <cmath> +#include <float.h> + +namespace WebCore { + +// Deal with denormals. 
They can very seriously impact performance on x86. + +// Define HAVE_DENORMAL if we support flushing denormals to zero. +#if defined(XP_WIN) && defined(_MSC_VER) +#define HAVE_DENORMAL +#endif + +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) +#define HAVE_DENORMAL +#endif + +#ifdef HAVE_DENORMAL +class DenormalDisabler { +public: + DenormalDisabler() + : m_savedCSR(0) + { +#if defined(XP_WIN) && defined(_MSC_VER) + // Save the current state, and set mode to flush denormals. + // + // http://stackoverflow.com/questions/637175/possible-bug-in-controlfp-s-may-not-restore-control-word-correctly + _controlfp_s(&m_savedCSR, 0, 0); + unsigned int unused; + _controlfp_s(&unused, _DN_FLUSH, _MCW_DN); +#else + m_savedCSR = getCSR(); + setCSR(m_savedCSR | 0x8040); +#endif + } + + ~DenormalDisabler() + { +#if defined(XP_WIN) && defined(_MSC_VER) + unsigned int unused; + _controlfp_s(&unused, m_savedCSR, _MCW_DN); +#else + setCSR(m_savedCSR); +#endif + } + + // This is a nop if we can flush denormals to zero in hardware. + static inline float flushDenormalFloatToZero(float f) + { +#if defined(XP_WIN) && defined(_MSC_VER) && _M_IX86_FP + // For systems using x87 instead of sse, there's no hardware support + // to flush denormals automatically. Hence, we need to flush + // denormals to zero manually. + return (fabs(f) < FLT_MIN) ? 0.0f : f; +#else + return f; +#endif + } +private: +#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) + inline int getCSR() + { + int result; + asm volatile("stmxcsr %0" : "=m" (result)); + return result; + } + + inline void setCSR(int a) + { + int temp = a; + asm volatile("ldmxcsr %0" : : "m" (temp)); + } + +#endif + + unsigned int m_savedCSR; +}; + +#else +// FIXME: add implementations for other architectures and compilers +class DenormalDisabler { +public: + DenormalDisabler() { } + + // Assume the worst case that other architectures and compilers + // need to flush denormals to zero manually. 
+ static inline float flushDenormalFloatToZero(float f) + { + return (fabs(f) < FLT_MIN) ? 0.0f : f; + } +}; + +#endif + +} // namespace WebCore + +#undef HAVE_DENORMAL +#endif // DenormalDisabler_h diff --git a/dom/media/webaudio/blink/DynamicsCompressor.cpp b/dom/media/webaudio/blink/DynamicsCompressor.cpp new file mode 100644 index 000000000..8f18913c0 --- /dev/null +++ b/dom/media/webaudio/blink/DynamicsCompressor.cpp @@ -0,0 +1,321 @@ +/* + * Copyright (C) 2011 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "DynamicsCompressor.h" +#include "AlignmentUtils.h" +#include "AudioBlock.h" + +#include <cmath> +#include "AudioNodeEngine.h" +#include "nsDebug.h" + +using mozilla::WEBAUDIO_BLOCK_SIZE; +using mozilla::AudioBlockCopyChannelWithScale; + +namespace WebCore { + +DynamicsCompressor::DynamicsCompressor(float sampleRate, unsigned numberOfChannels) + : m_numberOfChannels(numberOfChannels) + , m_sampleRate(sampleRate) + , m_compressor(sampleRate, numberOfChannels) +{ + // Uninitialized state - for parameter recalculation. 
+ m_lastFilterStageRatio = -1; + m_lastAnchor = -1; + m_lastFilterStageGain = -1; + + setNumberOfChannels(numberOfChannels); + initializeParameters(); +} + +size_t DynamicsCompressor::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + amount += m_preFilterPacks.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_preFilterPacks.Length(); i++) { + if (m_preFilterPacks[i]) { + amount += m_preFilterPacks[i]->sizeOfIncludingThis(aMallocSizeOf); + } + } + + amount += m_postFilterPacks.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_postFilterPacks.Length(); i++) { + if (m_postFilterPacks[i]) { + amount += m_postFilterPacks[i]->sizeOfIncludingThis(aMallocSizeOf); + } + } + + amount += aMallocSizeOf(m_sourceChannels.get()); + amount += aMallocSizeOf(m_destinationChannels.get()); + amount += m_compressor.sizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +void DynamicsCompressor::setParameterValue(unsigned parameterID, float value) +{ + MOZ_ASSERT(parameterID < ParamLast); + if (parameterID < ParamLast) + m_parameters[parameterID] = value; +} + +void DynamicsCompressor::initializeParameters() +{ + // Initializes compressor to default values. + + m_parameters[ParamThreshold] = -24; // dB + m_parameters[ParamKnee] = 30; // dB + m_parameters[ParamRatio] = 12; // unit-less + m_parameters[ParamAttack] = 0.003f; // seconds + m_parameters[ParamRelease] = 0.250f; // seconds + m_parameters[ParamPreDelay] = 0.006f; // seconds + + // Release zone values 0 -> 1. 
+ m_parameters[ParamReleaseZone1] = 0.09f; + m_parameters[ParamReleaseZone2] = 0.16f; + m_parameters[ParamReleaseZone3] = 0.42f; + m_parameters[ParamReleaseZone4] = 0.98f; + + m_parameters[ParamFilterStageGain] = 4.4f; // dB + m_parameters[ParamFilterStageRatio] = 2; + m_parameters[ParamFilterAnchor] = 15000 / nyquist(); + + m_parameters[ParamPostGain] = 0; // dB + m_parameters[ParamReduction] = 0; // dB + + // Linear crossfade (0 -> 1). + m_parameters[ParamEffectBlend] = 1; +} + +float DynamicsCompressor::parameterValue(unsigned parameterID) +{ + MOZ_ASSERT(parameterID < ParamLast); + return m_parameters[parameterID]; +} + +void DynamicsCompressor::setEmphasisStageParameters(unsigned stageIndex, float gain, float normalizedFrequency /* 0 -> 1 */) +{ + float gk = 1 - gain / 20; + float f1 = normalizedFrequency * gk; + float f2 = normalizedFrequency / gk; + float r1 = expf(-f1 * M_PI); + float r2 = expf(-f2 * M_PI); + + MOZ_ASSERT(m_numberOfChannels == m_preFilterPacks.Length()); + + for (unsigned i = 0; i < m_numberOfChannels; ++i) { + // Set pre-filter zero and pole to create an emphasis filter. + ZeroPole& preFilter = m_preFilterPacks[i]->filters[stageIndex]; + preFilter.setZero(r1); + preFilter.setPole(r2); + + // Set post-filter with zero and pole reversed to create the de-emphasis filter. + // If there were no compressor kernel in between, they would cancel each other out (allpass filter). 
+ ZeroPole& postFilter = m_postFilterPacks[i]->filters[stageIndex]; + postFilter.setZero(r2); + postFilter.setPole(r1); + } +} + +void DynamicsCompressor::setEmphasisParameters(float gain, float anchorFreq, float filterStageRatio) +{ + setEmphasisStageParameters(0, gain, anchorFreq); + setEmphasisStageParameters(1, gain, anchorFreq / filterStageRatio); + setEmphasisStageParameters(2, gain, anchorFreq / (filterStageRatio * filterStageRatio)); + setEmphasisStageParameters(3, gain, anchorFreq / (filterStageRatio * filterStageRatio * filterStageRatio)); +} + +void DynamicsCompressor::process(const AudioBlock* sourceChunk, AudioBlock* destinationChunk, unsigned framesToProcess) +{ + // Though numberOfChannels is retrived from destinationBus, we still name it numberOfChannels instead of numberOfDestinationChannels. + // It's because we internally match sourceChannels's size to destinationBus by channel up/down mix. Thus we need numberOfChannels + // to do the loop work for both m_sourceChannels and m_destinationChannels. + + unsigned numberOfChannels = destinationChunk->ChannelCount(); + unsigned numberOfSourceChannels = sourceChunk->ChannelCount(); + + MOZ_ASSERT(numberOfChannels == m_numberOfChannels && numberOfSourceChannels); + + if (numberOfChannels != m_numberOfChannels || !numberOfSourceChannels) { + destinationChunk->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + switch (numberOfChannels) { + case 2: // stereo + m_sourceChannels[0] = static_cast<const float*>(sourceChunk->mChannelData[0]); + + if (numberOfSourceChannels > 1) + m_sourceChannels[1] = static_cast<const float*>(sourceChunk->mChannelData[1]); + else + // Simply duplicate mono channel input data to right channel for stereo processing. + m_sourceChannels[1] = m_sourceChannels[0]; + + break; + default: + // FIXME : support other number of channels. 
+ NS_WARNING("Support other number of channels"); + destinationChunk->SetNull(WEBAUDIO_BLOCK_SIZE); + return; + } + + for (unsigned i = 0; i < numberOfChannels; ++i) + m_destinationChannels[i] = const_cast<float*>(static_cast<const float*>( + destinationChunk->mChannelData[i])); + + float filterStageGain = parameterValue(ParamFilterStageGain); + float filterStageRatio = parameterValue(ParamFilterStageRatio); + float anchor = parameterValue(ParamFilterAnchor); + + if (filterStageGain != m_lastFilterStageGain || filterStageRatio != m_lastFilterStageRatio || anchor != m_lastAnchor) { + m_lastFilterStageGain = filterStageGain; + m_lastFilterStageRatio = filterStageRatio; + m_lastAnchor = anchor; + + setEmphasisParameters(filterStageGain, anchor, filterStageRatio); + } + + float sourceWithVolume[WEBAUDIO_BLOCK_SIZE + 4]; + float* alignedSourceWithVolume = ALIGNED16(sourceWithVolume); + ASSERT_ALIGNED16(alignedSourceWithVolume); + + // Apply pre-emphasis filter. + // Note that the final three stages are computed in-place in the destination buffer. 
+ for (unsigned i = 0; i < numberOfChannels; ++i) { + const float* sourceData; + if (sourceChunk->mVolume == 1.0f) { + // Fast path, the volume scale doesn't need to get taken into account + sourceData = m_sourceChannels[i]; + } else { + AudioBlockCopyChannelWithScale(m_sourceChannels[i], + sourceChunk->mVolume, + alignedSourceWithVolume); + sourceData = alignedSourceWithVolume; + } + + float* destinationData = m_destinationChannels[i]; + ZeroPole* preFilters = m_preFilterPacks[i]->filters; + + preFilters[0].process(sourceData, destinationData, framesToProcess); + preFilters[1].process(destinationData, destinationData, framesToProcess); + preFilters[2].process(destinationData, destinationData, framesToProcess); + preFilters[3].process(destinationData, destinationData, framesToProcess); + } + + float dbThreshold = parameterValue(ParamThreshold); + float dbKnee = parameterValue(ParamKnee); + float ratio = parameterValue(ParamRatio); + float attackTime = parameterValue(ParamAttack); + float releaseTime = parameterValue(ParamRelease); + float preDelayTime = parameterValue(ParamPreDelay); + + // This is effectively a master volume on the compressed signal (pre-blending). + float dbPostGain = parameterValue(ParamPostGain); + + // Linear blending value from dry to completely processed (0 -> 1) + // 0 means the signal is completely unprocessed. + // 1 mixes in only the compressed signal. + float effectBlend = parameterValue(ParamEffectBlend); + + float releaseZone1 = parameterValue(ParamReleaseZone1); + float releaseZone2 = parameterValue(ParamReleaseZone2); + float releaseZone3 = parameterValue(ParamReleaseZone3); + float releaseZone4 = parameterValue(ParamReleaseZone4); + + // Apply compression to the pre-filtered signal. + // The processing is performed in place. 
+ m_compressor.process(m_destinationChannels.get(), + m_destinationChannels.get(), + numberOfChannels, + framesToProcess, + + dbThreshold, + dbKnee, + ratio, + attackTime, + releaseTime, + preDelayTime, + dbPostGain, + effectBlend, + + releaseZone1, + releaseZone2, + releaseZone3, + releaseZone4 + ); + + // Update the compression amount. + setParameterValue(ParamReduction, m_compressor.meteringGain()); + + // Apply de-emphasis filter. + for (unsigned i = 0; i < numberOfChannels; ++i) { + float* destinationData = m_destinationChannels[i]; + ZeroPole* postFilters = m_postFilterPacks[i]->filters; + + postFilters[0].process(destinationData, destinationData, framesToProcess); + postFilters[1].process(destinationData, destinationData, framesToProcess); + postFilters[2].process(destinationData, destinationData, framesToProcess); + postFilters[3].process(destinationData, destinationData, framesToProcess); + } +} + +void DynamicsCompressor::reset() +{ + m_lastFilterStageRatio = -1; // for recalc + m_lastAnchor = -1; + m_lastFilterStageGain = -1; + + for (unsigned channel = 0; channel < m_numberOfChannels; ++channel) { + for (unsigned stageIndex = 0; stageIndex < 4; ++stageIndex) { + m_preFilterPacks[channel]->filters[stageIndex].reset(); + m_postFilterPacks[channel]->filters[stageIndex].reset(); + } + } + + m_compressor.reset(); +} + +void DynamicsCompressor::setNumberOfChannels(unsigned numberOfChannels) +{ + if (m_preFilterPacks.Length() == numberOfChannels) + return; + + m_preFilterPacks.Clear(); + m_postFilterPacks.Clear(); + for (unsigned i = 0; i < numberOfChannels; ++i) { + m_preFilterPacks.AppendElement(new ZeroPoleFilterPack4()); + m_postFilterPacks.AppendElement(new ZeroPoleFilterPack4()); + } + + m_sourceChannels = mozilla::MakeUnique<const float* []>(numberOfChannels); + m_destinationChannels = mozilla::MakeUnique<float* []>(numberOfChannels); + + m_compressor.setNumberOfChannels(numberOfChannels); + m_numberOfChannels = numberOfChannels; +} + +} // namespace 
WebCore diff --git a/dom/media/webaudio/blink/DynamicsCompressor.h b/dom/media/webaudio/blink/DynamicsCompressor.h new file mode 100644 index 000000000..f460836b4 --- /dev/null +++ b/dom/media/webaudio/blink/DynamicsCompressor.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2011 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DynamicsCompressor_h +#define DynamicsCompressor_h + +#include "DynamicsCompressorKernel.h" +#include "ZeroPole.h" + +#include "nsTArray.h" +#include "nsAutoPtr.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/UniquePtr.h" + +namespace mozilla { +class AudioBlock; +} // namespace mozilla + +namespace WebCore { + +using mozilla::AudioBlock; + +// DynamicsCompressor implements a flexible audio dynamics compression effect such as +// is commonly used in musical production and game audio. It lowers the volume +// of the loudest parts of the signal and raises the volume of the softest parts, +// making the sound richer, fuller, and more controlled. + +class DynamicsCompressor { +public: + enum { + ParamThreshold, + ParamKnee, + ParamRatio, + ParamAttack, + ParamRelease, + ParamPreDelay, + ParamReleaseZone1, + ParamReleaseZone2, + ParamReleaseZone3, + ParamReleaseZone4, + ParamPostGain, + ParamFilterStageGain, + ParamFilterStageRatio, + ParamFilterAnchor, + ParamEffectBlend, + ParamReduction, + ParamLast + }; + + DynamicsCompressor(float sampleRate, unsigned numberOfChannels); + + void process(const AudioBlock* sourceChunk, AudioBlock* destinationChunk, unsigned framesToProcess); + void reset(); + void setNumberOfChannels(unsigned); + unsigned numberOfChannels() const { return m_numberOfChannels; } + + void setParameterValue(unsigned parameterID, float value); + float parameterValue(unsigned parameterID); + + float sampleRate() const { return m_sampleRate; } + float nyquist() const { return m_sampleRate / 2; } + + double tailTime() const { return 0; } + double latencyTime() const { return m_compressor.latencyFrames() / static_cast<double>(sampleRate()); } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +protected: + unsigned m_numberOfChannels; + + // m_parameters holds the tweakable compressor parameters. 
+ float m_parameters[ParamLast]; + void initializeParameters(); + + float m_sampleRate; + + // Emphasis filter controls. + float m_lastFilterStageRatio; + float m_lastAnchor; + float m_lastFilterStageGain; + + typedef struct { + ZeroPole filters[4]; + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const + { + return aMallocSizeOf(this); + } + } ZeroPoleFilterPack4; + + // Per-channel emphasis filters. + nsTArray<nsAutoPtr<ZeroPoleFilterPack4> > m_preFilterPacks; + nsTArray<nsAutoPtr<ZeroPoleFilterPack4> > m_postFilterPacks; + + mozilla::UniquePtr<const float*[]> m_sourceChannels; + mozilla::UniquePtr<float*[]> m_destinationChannels; + + void setEmphasisStageParameters(unsigned stageIndex, float gain, float normalizedFrequency /* 0 -> 1 */); + void setEmphasisParameters(float gain, float anchorFreq, float filterStageRatio); + + // The core compressor. + DynamicsCompressorKernel m_compressor; +}; + +} // namespace WebCore + +#endif // DynamicsCompressor_h diff --git a/dom/media/webaudio/blink/DynamicsCompressorKernel.cpp b/dom/media/webaudio/blink/DynamicsCompressorKernel.cpp new file mode 100644 index 000000000..e5b4aba2f --- /dev/null +++ b/dom/media/webaudio/blink/DynamicsCompressorKernel.cpp @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2011 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "DynamicsCompressorKernel.h" + +#include "DenormalDisabler.h" +#include <algorithm> +#include <cmath> + +#include "mozilla/FloatingPoint.h" +#include "WebAudioUtils.h" + +using namespace std; + +using namespace mozilla::dom; // for WebAudioUtils +using mozilla::IsInfinite; +using mozilla::IsNaN; +using mozilla::MakeUnique; + +namespace WebCore { + + +// Metering hits peaks instantly, but releases this fast (in seconds). 
+const float meteringReleaseTimeConstant = 0.325f; + +const float uninitializedValue = -1; + +DynamicsCompressorKernel::DynamicsCompressorKernel(float sampleRate, unsigned numberOfChannels) + : m_sampleRate(sampleRate) + , m_lastPreDelayFrames(DefaultPreDelayFrames) + , m_preDelayReadIndex(0) + , m_preDelayWriteIndex(DefaultPreDelayFrames) + , m_ratio(uninitializedValue) + , m_slope(uninitializedValue) + , m_linearThreshold(uninitializedValue) + , m_dbThreshold(uninitializedValue) + , m_dbKnee(uninitializedValue) + , m_kneeThreshold(uninitializedValue) + , m_kneeThresholdDb(uninitializedValue) + , m_ykneeThresholdDb(uninitializedValue) + , m_K(uninitializedValue) +{ + setNumberOfChannels(numberOfChannels); + + // Initializes most member variables + reset(); + + m_meteringReleaseK = + static_cast<float>(WebAudioUtils::DiscreteTimeConstantForSampleRate(meteringReleaseTimeConstant, sampleRate)); +} + +size_t DynamicsCompressorKernel::sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = 0; + amount += m_preDelayBuffers.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_preDelayBuffers.Length(); i++) { + amount += aMallocSizeOf(m_preDelayBuffers[i].get()); + } + + return amount; +} + +void DynamicsCompressorKernel::setNumberOfChannels(unsigned numberOfChannels) +{ + if (m_preDelayBuffers.Length() == numberOfChannels) + return; + + m_preDelayBuffers.Clear(); + for (unsigned i = 0; i < numberOfChannels; ++i) + m_preDelayBuffers.AppendElement(MakeUnique<float[]>(MaxPreDelayFrames)); +} + +void DynamicsCompressorKernel::setPreDelayTime(float preDelayTime) +{ + // Re-configure look-ahead section pre-delay if delay time has changed. 
+ unsigned preDelayFrames = preDelayTime * sampleRate(); + if (preDelayFrames > MaxPreDelayFrames - 1) + preDelayFrames = MaxPreDelayFrames - 1; + + if (m_lastPreDelayFrames != preDelayFrames) { + m_lastPreDelayFrames = preDelayFrames; + for (unsigned i = 0; i < m_preDelayBuffers.Length(); ++i) + memset(m_preDelayBuffers[i].get(), 0, sizeof(float) * MaxPreDelayFrames); + + m_preDelayReadIndex = 0; + m_preDelayWriteIndex = preDelayFrames; + } +} + +// Exponential curve for the knee. +// It is 1st derivative matched at m_linearThreshold and asymptotically approaches the value m_linearThreshold + 1 / k. +float DynamicsCompressorKernel::kneeCurve(float x, float k) +{ + // Linear up to threshold. + if (x < m_linearThreshold) + return x; + + return m_linearThreshold + (1 - expf(-k * (x - m_linearThreshold))) / k; +} + +// Full compression curve with constant ratio after knee. +float DynamicsCompressorKernel::saturate(float x, float k) +{ + float y; + + if (x < m_kneeThreshold) + y = kneeCurve(x, k); + else { + // Constant ratio after knee. + float xDb = WebAudioUtils::ConvertLinearToDecibels(x, -1000.0f); + float yDb = m_ykneeThresholdDb + m_slope * (xDb - m_kneeThresholdDb); + + y = WebAudioUtils::ConvertDecibelsToLinear(yDb); + } + + return y; +} + +// Approximate 1st derivative with input and output expressed in dB. +// This slope is equal to the inverse of the compression "ratio". +// In other words, a compression ratio of 20 would be a slope of 1/20. 
+float DynamicsCompressorKernel::slopeAt(float x, float k) +{ + if (x < m_linearThreshold) + return 1; + + float x2 = x * 1.001; + + float xDb = WebAudioUtils::ConvertLinearToDecibels(x, -1000.0f); + float x2Db = WebAudioUtils::ConvertLinearToDecibels(x2, -1000.0f); + + float yDb = WebAudioUtils::ConvertLinearToDecibels(kneeCurve(x, k), -1000.0f); + float y2Db = WebAudioUtils::ConvertLinearToDecibels(kneeCurve(x2, k), -1000.0f); + + float m = (y2Db - yDb) / (x2Db - xDb); + + return m; +} + +float DynamicsCompressorKernel::kAtSlope(float desiredSlope) +{ + float xDb = m_dbThreshold + m_dbKnee; + float x = WebAudioUtils::ConvertDecibelsToLinear(xDb); + + // Approximate k given initial values. + float minK = 0.1f; + float maxK = 10000; + float k = 5; + + for (int i = 0; i < 15; ++i) { + // A high value for k will more quickly asymptotically approach a slope of 0. + float slope = slopeAt(x, k); + + if (slope < desiredSlope) { + // k is too high. + maxK = k; + } else { + // k is too low. + minK = k; + } + + // Re-calculate based on geometric mean. + k = sqrtf(minK * maxK); + } + + return k; +} + +float DynamicsCompressorKernel::updateStaticCurveParameters(float dbThreshold, float dbKnee, float ratio) +{ + if (dbThreshold != m_dbThreshold || dbKnee != m_dbKnee || ratio != m_ratio) { + // Threshold and knee. + m_dbThreshold = dbThreshold; + m_linearThreshold = WebAudioUtils::ConvertDecibelsToLinear(dbThreshold); + m_dbKnee = dbKnee; + + // Compute knee parameters. 
+ m_ratio = ratio; + m_slope = 1 / m_ratio; + + float k = kAtSlope(1 / m_ratio); + + m_kneeThresholdDb = dbThreshold + dbKnee; + m_kneeThreshold = WebAudioUtils::ConvertDecibelsToLinear(m_kneeThresholdDb); + + m_ykneeThresholdDb = WebAudioUtils::ConvertLinearToDecibels(kneeCurve(m_kneeThreshold, k), -1000.0f); + + m_K = k; + } + return m_K; +} + +void DynamicsCompressorKernel::process(float* sourceChannels[], + float* destinationChannels[], + unsigned numberOfChannels, + unsigned framesToProcess, + + float dbThreshold, + float dbKnee, + float ratio, + float attackTime, + float releaseTime, + float preDelayTime, + float dbPostGain, + float effectBlend, /* equal power crossfade */ + + float releaseZone1, + float releaseZone2, + float releaseZone3, + float releaseZone4 + ) +{ + MOZ_ASSERT(m_preDelayBuffers.Length() == numberOfChannels); + + float sampleRate = this->sampleRate(); + + float dryMix = 1 - effectBlend; + float wetMix = effectBlend; + + float k = updateStaticCurveParameters(dbThreshold, dbKnee, ratio); + + // Makeup gain. + float fullRangeGain = saturate(1, k); + float fullRangeMakeupGain = 1 / fullRangeGain; + + // Empirical/perceptual tuning. + fullRangeMakeupGain = powf(fullRangeMakeupGain, 0.6f); + + float masterLinearGain = WebAudioUtils::ConvertDecibelsToLinear(dbPostGain) * fullRangeMakeupGain; + + // Attack parameters. + attackTime = max(0.001f, attackTime); + float attackFrames = attackTime * sampleRate; + + // Release parameters. + float releaseFrames = sampleRate * releaseTime; + + // Detector release time. + float satReleaseTime = 0.0025f; + float satReleaseFrames = satReleaseTime * sampleRate; + + // Create a smooth function which passes through four points. 
+ + // Polynomial of the form + // y = a + b*x + c*x^2 + d*x^3 + e*x^4; + + float y1 = releaseFrames * releaseZone1; + float y2 = releaseFrames * releaseZone2; + float y3 = releaseFrames * releaseZone3; + float y4 = releaseFrames * releaseZone4; + + // All of these coefficients were derived for 4th order polynomial curve fitting where the y values + // match the evenly spaced x values as follows: (y1 : x == 0, y2 : x == 1, y3 : x == 2, y4 : x == 3) + float kA = 0.9999999999999998f*y1 + 1.8432219684323923e-16f*y2 - 1.9373394351676423e-16f*y3 + 8.824516011816245e-18f*y4; + float kB = -1.5788320352845888f*y1 + 2.3305837032074286f*y2 - 0.9141194204840429f*y3 + 0.1623677525612032f*y4; + float kC = 0.5334142869106424f*y1 - 1.272736789213631f*y2 + 0.9258856042207512f*y3 - 0.18656310191776226f*y4; + float kD = 0.08783463138207234f*y1 - 0.1694162967925622f*y2 + 0.08588057951595272f*y3 - 0.00429891410546283f*y4; + float kE = -0.042416883008123074f*y1 + 0.1115693827987602f*y2 - 0.09764676325265872f*y3 + 0.028494263462021576f*y4; + + // x ranges from 0 -> 3 0 1 2 3 + // -15 -10 -5 0db + + // y calculates adaptive release frames depending on the amount of compression. + + setPreDelayTime(preDelayTime); + + const int nDivisionFrames = 32; + + const int nDivisions = framesToProcess / nDivisionFrames; + + unsigned frameIndex = 0; + for (int i = 0; i < nDivisions; ++i) { + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Calculate desired gain + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + // Fix gremlins. + if (IsNaN(m_detectorAverage)) + m_detectorAverage = 1; + if (IsInfinite(m_detectorAverage)) + m_detectorAverage = 1; + + float desiredGain = m_detectorAverage; + + // Pre-warp so we get desiredGain after sin() warp below. 
+ float scaledDesiredGain = asinf(desiredGain) / (0.5f * M_PI); + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Deal with envelopes + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + // envelopeRate is the rate we slew from current compressor level to the desired level. + // The exact rate depends on if we're attacking or releasing and by how much. + float envelopeRate; + + bool isReleasing = scaledDesiredGain > m_compressorGain; + + // compressionDiffDb is the difference between current compression level and the desired level. + float compressionDiffDb = WebAudioUtils::ConvertLinearToDecibels(m_compressorGain / scaledDesiredGain, -1000.0f); + + if (isReleasing) { + // Release mode - compressionDiffDb should be negative dB + m_maxAttackCompressionDiffDb = -1; + + // Fix gremlins. + if (IsNaN(compressionDiffDb)) + compressionDiffDb = -1; + if (IsInfinite(compressionDiffDb)) + compressionDiffDb = -1; + + // Adaptive release - higher compression (lower compressionDiffDb) releases faster. + + // Contain within range: -12 -> 0 then scale to go from 0 -> 3 + float x = compressionDiffDb; + x = max(-12.0f, x); + x = min(0.0f, x); + x = 0.25f * (x + 12); + + // Compute adaptive release curve using 4th order polynomial. + // Normal values for the polynomial coefficients would create a monotonically increasing function. + float x2 = x * x; + float x3 = x2 * x; + float x4 = x2 * x2; + float releaseFrames = kA + kB * x + kC * x2 + kD * x3 + kE * x4; + +#define kSpacingDb 5 + float dbPerFrame = kSpacingDb / releaseFrames; + + envelopeRate = WebAudioUtils::ConvertDecibelsToLinear(dbPerFrame); + } else { + // Attack mode - compressionDiffDb should be positive dB + + // Fix gremlins. 
+ if (IsNaN(compressionDiffDb)) + compressionDiffDb = 1; + if (IsInfinite(compressionDiffDb)) + compressionDiffDb = 1; + + // As long as we're still in attack mode, use a rate based off + // the largest compressionDiffDb we've encountered so far. + if (m_maxAttackCompressionDiffDb == -1 || m_maxAttackCompressionDiffDb < compressionDiffDb) + m_maxAttackCompressionDiffDb = compressionDiffDb; + + float effAttenDiffDb = max(0.5f, m_maxAttackCompressionDiffDb); + + float x = 0.25f / effAttenDiffDb; + envelopeRate = 1 - powf(x, 1 / attackFrames); + } + + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + // Inner loop - calculate shaped power average - apply compression. + // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + { + int preDelayReadIndex = m_preDelayReadIndex; + int preDelayWriteIndex = m_preDelayWriteIndex; + float detectorAverage = m_detectorAverage; + float compressorGain = m_compressorGain; + + int loopFrames = nDivisionFrames; + while (loopFrames--) { + float compressorInput = 0; + + // Predelay signal, computing compression amount from un-delayed version. + for (unsigned i = 0; i < numberOfChannels; ++i) { + float* delayBuffer = m_preDelayBuffers[i].get(); + float undelayedSource = sourceChannels[i][frameIndex]; + delayBuffer[preDelayWriteIndex] = undelayedSource; + + float absUndelayedSource = undelayedSource > 0 ? undelayedSource : -undelayedSource; + if (compressorInput < absUndelayedSource) + compressorInput = absUndelayedSource; + } + + // Calculate shaped power on undelayed input. + + float scaledInput = compressorInput; + float absInput = scaledInput > 0 ? scaledInput : -scaledInput; + + // Put through shaping curve. + // This is linear up to the threshold, then enters a "knee" portion followed by the "ratio" portion. + // The transition from the threshold to the knee is smooth (1st derivative matched). 
+ // The transition from the knee to the ratio portion is smooth (1st derivative matched). + float shapedInput = saturate(absInput, k); + + float attenuation = absInput <= 0.0001f ? 1 : shapedInput / absInput; + + float attenuationDb = -WebAudioUtils::ConvertLinearToDecibels(attenuation, -1000.0f); + attenuationDb = max(2.0f, attenuationDb); + + float dbPerFrame = attenuationDb / satReleaseFrames; + + float satReleaseRate = WebAudioUtils::ConvertDecibelsToLinear(dbPerFrame) - 1; + + bool isRelease = (attenuation > detectorAverage); + float rate = isRelease ? satReleaseRate : 1; + + detectorAverage += (attenuation - detectorAverage) * rate; + detectorAverage = min(1.0f, detectorAverage); + + // Fix gremlins. + if (IsNaN(detectorAverage)) + detectorAverage = 1; + if (IsInfinite(detectorAverage)) + detectorAverage = 1; + + // Exponential approach to desired gain. + if (envelopeRate < 1) { + // Attack - reduce gain to desired. + compressorGain += (scaledDesiredGain - compressorGain) * envelopeRate; + } else { + // Release - exponentially increase gain to 1.0 + compressorGain *= envelopeRate; + compressorGain = min(1.0f, compressorGain); + } + + // Warp pre-compression gain to smooth out sharp exponential transition points. + float postWarpCompressorGain = sinf(0.5f * M_PI * compressorGain); + + // Calculate total gain using master gain and effect blend. + float totalGain = dryMix + wetMix * masterLinearGain * postWarpCompressorGain; + + // Calculate metering. + float dbRealGain = 20 * log10(postWarpCompressorGain); + if (dbRealGain < m_meteringGain) + m_meteringGain = dbRealGain; + else + m_meteringGain += (dbRealGain - m_meteringGain) * m_meteringReleaseK; + + // Apply final gain. 
+ for (unsigned i = 0; i < numberOfChannels; ++i) { + float* delayBuffer = m_preDelayBuffers[i].get(); + destinationChannels[i][frameIndex] = delayBuffer[preDelayReadIndex] * totalGain; + } + + frameIndex++; + preDelayReadIndex = (preDelayReadIndex + 1) & MaxPreDelayFramesMask; + preDelayWriteIndex = (preDelayWriteIndex + 1) & MaxPreDelayFramesMask; + } + + // Locals back to member variables. + m_preDelayReadIndex = preDelayReadIndex; + m_preDelayWriteIndex = preDelayWriteIndex; + m_detectorAverage = DenormalDisabler::flushDenormalFloatToZero(detectorAverage); + m_compressorGain = DenormalDisabler::flushDenormalFloatToZero(compressorGain); + } + } +} + +void DynamicsCompressorKernel::reset() +{ + m_detectorAverage = 0; + m_compressorGain = 1; + m_meteringGain = 1; + + // Predelay section. + for (unsigned i = 0; i < m_preDelayBuffers.Length(); ++i) + memset(m_preDelayBuffers[i].get(), 0, sizeof(float) * MaxPreDelayFrames); + + m_preDelayReadIndex = 0; + m_preDelayWriteIndex = DefaultPreDelayFrames; + + m_maxAttackCompressionDiffDb = -1; // uninitialized state +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/DynamicsCompressorKernel.h b/dom/media/webaudio/blink/DynamicsCompressorKernel.h new file mode 100644 index 000000000..39449949c --- /dev/null +++ b/dom/media/webaudio/blink/DynamicsCompressorKernel.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2011 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DynamicsCompressorKernel_h +#define DynamicsCompressorKernel_h + +#include "nsTArray.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/UniquePtr.h" + +namespace WebCore { + +class DynamicsCompressorKernel { +public: + DynamicsCompressorKernel(float sampleRate, unsigned numberOfChannels); + + void setNumberOfChannels(unsigned); + + // Performs stereo-linked compression. 
+ void process(float* sourceChannels[], + float* destinationChannels[], + unsigned numberOfChannels, + unsigned framesToProcess, + + float dbThreshold, + float dbKnee, + float ratio, + float attackTime, + float releaseTime, + float preDelayTime, + float dbPostGain, + float effectBlend, + + float releaseZone1, + float releaseZone2, + float releaseZone3, + float releaseZone4 + ); + + void reset(); + + unsigned latencyFrames() const { return m_lastPreDelayFrames; } + + float sampleRate() const { return m_sampleRate; } + + float meteringGain() const { return m_meteringGain; } + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +protected: + float m_sampleRate; + + float m_detectorAverage; + float m_compressorGain; + + // Metering + float m_meteringReleaseK; + float m_meteringGain; + + // Lookahead section. + enum { MaxPreDelayFrames = 1024 }; + enum { MaxPreDelayFramesMask = MaxPreDelayFrames - 1 }; + enum { DefaultPreDelayFrames = 256 }; // setPreDelayTime() will override this initial value + unsigned m_lastPreDelayFrames; + void setPreDelayTime(float); + + nsTArray<mozilla::UniquePtr<float[]>> m_preDelayBuffers; + int m_preDelayReadIndex; + int m_preDelayWriteIndex; + + float m_maxAttackCompressionDiffDb; + + // Static compression curve. + float kneeCurve(float x, float k); + float saturate(float x, float k); + float slopeAt(float x, float k); + float kAtSlope(float desiredSlope); + + float updateStaticCurveParameters(float dbThreshold, float dbKnee, float ratio); + + // Amount of input change in dB required for 1 dB of output change. + // This applies to the portion of the curve above m_kneeThresholdDb (see below). + float m_ratio; + float m_slope; // Inverse ratio. + + // The input to output change below the threshold is linear 1:1. + float m_linearThreshold; + float m_dbThreshold; + + // m_dbKnee is the number of dB above the threshold before we enter the "ratio" portion of the curve. 
+ // m_kneeThresholdDb = m_dbThreshold + m_dbKnee + // The portion between m_dbThreshold and m_kneeThresholdDb is the "soft knee" portion of the curve + // which transitions smoothly from the linear portion to the ratio portion. + float m_dbKnee; + float m_kneeThreshold; + float m_kneeThresholdDb; + float m_ykneeThresholdDb; + + // Internal parameter for the knee portion of the curve. + float m_K; +}; + +} // namespace WebCore + +#endif // DynamicsCompressorKernel_h diff --git a/dom/media/webaudio/blink/FFTConvolver.cpp b/dom/media/webaudio/blink/FFTConvolver.cpp new file mode 100644 index 000000000..8694073ae --- /dev/null +++ b/dom/media/webaudio/blink/FFTConvolver.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "FFTConvolver.h" +#include "mozilla/PodOperations.h" + +using namespace mozilla; + +namespace WebCore { + +FFTConvolver::FFTConvolver(size_t fftSize, size_t renderPhase) + : m_frame(fftSize) + , m_readWriteIndex(renderPhase % (fftSize / 2)) +{ + MOZ_ASSERT(fftSize >= 2 * WEBAUDIO_BLOCK_SIZE); + m_inputBuffer.SetLength(fftSize); + PodZero(m_inputBuffer.Elements(), fftSize); + m_outputBuffer.SetLength(fftSize); + PodZero(m_outputBuffer.Elements(), fftSize); + m_lastOverlapBuffer.SetLength(fftSize / 2); + PodZero(m_lastOverlapBuffer.Elements(), fftSize / 2); +} + +size_t FFTConvolver::sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = 0; + amount += m_frame.SizeOfExcludingThis(aMallocSizeOf); + amount += m_inputBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + amount += m_outputBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + amount += m_lastOverlapBuffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + return amount; +} + +size_t FFTConvolver::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + return aMallocSizeOf(this) + sizeOfExcludingThis(aMallocSizeOf); +} + +const float* FFTConvolver::process(FFTBlock* fftKernel, const float* sourceP) +{ + size_t halfSize = fftSize() / 2; + + // WEBAUDIO_BLOCK_SIZE must be an exact multiple of halfSize, + // halfSize must be a multiple of WEBAUDIO_BLOCK_SIZE + // and > WEBAUDIO_BLOCK_SIZE. 
+ MOZ_ASSERT(halfSize % WEBAUDIO_BLOCK_SIZE == 0 && + WEBAUDIO_BLOCK_SIZE <= halfSize); + + // Copy samples to input buffer (note contraint above!) + float* inputP = m_inputBuffer.Elements(); + + MOZ_ASSERT(sourceP && inputP && m_readWriteIndex + WEBAUDIO_BLOCK_SIZE <= m_inputBuffer.Length()); + + memcpy(inputP + m_readWriteIndex, sourceP, sizeof(float) * WEBAUDIO_BLOCK_SIZE); + + float* outputP = m_outputBuffer.Elements(); + m_readWriteIndex += WEBAUDIO_BLOCK_SIZE; + + // Check if it's time to perform the next FFT + if (m_readWriteIndex == halfSize) { + // The input buffer is now filled (get frequency-domain version) + m_frame.PerformFFT(m_inputBuffer.Elements()); + m_frame.Multiply(*fftKernel); + m_frame.GetInverseWithoutScaling(m_outputBuffer.Elements()); + + // Overlap-add 1st half from previous time + AudioBufferAddWithScale(m_lastOverlapBuffer.Elements(), 1.0f, + m_outputBuffer.Elements(), halfSize); + + // Finally, save 2nd half of result + MOZ_ASSERT(m_outputBuffer.Length() == 2 * halfSize && m_lastOverlapBuffer.Length() == halfSize); + + memcpy(m_lastOverlapBuffer.Elements(), m_outputBuffer.Elements() + halfSize, sizeof(float) * halfSize); + + // Reset index back to start for next time + m_readWriteIndex = 0; + } + + return outputP + m_readWriteIndex; +} + +void FFTConvolver::reset() +{ + PodZero(m_lastOverlapBuffer.Elements(), m_lastOverlapBuffer.Length()); + m_readWriteIndex = 0; +} + +size_t FFTConvolver::latencyFrames() const +{ + return std::max<size_t>(fftSize()/2, WEBAUDIO_BLOCK_SIZE) - + WEBAUDIO_BLOCK_SIZE; +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/FFTConvolver.h b/dom/media/webaudio/blink/FFTConvolver.h new file mode 100644 index 000000000..118c6baef --- /dev/null +++ b/dom/media/webaudio/blink/FFTConvolver.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FFTConvolver_h +#define FFTConvolver_h + +#include "nsTArray.h" +#include "mozilla/FFTBlock.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +typedef AlignedTArray<float> AlignedAudioFloatArray; +using mozilla::FFTBlock; + +class FFTConvolver { +public: + // |fftSize| must be a power of two. + // + // |renderPhase| is the initial offset in the initially zero input buffer. 
+ // It is coordinated with the other stages, so they don't all do their + // FFTs at the same time. + explicit FFTConvolver(size_t fftSize, size_t renderPhase = 0); + + // Process WEBAUDIO_BLOCK_SIZE elements of array |sourceP| and return a + // pointer to an output array of the same size. + // + // |fftKernel| must be pre-scaled for FFTBlock::GetInverseWithoutScaling(). + // + // FIXME: Later, we can do more sophisticated buffering to relax this requirement... + const float* process(FFTBlock* fftKernel, const float* sourceP); + + void reset(); + + size_t fftSize() const { return m_frame.FFTSize(); } + + // The input to output latency is up to fftSize / 2, but alignment of the + // FFTs with the blocks reduces this by one block. + size_t latencyFrames() const; + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + FFTBlock m_frame; + + // Buffer input until we get fftSize / 2 samples then do an FFT + size_t m_readWriteIndex; + AlignedAudioFloatArray m_inputBuffer; + + // Stores output which we read a little at a time + AlignedAudioFloatArray m_outputBuffer; + + // Saves the 2nd half of the FFT buffer, so we can do an overlap-add with the 1st half of the next one + AlignedAudioFloatArray m_lastOverlapBuffer; +}; + +} // namespace WebCore + +#endif // FFTConvolver_h diff --git a/dom/media/webaudio/blink/HRTFDatabase.cpp b/dom/media/webaudio/blink/HRTFDatabase.cpp new file mode 100644 index 000000000..ef236c855 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFDatabase.cpp @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "HRTFDatabase.h" + +#include "HRTFElevation.h" + +using namespace std; + +namespace WebCore { + +const int HRTFDatabase::MinElevation = -45; +const int HRTFDatabase::MaxElevation = 90; +const unsigned HRTFDatabase::RawElevationAngleSpacing = 15; +const unsigned HRTFDatabase::NumberOfRawElevations = 10; // -45 -> +90 (each 15 degrees) +const unsigned HRTFDatabase::InterpolationFactor = 1; +const unsigned HRTFDatabase::NumberOfTotalElevations = NumberOfRawElevations * InterpolationFactor; + +nsReturnRef<HRTFDatabase> HRTFDatabase::create(float sampleRate) +{ + return nsReturnRef<HRTFDatabase>(new HRTFDatabase(sampleRate)); +} + +HRTFDatabase::HRTFDatabase(float sampleRate) + : m_sampleRate(sampleRate) +{ + m_elevations.SetLength(NumberOfTotalElevations); + + unsigned elevationIndex = 0; + for (int elevation = MinElevation; elevation <= MaxElevation; elevation += RawElevationAngleSpacing) { + nsAutoRef<HRTFElevation> hrtfElevation(HRTFElevation::createBuiltin(elevation, sampleRate)); + MOZ_ASSERT(hrtfElevation.get()); + if (!hrtfElevation.get()) + return; + + m_elevations[elevationIndex] = hrtfElevation.out(); + elevationIndex += InterpolationFactor; + } + + // Now, go back and interpolate elevations. + if (InterpolationFactor > 1) { + for (unsigned i = 0; i < NumberOfTotalElevations; i += InterpolationFactor) { + unsigned j = (i + InterpolationFactor); + if (j >= NumberOfTotalElevations) + j = i; // for last elevation interpolate with itself + + // Create the interpolated convolution kernels and delays. 
+ for (unsigned jj = 1; jj < InterpolationFactor; ++jj) { + float x = static_cast<float>(jj) / static_cast<float>(InterpolationFactor); + m_elevations[i + jj] = HRTFElevation::createByInterpolatingSlices(m_elevations[i].get(), m_elevations[j].get(), x, sampleRate); + MOZ_ASSERT(m_elevations[i + jj].get()); + } + } + } +} + +size_t HRTFDatabase::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + amount += m_elevations.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_elevations.Length(); i++) { + amount += m_elevations[i]->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +void HRTFDatabase::getKernelsFromAzimuthElevation(double azimuthBlend, unsigned azimuthIndex, double elevationAngle, HRTFKernel* &kernelL, HRTFKernel* &kernelR, + double& frameDelayL, double& frameDelayR) +{ + unsigned elevationIndex = indexFromElevationAngle(elevationAngle); + MOZ_ASSERT(elevationIndex < m_elevations.Length() && m_elevations.Length() > 0); + + if (!m_elevations.Length()) { + kernelL = 0; + kernelR = 0; + return; + } + + if (elevationIndex > m_elevations.Length() - 1) + elevationIndex = m_elevations.Length() - 1; + + HRTFElevation* hrtfElevation = m_elevations[elevationIndex].get(); + MOZ_ASSERT(hrtfElevation); + if (!hrtfElevation) { + kernelL = 0; + kernelR = 0; + return; + } + + hrtfElevation->getKernelsFromAzimuth(azimuthBlend, azimuthIndex, kernelL, kernelR, frameDelayL, frameDelayR); +} + +unsigned HRTFDatabase::indexFromElevationAngle(double elevationAngle) +{ + // Clamp to allowed range. 
+ elevationAngle = mozilla::clamped(elevationAngle, + static_cast<double>(MinElevation), + static_cast<double>(MaxElevation)); + + unsigned elevationIndex = static_cast<int>(InterpolationFactor * (elevationAngle - MinElevation) / RawElevationAngleSpacing); + return elevationIndex; +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/HRTFDatabase.h b/dom/media/webaudio/blink/HRTFDatabase.h new file mode 100644 index 000000000..400763b8f --- /dev/null +++ b/dom/media/webaudio/blink/HRTFDatabase.h @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HRTFDatabase_h +#define HRTFDatabase_h + +#include "HRTFElevation.h" +#include "nsAutoRef.h" +#include "nsTArray.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +class HRTFKernel; + +class HRTFDatabase { +public: + static nsReturnRef<HRTFDatabase> create(float sampleRate); + + // getKernelsFromAzimuthElevation() returns a left and right ear kernel, and an interpolated left and right frame delay for the given azimuth and elevation. + // azimuthBlend must be in the range 0 -> 1. + // Valid values for azimuthIndex are 0 -> HRTFElevation::NumberOfTotalAzimuths - 1 (corresponding to angles of 0 -> 360). + // Valid values for elevationAngle are MinElevation -> MaxElevation. + void getKernelsFromAzimuthElevation(double azimuthBlend, unsigned azimuthIndex, double elevationAngle, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR); + + // Returns the number of different azimuth angles. + static unsigned numberOfAzimuths() { return HRTFElevation::NumberOfTotalAzimuths; } + + float sampleRate() const { return m_sampleRate; } + + // Number of elevations loaded from resource. 
+ static const unsigned NumberOfRawElevations; + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + HRTFDatabase(const HRTFDatabase& other) = delete; + void operator=(const HRTFDatabase& other) = delete; + + explicit HRTFDatabase(float sampleRate); + + // Minimum and maximum elevation angles (inclusive) for a HRTFDatabase. + static const int MinElevation; + static const int MaxElevation; + static const unsigned RawElevationAngleSpacing; + + // Interpolates by this factor to get the total number of elevations from every elevation loaded from resource. + static const unsigned InterpolationFactor; + + // Total number of elevations after interpolation. + static const unsigned NumberOfTotalElevations; + + // Returns the index for the correct HRTFElevation given the elevation angle. + static unsigned indexFromElevationAngle(double); + + nsTArray<nsAutoRef<HRTFElevation> > m_elevations; + float m_sampleRate; +}; + +} // namespace WebCore + +template <> +class nsAutoRefTraits<WebCore::HRTFDatabase> : + public nsPointerRefTraits<WebCore::HRTFDatabase> { +public: + static void Release(WebCore::HRTFDatabase* ptr) { delete(ptr); } +}; + +#endif // HRTFDatabase_h diff --git a/dom/media/webaudio/blink/HRTFDatabaseLoader.cpp b/dom/media/webaudio/blink/HRTFDatabaseLoader.cpp new file mode 100644 index 000000000..090e1b217 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFDatabaseLoader.cpp @@ -0,0 +1,223 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "HRTFDatabaseLoader.h" +#include "HRTFDatabase.h" + +using namespace mozilla; + +namespace WebCore { + +// Singleton +nsTHashtable<HRTFDatabaseLoader::LoaderByRateEntry>* + HRTFDatabaseLoader::s_loaderMap = nullptr; + +size_t HRTFDatabaseLoader::sizeOfLoaders(mozilla::MallocSizeOf aMallocSizeOf) +{ + return s_loaderMap ? 
s_loaderMap->SizeOfIncludingThis(aMallocSizeOf) : 0; +} + +already_AddRefed<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(float sampleRate) +{ + MOZ_ASSERT(NS_IsMainThread()); + + RefPtr<HRTFDatabaseLoader> loader; + + if (!s_loaderMap) { + s_loaderMap = new nsTHashtable<LoaderByRateEntry>(); + } + + LoaderByRateEntry* entry = s_loaderMap->PutEntry(sampleRate); + loader = entry->mLoader; + if (loader) { // existing entry + MOZ_ASSERT(sampleRate == loader->databaseSampleRate()); + return loader.forget(); + } + + loader = new HRTFDatabaseLoader(sampleRate); + entry->mLoader = loader; + + loader->loadAsynchronously(); + + return loader.forget(); +} + +HRTFDatabaseLoader::HRTFDatabaseLoader(float sampleRate) + : m_refCnt(0) + , m_threadLock("HRTFDatabaseLoader") + , m_databaseLoaderThread(nullptr) + , m_databaseSampleRate(sampleRate) +{ + MOZ_ASSERT(NS_IsMainThread()); +} + +HRTFDatabaseLoader::~HRTFDatabaseLoader() +{ + MOZ_ASSERT(NS_IsMainThread()); + + waitForLoaderThreadCompletion(); + m_hrtfDatabase.reset(); + + if (s_loaderMap) { + // Remove ourself from the map. + s_loaderMap->RemoveEntry(m_databaseSampleRate); + if (s_loaderMap->Count() == 0) { + delete s_loaderMap; + s_loaderMap = nullptr; + } + } +} + +size_t HRTFDatabaseLoader::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + + // NB: Need to make sure we're not competing with the loader thread. 
+ const_cast<HRTFDatabaseLoader*>(this)->waitForLoaderThreadCompletion(); + + if (m_hrtfDatabase) { + amount += m_hrtfDatabase->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +class HRTFDatabaseLoader::ProxyReleaseEvent final : public Runnable { +public: + explicit ProxyReleaseEvent(HRTFDatabaseLoader* loader) : mLoader(loader) {} + NS_IMETHOD Run() override + { + mLoader->MainThreadRelease(); + return NS_OK; + } +private: + HRTFDatabaseLoader* mLoader; +}; + +void HRTFDatabaseLoader::ProxyRelease() +{ + nsCOMPtr<nsIThread> mainThread = do_GetMainThread(); + if (MOZ_LIKELY(mainThread)) { + RefPtr<ProxyReleaseEvent> event = new ProxyReleaseEvent(this); + DebugOnly<nsresult> rv = + mainThread->Dispatch(event, NS_DISPATCH_NORMAL); + MOZ_ASSERT(NS_SUCCEEDED(rv), "Failed to dispatch release event"); + } else { + // Should be in XPCOM shutdown. + MOZ_ASSERT(NS_IsMainThread(), + "Main thread is not available for dispatch."); + MainThreadRelease(); + } +} + +void HRTFDatabaseLoader::MainThreadRelease() +{ + MOZ_ASSERT(NS_IsMainThread()); + int count = --m_refCnt; + MOZ_ASSERT(count >= 0, "extra release"); + NS_LOG_RELEASE(this, count, "HRTFDatabaseLoader"); + if (count == 0) { + // It is safe to delete here as the first reference can only be added + // on this (main) thread. + delete this; + } +} + +// Asynchronously load the database in this thread. +static void databaseLoaderEntry(void* threadData) +{ + PR_SetCurrentThreadName("HRTFDatabaseLdr"); + + HRTFDatabaseLoader* loader = reinterpret_cast<HRTFDatabaseLoader*>(threadData); + MOZ_ASSERT(loader); + loader->load(); +} + +void HRTFDatabaseLoader::load() +{ + MOZ_ASSERT(!NS_IsMainThread()); + MOZ_ASSERT(!m_hrtfDatabase.get(), "Called twice"); + // Load the default HRTF database. + m_hrtfDatabase = HRTFDatabase::create(m_databaseSampleRate); + // Notifies the main thread of completion. See loadAsynchronously(). 
+ Release(); +} + +void HRTFDatabaseLoader::loadAsynchronously() +{ + MOZ_ASSERT(NS_IsMainThread()); + MOZ_ASSERT(m_refCnt, "Must not be called before a reference is added"); + + // Add a reference so that the destructor won't run and wait for the + // loader thread, until load() has completed. + AddRef(); + + MutexAutoLock locker(m_threadLock); + + MOZ_ASSERT(!m_hrtfDatabase.get() && !m_databaseLoaderThread, + "Called twice"); + // Start the asynchronous database loading process. + m_databaseLoaderThread = + PR_CreateThread(PR_USER_THREAD, databaseLoaderEntry, this, + PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD, + PR_JOINABLE_THREAD, 0); +} + +bool HRTFDatabaseLoader::isLoaded() const +{ + return m_hrtfDatabase.get(); +} + +void HRTFDatabaseLoader::waitForLoaderThreadCompletion() +{ + MutexAutoLock locker(m_threadLock); + + // waitForThreadCompletion() should not be called twice for the same thread. + if (m_databaseLoaderThread) { + DebugOnly<PRStatus> status = PR_JoinThread(m_databaseLoaderThread); + MOZ_ASSERT(status == PR_SUCCESS, "PR_JoinThread failed"); + } + m_databaseLoaderThread = nullptr; +} + +void HRTFDatabaseLoader::shutdown() +{ + MOZ_ASSERT(NS_IsMainThread()); + if (s_loaderMap) { + // Set s_loaderMap to nullptr so that the hashtable is not modified on + // reference release during enumeration. + nsTHashtable<LoaderByRateEntry>* loaderMap = s_loaderMap; + s_loaderMap = nullptr; + for (auto iter = loaderMap->Iter(); !iter.Done(); iter.Next()) { + iter.Get()->mLoader->waitForLoaderThreadCompletion(); + } + delete loaderMap; + } +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/HRTFDatabaseLoader.h b/dom/media/webaudio/blink/HRTFDatabaseLoader.h new file mode 100644 index 000000000..50a875b18 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFDatabaseLoader.h @@ -0,0 +1,148 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HRTFDatabaseLoader_h +#define HRTFDatabaseLoader_h + +#include "nsHashKeys.h" +#include "mozilla/RefPtr.h" +#include "mozilla/MemoryReporting.h" +#include "mozilla/Mutex.h" +#include "HRTFDatabase.h" + +template <class EntryType> class nsTHashtable; +template <class T> class nsAutoRef; + +namespace WebCore { + +// HRTFDatabaseLoader will asynchronously load the default HRTFDatabase in a new thread. 
+ +class HRTFDatabaseLoader { +public: + // Lazily creates a HRTFDatabaseLoader (if not already created) for the given sample-rate + // and starts loading asynchronously (when created the first time). + // Returns the HRTFDatabaseLoader. + // Must be called from the main thread. + static already_AddRefed<HRTFDatabaseLoader> createAndLoadAsynchronouslyIfNecessary(float sampleRate); + + // AddRef and Release may be called from any thread. + void AddRef() + { +#if defined(DEBUG) || defined(NS_BUILD_REFCNT_LOGGING) + int count = +#endif + ++m_refCnt; + MOZ_ASSERT(count > 0, "invalid ref count"); + NS_LOG_ADDREF(this, count, "HRTFDatabaseLoader", sizeof(*this)); + } + + void Release() + { + // The last reference can't be removed on a non-main thread because + // the object can be accessed on the main thread from the hash + // table via createAndLoadAsynchronouslyIfNecessary(). + int count = m_refCnt; + MOZ_ASSERT(count > 0, "extra release"); + // Optimization attempt to possibly skip proxying the release to the + // main thread. + if (count != 1 && m_refCnt.compareExchange(count, count - 1)) { + NS_LOG_RELEASE(this, count - 1, "HRTFDatabaseLoader"); + return; + } + + ProxyRelease(); + } + + // Returns true once the default database has been completely loaded. + bool isLoaded() const; + + // waitForLoaderThreadCompletion() may be called more than once, + // on any thread except m_databaseLoaderThread. + void waitForLoaderThreadCompletion(); + + HRTFDatabase* database() { return m_hrtfDatabase.get(); } + + float databaseSampleRate() const { return m_databaseSampleRate; } + + static void shutdown(); + + // Called in asynchronous loading thread. + void load(); + + // Sums the size of all cached database loaders. + static size_t sizeOfLoaders(mozilla::MallocSizeOf aMallocSizeOf); + +private: + // Both constructor and destructor must be called from the main thread. 
+ explicit HRTFDatabaseLoader(float sampleRate); + ~HRTFDatabaseLoader(); + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + + void ProxyRelease(); // any thread + void MainThreadRelease(); // main thread only + class ProxyReleaseEvent; + + // If it hasn't already been loaded, creates a new thread and initiates asynchronous loading of the default database. + // This must be called from the main thread. + void loadAsynchronously(); + + // Map from sample-rate to loader. + class LoaderByRateEntry : public nsFloatHashKey { + public: + explicit LoaderByRateEntry(KeyTypePointer aKey) + : nsFloatHashKey(aKey) + , mLoader() // so PutEntry() will zero-initialize + { + } + + size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const + { + return mLoader ? mLoader->sizeOfIncludingThis(aMallocSizeOf) : 0; + } + + HRTFDatabaseLoader* mLoader; + }; + + // Keeps track of loaders on a per-sample-rate basis. + static nsTHashtable<LoaderByRateEntry> *s_loaderMap; // singleton + + mozilla::Atomic<int> m_refCnt; + + nsAutoRef<HRTFDatabase> m_hrtfDatabase; + + // Holding a m_threadLock is required when accessing m_databaseLoaderThread. + mozilla::Mutex m_threadLock; + PRThread* m_databaseLoaderThread; + + float m_databaseSampleRate; +}; + +} // namespace WebCore + +#endif // HRTFDatabaseLoader_h diff --git a/dom/media/webaudio/blink/HRTFElevation.cpp b/dom/media/webaudio/blink/HRTFElevation.cpp new file mode 100644 index 000000000..2300872f3 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFElevation.cpp @@ -0,0 +1,328 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "HRTFElevation.h" + +#include <speex/speex_resampler.h> +#include "mozilla/PodOperations.h" +#include "AudioSampleFormat.h" + +#include "IRC_Composite_C_R0195-incl.cpp" + +using namespace std; +using namespace mozilla; + +namespace WebCore { + +const int elevationSpacing = irc_composite_c_r0195_elevation_interval; +const int firstElevation = irc_composite_c_r0195_first_elevation; +const int numberOfElevations = MOZ_ARRAY_LENGTH(irc_composite_c_r0195); + +const unsigned HRTFElevation::NumberOfTotalAzimuths = 360 / 15 * 8; + +const int rawSampleRate = irc_composite_c_r0195_sample_rate; + +// Number of frames in an individual impulse response. 
+const size_t ResponseFrameSize = 256; + +size_t HRTFElevation::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + + amount += m_kernelListL.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_kernelListL.Length(); i++) { + amount += m_kernelListL[i]->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +size_t HRTFElevation::fftSizeForSampleRate(float sampleRate) +{ + // The IRCAM HRTF impulse responses were 512 sample-frames @44.1KHz, + // but these have been truncated to 256 samples. + // An FFT-size of twice impulse response size is used (for convolution). + // So for sample rates of 44.1KHz an FFT size of 512 is good. + // We double the FFT-size only for sample rates at least double this. + // If the FFT size is too large then the impulse response will be padded + // with zeros without the fade-out provided by HRTFKernel. + MOZ_ASSERT(sampleRate > 1.0 && sampleRate < 1048576.0); + + // This is the size if we were to use all raw response samples. + unsigned resampledLength = + floorf(ResponseFrameSize * sampleRate / rawSampleRate); + // Keep things semi-sane, with max FFT size of 1024. + unsigned size = min(resampledLength, 1023U); + // Ensure a minimum of 2 * WEBAUDIO_BLOCK_SIZE (with the size++ below) for + // FFTConvolver and set the 8 least significant bits for rounding up to + // the next power of 2 below. + size |= 2 * WEBAUDIO_BLOCK_SIZE - 1; + // Round up to the next power of 2, making the FFT size no more than twice + // the impulse response length. This doubles size for values that are + // already powers of 2. This works by filling in alls bit to right of the + // most significant bit. The most significant bit is no greater than + // 1 << 9, and the least significant 8 bits were already set above, so + // there is at most one bit to add. 
+ size |= (size >> 1); + size++; + MOZ_ASSERT((size & (size - 1)) == 0); + + return size; +} + +nsReturnRef<HRTFKernel> HRTFElevation::calculateKernelForAzimuthElevation(int azimuth, int elevation, SpeexResamplerState* resampler, float sampleRate) +{ + int elevationIndex = (elevation - firstElevation) / elevationSpacing; + MOZ_ASSERT(elevationIndex >= 0 && elevationIndex <= numberOfElevations); + + int numberOfAzimuths = irc_composite_c_r0195[elevationIndex].count; + int azimuthSpacing = 360 / numberOfAzimuths; + MOZ_ASSERT(numberOfAzimuths * azimuthSpacing == 360); + + int azimuthIndex = azimuth / azimuthSpacing; + MOZ_ASSERT(azimuthIndex * azimuthSpacing == azimuth); + + const int16_t (&impulse_response_data)[ResponseFrameSize] = + irc_composite_c_r0195[elevationIndex].azimuths[azimuthIndex]; + + // When libspeex_resampler is compiled with FIXED_POINT, samples in + // speex_resampler_process_float are rounded directly to int16_t, which + // only works well if the floats are in the range +/-32767. On such + // platforms it's better to resample before converting to float anyway. +#ifdef MOZ_SAMPLE_TYPE_S16 +# define RESAMPLER_PROCESS speex_resampler_process_int + const int16_t* response = impulse_response_data; + const int16_t* resampledResponse; +#else +# define RESAMPLER_PROCESS speex_resampler_process_float + float response[ResponseFrameSize]; + ConvertAudioSamples(impulse_response_data, response, ResponseFrameSize); + float* resampledResponse; +#endif + + // Note that depending on the fftSize returned by the panner, we may be truncating the impulse response. 
+ const size_t resampledResponseLength = fftSizeForSampleRate(sampleRate) / 2; + + AutoTArray<AudioDataValue, 2 * ResponseFrameSize> resampled; + if (sampleRate == rawSampleRate) { + resampledResponse = response; + MOZ_ASSERT(resampledResponseLength == ResponseFrameSize); + } else { + resampled.SetLength(resampledResponseLength); + resampledResponse = resampled.Elements(); + speex_resampler_skip_zeros(resampler); + + // Feed the input buffer into the resampler. + spx_uint32_t in_len = ResponseFrameSize; + spx_uint32_t out_len = resampled.Length(); + RESAMPLER_PROCESS(resampler, 0, response, &in_len, + resampled.Elements(), &out_len); + + if (out_len < resampled.Length()) { + // The input should have all been processed. + MOZ_ASSERT(in_len == ResponseFrameSize); + // Feed in zeros get the data remaining in the resampler. + spx_uint32_t out_index = out_len; + in_len = speex_resampler_get_input_latency(resampler); + out_len = resampled.Length() - out_index; + RESAMPLER_PROCESS(resampler, 0, nullptr, &in_len, + resampled.Elements() + out_index, &out_len); + out_index += out_len; + // There may be some uninitialized samples remaining for very low + // sample rates. + PodZero(resampled.Elements() + out_index, + resampled.Length() - out_index); + } + + speex_resampler_reset_mem(resampler); + } + +#ifdef MOZ_SAMPLE_TYPE_S16 + AutoTArray<float, 2 * ResponseFrameSize> floatArray; + floatArray.SetLength(resampledResponseLength); + float *floatResponse = floatArray.Elements(); + ConvertAudioSamples(resampledResponse, + floatResponse, resampledResponseLength); +#else + float *floatResponse = resampledResponse; +#endif +#undef RESAMPLER_PROCESS + + return HRTFKernel::create(floatResponse, resampledResponseLength, sampleRate); +} + +// The range of elevations for the IRCAM impulse responses varies depending on azimuth, but the minimum elevation appears to always be -45. 
+// +// Here's how it goes: +static int maxElevations[] = { + // Azimuth + // + 90, // 0 + 45, // 15 + 60, // 30 + 45, // 45 + 75, // 60 + 45, // 75 + 60, // 90 + 45, // 105 + 75, // 120 + 45, // 135 + 60, // 150 + 45, // 165 + 75, // 180 + 45, // 195 + 60, // 210 + 45, // 225 + 75, // 240 + 45, // 255 + 60, // 270 + 45, // 285 + 75, // 300 + 45, // 315 + 60, // 330 + 45 // 345 +}; + +nsReturnRef<HRTFElevation> HRTFElevation::createBuiltin(int elevation, float sampleRate) +{ + if (elevation < firstElevation || + elevation > firstElevation + numberOfElevations * elevationSpacing || + (elevation / elevationSpacing) * elevationSpacing != elevation) + return nsReturnRef<HRTFElevation>(); + + // Spacing, in degrees, between every azimuth loaded from resource. + // Some elevations do not have data for all these intervals. + // See maxElevations. + static const unsigned AzimuthSpacing = 15; + static const unsigned NumberOfRawAzimuths = 360 / AzimuthSpacing; + static_assert(AzimuthSpacing * NumberOfRawAzimuths == 360, + "Not a multiple"); + static const unsigned InterpolationFactor = + NumberOfTotalAzimuths / NumberOfRawAzimuths; + static_assert(NumberOfTotalAzimuths == + NumberOfRawAzimuths * InterpolationFactor, "Not a multiple"); + + HRTFKernelList kernelListL; + kernelListL.SetLength(NumberOfTotalAzimuths); + + SpeexResamplerState* resampler = sampleRate == rawSampleRate ? nullptr : + speex_resampler_init(1, rawSampleRate, sampleRate, + SPEEX_RESAMPLER_QUALITY_MIN, nullptr); + + // Load convolution kernels from HRTF files. + int interpolatedIndex = 0; + for (unsigned rawIndex = 0; rawIndex < NumberOfRawAzimuths; ++rawIndex) { + // Don't let elevation exceed maximum for this azimuth. 
+ int maxElevation = maxElevations[rawIndex]; + int actualElevation = min(elevation, maxElevation); + + kernelListL[interpolatedIndex] = calculateKernelForAzimuthElevation(rawIndex * AzimuthSpacing, actualElevation, resampler, sampleRate); + + interpolatedIndex += InterpolationFactor; + } + + if (resampler) + speex_resampler_destroy(resampler); + + // Now go back and interpolate intermediate azimuth values. + for (unsigned i = 0; i < NumberOfTotalAzimuths; i += InterpolationFactor) { + int j = (i + InterpolationFactor) % NumberOfTotalAzimuths; + + // Create the interpolated convolution kernels and delays. + for (unsigned jj = 1; jj < InterpolationFactor; ++jj) { + float x = float(jj) / float(InterpolationFactor); // interpolate from 0 -> 1 + + kernelListL[i + jj] = HRTFKernel::createInterpolatedKernel(kernelListL[i], kernelListL[j], x); + } + } + + return nsReturnRef<HRTFElevation>(new HRTFElevation(&kernelListL, elevation, sampleRate)); +} + +nsReturnRef<HRTFElevation> HRTFElevation::createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate) +{ + MOZ_ASSERT(hrtfElevation1 && hrtfElevation2); + if (!hrtfElevation1 || !hrtfElevation2) + return nsReturnRef<HRTFElevation>(); + + MOZ_ASSERT(x >= 0.0 && x < 1.0); + + HRTFKernelList kernelListL; + kernelListL.SetLength(NumberOfTotalAzimuths); + + const HRTFKernelList& kernelListL1 = hrtfElevation1->kernelListL(); + const HRTFKernelList& kernelListL2 = hrtfElevation2->kernelListL(); + + // Interpolate kernels of corresponding azimuths of the two elevations. + for (unsigned i = 0; i < NumberOfTotalAzimuths; ++i) { + kernelListL[i] = HRTFKernel::createInterpolatedKernel(kernelListL1[i], kernelListL2[i], x); + } + + // Interpolate elevation angle. 
+ double angle = (1.0 - x) * hrtfElevation1->elevationAngle() + x * hrtfElevation2->elevationAngle(); + + return nsReturnRef<HRTFElevation>(new HRTFElevation(&kernelListL, static_cast<int>(angle), sampleRate)); +} + +void HRTFElevation::getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR) +{ + bool checkAzimuthBlend = azimuthBlend >= 0.0 && azimuthBlend < 1.0; + MOZ_ASSERT(checkAzimuthBlend); + if (!checkAzimuthBlend) + azimuthBlend = 0.0; + + unsigned numKernels = m_kernelListL.Length(); + + bool isIndexGood = azimuthIndex < numKernels; + MOZ_ASSERT(isIndexGood); + if (!isIndexGood) { + kernelL = 0; + kernelR = 0; + return; + } + + // Return the left and right kernels, + // using symmetry to produce the right kernel. + kernelL = m_kernelListL[azimuthIndex]; + int azimuthIndexR = (numKernels - azimuthIndex) % numKernels; + kernelR = m_kernelListL[azimuthIndexR]; + + frameDelayL = kernelL->frameDelay(); + frameDelayR = kernelR->frameDelay(); + + int azimuthIndex2L = (azimuthIndex + 1) % numKernels; + double frameDelay2L = m_kernelListL[azimuthIndex2L]->frameDelay(); + int azimuthIndex2R = (numKernels - azimuthIndex2L) % numKernels; + double frameDelay2R = m_kernelListL[azimuthIndex2R]->frameDelay(); + + // Linearly interpolate delays. + frameDelayL = (1.0 - azimuthBlend) * frameDelayL + azimuthBlend * frameDelay2L; + frameDelayR = (1.0 - azimuthBlend) * frameDelayR + azimuthBlend * frameDelay2R; +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/HRTFElevation.h b/dom/media/webaudio/blink/HRTFElevation.h new file mode 100644 index 000000000..e50947b12 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFElevation.h @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HRTFElevation_h +#define HRTFElevation_h + +#include "HRTFKernel.h" +#include "nsAutoRef.h" +#include "mozilla/MemoryReporting.h" + +struct SpeexResamplerState_; +typedef struct SpeexResamplerState_ SpeexResamplerState; + +namespace WebCore { + +// HRTFElevation contains all of the HRTFKernels (one left ear and one right ear per azimuth angle) for a particular elevation. + +class HRTFElevation { +public: + // Loads and returns an HRTFElevation with the given HRTF database subject name and elevation from browser (or WebKit.framework) resources. 
+ // Normally, there will only be a single HRTF database set, but this API supports the possibility of multiple ones with different names. + // Interpolated azimuths will be generated based on InterpolationFactor. + // Valid values for elevation are -45 -> +90 in 15 degree increments. + static nsReturnRef<HRTFElevation> createBuiltin(int elevation, float sampleRate); + + // Given two HRTFElevations, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFElevation. + static nsReturnRef<HRTFElevation> createByInterpolatingSlices(HRTFElevation* hrtfElevation1, HRTFElevation* hrtfElevation2, float x, float sampleRate); + + double elevationAngle() const { return m_elevationAngle; } + unsigned numberOfAzimuths() const { return NumberOfTotalAzimuths; } + float sampleRate() const { return m_sampleRate; } + + // Returns the left and right kernels for the given azimuth index. + // The interpolated delays based on azimuthBlend: 0 -> 1 are returned in frameDelayL and frameDelayR. + void getKernelsFromAzimuth(double azimuthBlend, unsigned azimuthIndex, HRTFKernel* &kernelL, HRTFKernel* &kernelR, double& frameDelayL, double& frameDelayR); + + // Total number of azimuths after interpolation. + static const unsigned NumberOfTotalAzimuths; + + static size_t fftSizeForSampleRate(float sampleRate); + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + HRTFElevation(const HRTFElevation& other) = delete; + void operator=(const HRTFElevation& other) = delete; + + HRTFElevation(HRTFKernelList *kernelListL, int elevation, float sampleRate) + : m_elevationAngle(elevation) + , m_sampleRate(sampleRate) + { + m_kernelListL.SwapElements(*kernelListL); + } + + // Returns the list of left ear HRTFKernels for all the azimuths going from 0 to 360 degrees. + const HRTFKernelList& kernelListL() { return m_kernelListL; } + + // Given a specific azimuth and elevation angle, returns the left HRTFKernel. 
+ // Values for azimuth must be multiples of 15 in 0 -> 345, + // but not all azimuths are available for elevations > +45. + // Valid values for elevation are -45 -> +90 in 15 degree increments. + static nsReturnRef<HRTFKernel> calculateKernelForAzimuthElevation(int azimuth, int elevation, SpeexResamplerState* resampler, float sampleRate); + + HRTFKernelList m_kernelListL; + double m_elevationAngle; + float m_sampleRate; +}; + +} // namespace WebCore + +template <> +class nsAutoRefTraits<WebCore::HRTFElevation> : + public nsPointerRefTraits<WebCore::HRTFElevation> { +public: + static void Release(WebCore::HRTFElevation* ptr) { delete(ptr); } +}; + +#endif // HRTFElevation_h diff --git a/dom/media/webaudio/blink/HRTFKernel.cpp b/dom/media/webaudio/blink/HRTFKernel.cpp new file mode 100644 index 000000000..3ee5e63a3 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFKernel.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "HRTFKernel.h" +namespace WebCore { + +// Takes the input audio channel |impulseP| as an input impulse response and calculates the average group delay. +// This represents the initial delay before the most energetic part of the impulse response. +// The sample-frame delay is removed from the |impulseP| impulse response, and this value is returned. +// The |length| of the passed in |impulseP| must be must be a power of 2. +static float extractAverageGroupDelay(float* impulseP, size_t length) +{ + // Check for power-of-2. + MOZ_ASSERT(length && (length & (length - 1)) == 0); + + FFTBlock estimationFrame(length); + estimationFrame.PerformFFT(impulseP); + + float frameDelay = static_cast<float>(estimationFrame.ExtractAverageGroupDelay()); + estimationFrame.GetInverse(impulseP); + + return frameDelay; +} + +HRTFKernel::HRTFKernel(float* impulseResponse, size_t length, float sampleRate) + : m_frameDelay(0) + , m_sampleRate(sampleRate) +{ + AlignedTArray<float> buffer; + // copy to a 32-byte aligned buffer + if (((uintptr_t)impulseResponse & 31) != 0) { + buffer.SetLength(length); + mozilla::PodCopy(buffer.Elements(), impulseResponse, length); + impulseResponse = buffer.Elements(); + } + + // Determine the leading delay (average group delay) for the response. 
+ m_frameDelay = extractAverageGroupDelay(impulseResponse, length); + + // The FFT size (with zero padding) needs to be twice the response length + // in order to do proper convolution. + unsigned fftSize = 2 * length; + + // Quick fade-out (apply window) at truncation point + // because the impulse response has been truncated. + unsigned numberOfFadeOutFrames = static_cast<unsigned>(sampleRate / 4410); // 10 sample-frames @44.1KHz sample-rate + MOZ_ASSERT(numberOfFadeOutFrames < length); + if (numberOfFadeOutFrames < length) { + for (unsigned i = length - numberOfFadeOutFrames; i < length; ++i) { + float x = 1.0f - static_cast<float>(i - (length - numberOfFadeOutFrames)) / numberOfFadeOutFrames; + impulseResponse[i] *= x; + } + } + + m_fftFrame = new FFTBlock(fftSize); + m_fftFrame->PadAndMakeScaledDFT(impulseResponse, length); +} + +// Interpolates two kernels with x: 0 -> 1 and returns the result. +nsReturnRef<HRTFKernel> HRTFKernel::createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x) +{ + MOZ_ASSERT(kernel1 && kernel2); + if (!kernel1 || !kernel2) + return nsReturnRef<HRTFKernel>(); + + MOZ_ASSERT(x >= 0.0 && x < 1.0); + x = mozilla::clamped(x, 0.0f, 1.0f); + + float sampleRate1 = kernel1->sampleRate(); + float sampleRate2 = kernel2->sampleRate(); + MOZ_ASSERT(sampleRate1 == sampleRate2); + if (sampleRate1 != sampleRate2) + return nsReturnRef<HRTFKernel>(); + + float frameDelay = (1 - x) * kernel1->frameDelay() + x * kernel2->frameDelay(); + + nsAutoPtr<FFTBlock> interpolatedFrame( + FFTBlock::CreateInterpolatedBlock(*kernel1->fftFrame(), *kernel2->fftFrame(), x)); + return HRTFKernel::create(interpolatedFrame, frameDelay, sampleRate1); +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/HRTFKernel.h b/dom/media/webaudio/blink/HRTFKernel.h new file mode 100644 index 000000000..940e69b13 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFKernel.h @@ -0,0 +1,119 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef HRTFKernel_h +#define HRTFKernel_h + +#include "nsAutoPtr.h" +#include "nsAutoRef.h" +#include "nsTArray.h" +#include "mozilla/FFTBlock.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +using mozilla::FFTBlock; + +// HRTF stands for Head-Related Transfer Function. +// HRTFKernel is a frequency-domain representation of an impulse-response used as part of the spatialized panning system. 
+// For a given azimuth / elevation angle there will be one HRTFKernel for the left ear transfer function, and one for the right ear. +// The leading delay (average group delay) for each impulse response is extracted: +// m_fftFrame is the frequency-domain representation of the impulse response with the delay removed +// m_frameDelay is the leading delay of the original impulse response. +class HRTFKernel { +public: + // Note: this is destructive on the passed in |impulseResponse|. + // The |length| of |impulseResponse| must be a power of two. + // The size of the DFT will be |2 * length|. + static nsReturnRef<HRTFKernel> create(float* impulseResponse, size_t length, float sampleRate); + + static nsReturnRef<HRTFKernel> create(nsAutoPtr<FFTBlock> fftFrame, float frameDelay, float sampleRate); + + // Given two HRTFKernels, and an interpolation factor x: 0 -> 1, returns an interpolated HRTFKernel. + static nsReturnRef<HRTFKernel> createInterpolatedKernel(HRTFKernel* kernel1, HRTFKernel* kernel2, float x); + + FFTBlock* fftFrame() { return m_fftFrame.get(); } + + size_t fftSize() const { return m_fftFrame->FFTSize(); } + float frameDelay() const { return m_frameDelay; } + + float sampleRate() const { return m_sampleRate; } + double nyquist() const { return 0.5 * sampleRate(); } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const + { + size_t amount = aMallocSizeOf(this); + amount += m_fftFrame->SizeOfIncludingThis(aMallocSizeOf); + return amount; + } + +private: + HRTFKernel(const HRTFKernel& other) = delete; + void operator=(const HRTFKernel& other) = delete; + + // Note: this is destructive on the passed in |impulseResponse|. 
+ HRTFKernel(float* impulseResponse, size_t fftSize, float sampleRate); + + HRTFKernel(nsAutoPtr<FFTBlock> fftFrame, float frameDelay, float sampleRate) + : m_fftFrame(fftFrame) + , m_frameDelay(frameDelay) + , m_sampleRate(sampleRate) + { + } + + nsAutoPtr<FFTBlock> m_fftFrame; + float m_frameDelay; + float m_sampleRate; +}; + +typedef nsTArray<nsAutoRef<HRTFKernel> > HRTFKernelList; + +} // namespace WebCore + +template <> +class nsAutoRefTraits<WebCore::HRTFKernel> : + public nsPointerRefTraits<WebCore::HRTFKernel> { +public: + static void Release(WebCore::HRTFKernel* ptr) { delete(ptr); } +}; + +namespace WebCore { + +inline nsReturnRef<HRTFKernel> HRTFKernel::create(float* impulseResponse, size_t length, float sampleRate) +{ + return nsReturnRef<HRTFKernel>(new HRTFKernel(impulseResponse, length, sampleRate)); +} + +inline nsReturnRef<HRTFKernel> HRTFKernel::create(nsAutoPtr<FFTBlock> fftFrame, float frameDelay, float sampleRate) +{ + return nsReturnRef<HRTFKernel>(new HRTFKernel(fftFrame, frameDelay, sampleRate)); +} + +} // namespace WebCore + +#endif // HRTFKernel_h diff --git a/dom/media/webaudio/blink/HRTFPanner.cpp b/dom/media/webaudio/blink/HRTFPanner.cpp new file mode 100644 index 000000000..c97ce4767 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFPanner.cpp @@ -0,0 +1,324 @@ +/* + * Copyright (C) 2010, Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
AND ITS CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "HRTFPanner.h" +#include "HRTFDatabaseLoader.h" + +#include "FFTConvolver.h" +#include "HRTFDatabase.h" +#include "AudioBlock.h" + +using namespace std; +using namespace mozilla; +using dom::ChannelInterpretation; + +namespace WebCore { + +// The value of 2 milliseconds is larger than the largest delay which exists in any HRTFKernel from the default HRTFDatabase (0.0136 seconds). +// We ASSERT the delay values used in process() with this value. 
+const double MaxDelayTimeSeconds = 0.002; + +const int UninitializedAzimuth = -1; +const unsigned RenderingQuantum = WEBAUDIO_BLOCK_SIZE; + +HRTFPanner::HRTFPanner(float sampleRate, already_AddRefed<HRTFDatabaseLoader> databaseLoader) + : m_databaseLoader(databaseLoader) + , m_sampleRate(sampleRate) + , m_crossfadeSelection(CrossfadeSelection1) + , m_azimuthIndex1(UninitializedAzimuth) + , m_azimuthIndex2(UninitializedAzimuth) + // m_elevation1 and m_elevation2 are initialized in pan() + , m_crossfadeX(0) + , m_crossfadeIncr(0) + , m_convolverL1(HRTFElevation::fftSizeForSampleRate(sampleRate)) + , m_convolverR1(m_convolverL1.fftSize()) + , m_convolverL2(m_convolverL1.fftSize()) + , m_convolverR2(m_convolverL1.fftSize()) + , m_delayLine(MaxDelayTimeSeconds * sampleRate, 1.0) +{ + MOZ_ASSERT(m_databaseLoader); + MOZ_COUNT_CTOR(HRTFPanner); +} + +HRTFPanner::~HRTFPanner() +{ + MOZ_COUNT_DTOR(HRTFPanner); +} + +size_t HRTFPanner::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + + // NB: m_databaseLoader can be shared, so it is not measured here + amount += m_convolverL1.sizeOfExcludingThis(aMallocSizeOf); + amount += m_convolverR1.sizeOfExcludingThis(aMallocSizeOf); + amount += m_convolverL2.sizeOfExcludingThis(aMallocSizeOf); + amount += m_convolverR2.sizeOfExcludingThis(aMallocSizeOf); + amount += m_delayLine.SizeOfExcludingThis(aMallocSizeOf); + + return amount; +} + +void HRTFPanner::reset() +{ + m_azimuthIndex1 = UninitializedAzimuth; + m_azimuthIndex2 = UninitializedAzimuth; + // m_elevation1 and m_elevation2 are initialized in pan() + m_crossfadeSelection = CrossfadeSelection1; + m_crossfadeX = 0.0f; + m_crossfadeIncr = 0.0f; + m_convolverL1.reset(); + m_convolverR1.reset(); + m_convolverL2.reset(); + m_convolverR2.reset(); + m_delayLine.Reset(); +} + +int HRTFPanner::calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend) +{ + // Convert the azimuth angle from the range -180 -> +180 
into the range 0 -> 360. + // The azimuth index may then be calculated from this positive value. + if (azimuth < 0) + azimuth += 360.0; + + HRTFDatabase* database = m_databaseLoader->database(); + MOZ_ASSERT(database); + + int numberOfAzimuths = database->numberOfAzimuths(); + const double angleBetweenAzimuths = 360.0 / numberOfAzimuths; + + // Calculate the azimuth index and the blend (0 -> 1) for interpolation. + double desiredAzimuthIndexFloat = azimuth / angleBetweenAzimuths; + int desiredAzimuthIndex = static_cast<int>(desiredAzimuthIndexFloat); + azimuthBlend = desiredAzimuthIndexFloat - static_cast<double>(desiredAzimuthIndex); + + // We don't immediately start using this azimuth index, but instead approach this index from the last index we rendered at. + // This minimizes the clicks and graininess for moving sources which occur otherwise. + desiredAzimuthIndex = max(0, desiredAzimuthIndex); + desiredAzimuthIndex = min(numberOfAzimuths - 1, desiredAzimuthIndex); + return desiredAzimuthIndex; +} + +void HRTFPanner::pan(double desiredAzimuth, double elevation, const AudioBlock* inputBus, AudioBlock* outputBus) +{ +#ifdef DEBUG + unsigned numInputChannels = + inputBus->IsNull() ? 0 : inputBus->ChannelCount(); + + MOZ_ASSERT(numInputChannels <= 2); + MOZ_ASSERT(inputBus->GetDuration() == WEBAUDIO_BLOCK_SIZE); +#endif + + bool isOutputGood = outputBus && outputBus->ChannelCount() == 2 && outputBus->GetDuration() == WEBAUDIO_BLOCK_SIZE; + MOZ_ASSERT(isOutputGood); + + if (!isOutputGood) { + if (outputBus) + outputBus->SetNull(outputBus->GetDuration()); + return; + } + + HRTFDatabase* database = m_databaseLoader->database(); + if (!database) { // not yet loaded + outputBus->SetNull(outputBus->GetDuration()); + return; + } + + // IRCAM HRTF azimuths values from the loaded database is reversed from the panner's notion of azimuth. 
+ double azimuth = -desiredAzimuth; + + bool isAzimuthGood = azimuth >= -180.0 && azimuth <= 180.0; + MOZ_ASSERT(isAzimuthGood); + if (!isAzimuthGood) { + outputBus->SetNull(outputBus->GetDuration()); + return; + } + + // Normally, we'll just be dealing with mono sources. + // If we have a stereo input, implement stereo panning with left source processed by left HRTF, and right source by right HRTF. + + // Get destination pointers. + float* destinationL = + static_cast<float*>(const_cast<void*>(outputBus->mChannelData[0])); + float* destinationR = + static_cast<float*>(const_cast<void*>(outputBus->mChannelData[1])); + + double azimuthBlend; + int desiredAzimuthIndex = calculateDesiredAzimuthIndexAndBlend(azimuth, azimuthBlend); + + // Initially snap azimuth and elevation values to first values encountered. + if (m_azimuthIndex1 == UninitializedAzimuth) { + m_azimuthIndex1 = desiredAzimuthIndex; + m_elevation1 = elevation; + } + if (m_azimuthIndex2 == UninitializedAzimuth) { + m_azimuthIndex2 = desiredAzimuthIndex; + m_elevation2 = elevation; + } + + // Cross-fade / transition over a period of around 45 milliseconds. + // This is an empirical value tuned to be a reasonable trade-off between + // smoothness and speed. + const double fadeFrames = sampleRate() <= 48000 ? 2048 : 4096; + + // Check for azimuth and elevation changes, initiating a cross-fade if needed. 
+ if (!m_crossfadeX && m_crossfadeSelection == CrossfadeSelection1) { + if (desiredAzimuthIndex != m_azimuthIndex1 || elevation != m_elevation1) { + // Cross-fade from 1 -> 2 + m_crossfadeIncr = 1 / fadeFrames; + m_azimuthIndex2 = desiredAzimuthIndex; + m_elevation2 = elevation; + } + } + if (m_crossfadeX == 1 && m_crossfadeSelection == CrossfadeSelection2) { + if (desiredAzimuthIndex != m_azimuthIndex2 || elevation != m_elevation2) { + // Cross-fade from 2 -> 1 + m_crossfadeIncr = -1 / fadeFrames; + m_azimuthIndex1 = desiredAzimuthIndex; + m_elevation1 = elevation; + } + } + + // Get the HRTFKernels and interpolated delays. + HRTFKernel* kernelL1; + HRTFKernel* kernelR1; + HRTFKernel* kernelL2; + HRTFKernel* kernelR2; + double frameDelayL1; + double frameDelayR1; + double frameDelayL2; + double frameDelayR2; + database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex1, m_elevation1, kernelL1, kernelR1, frameDelayL1, frameDelayR1); + database->getKernelsFromAzimuthElevation(azimuthBlend, m_azimuthIndex2, m_elevation2, kernelL2, kernelR2, frameDelayL2, frameDelayR2); + + bool areKernelsGood = kernelL1 && kernelR1 && kernelL2 && kernelR2; + MOZ_ASSERT(areKernelsGood); + if (!areKernelsGood) { + outputBus->SetNull(outputBus->GetDuration()); + return; + } + + MOZ_ASSERT(frameDelayL1 / sampleRate() < MaxDelayTimeSeconds && frameDelayR1 / sampleRate() < MaxDelayTimeSeconds); + MOZ_ASSERT(frameDelayL2 / sampleRate() < MaxDelayTimeSeconds && frameDelayR2 / sampleRate() < MaxDelayTimeSeconds); + + // Crossfade inter-aural delays based on transitions. 
+ double frameDelaysL[WEBAUDIO_BLOCK_SIZE]; + double frameDelaysR[WEBAUDIO_BLOCK_SIZE]; + { + float x = m_crossfadeX; + float incr = m_crossfadeIncr; + for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + frameDelaysL[i] = (1 - x) * frameDelayL1 + x * frameDelayL2; + frameDelaysR[i] = (1 - x) * frameDelayR1 + x * frameDelayR2; + x += incr; + } + } + + // First run through delay lines for inter-aural time difference. + m_delayLine.Write(*inputBus); + // "Speakers" means a mono input is read into both outputs (with possibly + // different delays). + m_delayLine.ReadChannel(frameDelaysL, outputBus, 0, + ChannelInterpretation::Speakers); + m_delayLine.ReadChannel(frameDelaysR, outputBus, 1, + ChannelInterpretation::Speakers); + m_delayLine.NextBlock(); + + bool needsCrossfading = m_crossfadeIncr; + + const float* convolutionDestinationL1; + const float* convolutionDestinationR1; + const float* convolutionDestinationL2; + const float* convolutionDestinationR2; + + // Now do the convolutions. + // Note that we avoid doing convolutions on both sets of convolvers if we're not currently cross-fading. + + if (m_crossfadeSelection == CrossfadeSelection1 || needsCrossfading) { + convolutionDestinationL1 = + m_convolverL1.process(kernelL1->fftFrame(), destinationL); + convolutionDestinationR1 = + m_convolverR1.process(kernelR1->fftFrame(), destinationR); + } + + if (m_crossfadeSelection == CrossfadeSelection2 || needsCrossfading) { + convolutionDestinationL2 = + m_convolverL2.process(kernelL2->fftFrame(), destinationL); + convolutionDestinationR2 = + m_convolverR2.process(kernelR2->fftFrame(), destinationR); + } + + if (needsCrossfading) { + // Apply linear cross-fade. 
+ float x = m_crossfadeX; + float incr = m_crossfadeIncr; + for (unsigned i = 0; i < WEBAUDIO_BLOCK_SIZE; ++i) { + destinationL[i] = (1 - x) * convolutionDestinationL1[i] + x * convolutionDestinationL2[i]; + destinationR[i] = (1 - x) * convolutionDestinationR1[i] + x * convolutionDestinationR2[i]; + x += incr; + } + // Update cross-fade value from local. + m_crossfadeX = x; + + if (m_crossfadeIncr > 0 && fabs(m_crossfadeX - 1) < m_crossfadeIncr) { + // We've fully made the crossfade transition from 1 -> 2. + m_crossfadeSelection = CrossfadeSelection2; + m_crossfadeX = 1; + m_crossfadeIncr = 0; + } else if (m_crossfadeIncr < 0 && fabs(m_crossfadeX) < -m_crossfadeIncr) { + // We've fully made the crossfade transition from 2 -> 1. + m_crossfadeSelection = CrossfadeSelection1; + m_crossfadeX = 0; + m_crossfadeIncr = 0; + } + } else { + const float* sourceL; + const float* sourceR; + if (m_crossfadeSelection == CrossfadeSelection1) { + sourceL = convolutionDestinationL1; + sourceR = convolutionDestinationR1; + } else { + sourceL = convolutionDestinationL2; + sourceR = convolutionDestinationR2; + } + PodCopy(destinationL, sourceL, WEBAUDIO_BLOCK_SIZE); + PodCopy(destinationR, sourceR, WEBAUDIO_BLOCK_SIZE); + } +} + +int HRTFPanner::maxTailFrames() const +{ + // Although the ideal tail time would be the length of the impulse + // response, there is additional tail time from the approximations in the + // implementation. Because HRTFPanner is implemented with a DelayKernel + // and a FFTConvolver, the tailTime of the HRTFPanner is the sum of the + // tailTime of the DelayKernel and the tailTime of the FFTConvolver. The + // FFTs of the convolver are fftSize(), half of which is latency, but this + // is aligned with blocks and so is reduced by the one block which is + // processed immediately. 
+ return m_delayLine.MaxDelayTicks() + + m_convolverL1.fftSize()/2 + m_convolverL1.latencyFrames(); +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/HRTFPanner.h b/dom/media/webaudio/blink/HRTFPanner.h new file mode 100644 index 000000000..f56d0d423 --- /dev/null +++ b/dom/media/webaudio/blink/HRTFPanner.h @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2010, Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef HRTFPanner_h +#define HRTFPanner_h + +#include "FFTConvolver.h" +#include "DelayBuffer.h" +#include "mozilla/MemoryReporting.h" + +namespace mozilla { +class AudioBlock; +} // namespace mozilla + +namespace WebCore { + +typedef nsTArray<float> AudioFloatArray; + +class HRTFDatabaseLoader; + +using mozilla::AudioBlock; + +class HRTFPanner { +public: + HRTFPanner(float sampleRate, already_AddRefed<HRTFDatabaseLoader> databaseLoader); + ~HRTFPanner(); + + // chunk durations must be 128 + void pan(double azimuth, double elevation, const AudioBlock* inputBus, AudioBlock* outputBus); + void reset(); + + size_t fftSize() const { return m_convolverL1.fftSize(); } + + float sampleRate() const { return m_sampleRate; } + + int maxTailFrames() const; + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + // Given an azimuth angle in the range -180 -> +180, returns the corresponding azimuth index for the database, + // and azimuthBlend which is an interpolation value from 0 -> 1. + int calculateDesiredAzimuthIndexAndBlend(double azimuth, double& azimuthBlend); + + RefPtr<HRTFDatabaseLoader> m_databaseLoader; + + float m_sampleRate; + + // We maintain two sets of convolvers for smooth cross-faded interpolations when + // then azimuth and elevation are dynamically changing. + // When the azimuth and elevation are not changing, we simply process with one of the two sets. + // Initially we use CrossfadeSelection1 corresponding to m_convolverL1 and m_convolverR1. + // Whenever the azimuth or elevation changes, a crossfade is initiated to transition + // to the new position. So if we're currently processing with CrossfadeSelection1, then + // we transition to CrossfadeSelection2 (and vice versa). + // If we're in the middle of a transition, then we wait until it is complete before + // initiating a new transition. + + // Selects either the convolver set (m_convolverL1, m_convolverR1) or (m_convolverL2, m_convolverR2). 
+ enum CrossfadeSelection { + CrossfadeSelection1, + CrossfadeSelection2 + }; + + CrossfadeSelection m_crossfadeSelection; + + // azimuth/elevation for CrossfadeSelection1. + int m_azimuthIndex1; + double m_elevation1; + + // azimuth/elevation for CrossfadeSelection2. + int m_azimuthIndex2; + double m_elevation2; + + // A crossfade value 0 <= m_crossfadeX <= 1. + float m_crossfadeX; + + // Per-sample-frame crossfade value increment. + float m_crossfadeIncr; + + FFTConvolver m_convolverL1; + FFTConvolver m_convolverR1; + FFTConvolver m_convolverL2; + FFTConvolver m_convolverR2; + + mozilla::DelayBuffer m_delayLine; + + AudioFloatArray m_tempL1; + AudioFloatArray m_tempR1; + AudioFloatArray m_tempL2; + AudioFloatArray m_tempR2; +}; + +} // namespace WebCore + +#endif // HRTFPanner_h diff --git a/dom/media/webaudio/blink/IIRFilter.cpp b/dom/media/webaudio/blink/IIRFilter.cpp new file mode 100644 index 000000000..94ec129c7 --- /dev/null +++ b/dom/media/webaudio/blink/IIRFilter.cpp @@ -0,0 +1,166 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "IIRFilter.h" + +#include <complex> + +namespace blink { + +// The length of the memory buffers for the IIR filter. This MUST be a power of two and must be +// greater than the possible length of the filter coefficients. 
+const int kBufferLength = 32; +static_assert(kBufferLength >= IIRFilter::kMaxOrder + 1, + "Internal IIR buffer length must be greater than maximum IIR Filter order."); + +IIRFilter::IIRFilter(const AudioDoubleArray* feedforwardCoef, const AudioDoubleArray* feedbackCoef) + : m_bufferIndex(0) + , m_feedback(feedbackCoef) + , m_feedforward(feedforwardCoef) +{ + m_xBuffer.SetLength(kBufferLength); + m_yBuffer.SetLength(kBufferLength); + reset(); +} + +IIRFilter::~IIRFilter() +{ +} + +void IIRFilter::reset() +{ + memset(m_xBuffer.Elements(), 0, m_xBuffer.Length() * sizeof(double)); + memset(m_yBuffer.Elements(), 0, m_yBuffer.Length() * sizeof(double)); +} + +static std::complex<double> evaluatePolynomial(const double* coef, std::complex<double> z, int order) +{ + // Use Horner's method to evaluate the polynomial P(z) = sum(coef[k]*z^k, k, 0, order); + std::complex<double> result = 0; + + for (int k = order; k >= 0; --k) + result = result * z + std::complex<double>(coef[k]); + + return result; +} + +void IIRFilter::process(const float* sourceP, float* destP, size_t framesToProcess) +{ + // Compute + // + // y[n] = sum(b[k] * x[n - k], k = 0, M) - sum(a[k] * y[n - k], k = 1, N) + // + // where b[k] are the feedforward coefficients and a[k] are the feedback coefficients of the + // filter. + + // This is a Direct Form I implementation of an IIR Filter. Should we consider doing a + // different implementation such as Transposed Direct Form II? + const double* feedback = m_feedback->Elements(); + const double* feedforward = m_feedforward->Elements(); + + MOZ_ASSERT(feedback); + MOZ_ASSERT(feedforward); + + // Sanity check to see if the feedback coefficients have been scaled appropriately. It must + // be EXACTLY 1! 
+ MOZ_ASSERT(feedback[0] == 1); + + int feedbackLength = m_feedback->Length(); + int feedforwardLength = m_feedforward->Length(); + int minLength = std::min(feedbackLength, feedforwardLength); + + double* xBuffer = m_xBuffer.Elements(); + double* yBuffer = m_yBuffer.Elements(); + + for (size_t n = 0; n < framesToProcess; ++n) { + // To help minimize roundoff, we compute using double's, even though the filter coefficients + // only have single precision values. + double yn = feedforward[0] * sourceP[n]; + + // Run both the feedforward and feedback terms together, when possible. + for (int k = 1; k < minLength; ++k) { + int n = (m_bufferIndex - k) & (kBufferLength - 1); + yn += feedforward[k] * xBuffer[n]; + yn -= feedback[k] * yBuffer[n]; + } + + // Handle any remaining feedforward or feedback terms. + for (int k = minLength; k < feedforwardLength; ++k) + yn += feedforward[k] * xBuffer[(m_bufferIndex - k) & (kBufferLength - 1)]; + + for (int k = minLength; k < feedbackLength; ++k) + yn -= feedback[k] * yBuffer[(m_bufferIndex - k) & (kBufferLength - 1)]; + + // Save the current input and output values in the memory buffers for the next output. + m_xBuffer[m_bufferIndex] = sourceP[n]; + m_yBuffer[m_bufferIndex] = yn; + + m_bufferIndex = (m_bufferIndex + 1) & (kBufferLength - 1); + + // Avoid introducing a stream of subnormals + // TODO: Remove this code when Bug 1157635 is fixed. + if (fabs(yn) >= FLT_MIN) { + destP[n] = yn; + } else { + destP[n] = 0.0; + } + } +} + +void IIRFilter::getFrequencyResponse(int nFrequencies, const float* frequency, float* magResponse, float* phaseResponse) +{ + // Evaluate the z-transform of the filter at the given normalized frequencies from 0 to 1. (One + // corresponds to the Nyquist frequency.) + // + // The z-tranform of the filter is + // + // H(z) = sum(b[k]*z^(-k), k, 0, M) / sum(a[k]*z^(-k), k, 0, N); + // + // The desired frequency response is H(exp(j*omega)), where omega is in [0, 1). 
+ // + // Let P(x) = sum(c[k]*x^k, k, 0, P) be a polynomial of order P. Then each of the sums in H(z) + // is equivalent to evaluating a polynomial at the point 1/z. + + for (int k = 0; k < nFrequencies; ++k) { + // zRecip = 1/z = exp(-j*frequency) + double omega = -M_PI * frequency[k]; + std::complex<double> zRecip = std::complex<double>(cos(omega), sin(omega)); + + std::complex<double> numerator = evaluatePolynomial(m_feedforward->Elements(), zRecip, m_feedforward->Length() - 1); + std::complex<double> denominator = evaluatePolynomial(m_feedback->Elements(), zRecip, m_feedback->Length() - 1); + // Strangely enough, using complex division: + // e.g. Complex response = numerator / denominator; + // fails on our test machines, yielding infinities and NaNs, so we do + // things the long way here. + double n = norm(denominator); + double r = (real(numerator)*real(denominator) + imag(numerator)*imag(denominator)) / n; + double i = (imag(numerator)*real(denominator) - real(numerator)*imag(denominator)) / n; + std::complex<double> response = std::complex<double>(r, i); + + magResponse[k] = static_cast<float>(abs(response)); + phaseResponse[k] = static_cast<float>(atan2(imag(response), real(response))); + } +} + +bool IIRFilter::buffersAreZero() +{ + double* xBuffer = m_xBuffer.Elements(); + double* yBuffer = m_yBuffer.Elements(); + + for (size_t k = 0; k < m_feedforward->Length(); ++k) { + if (xBuffer[(m_bufferIndex - k) & (kBufferLength - 1)] != 0.0) { + return false; + } + } + + for (size_t k = 0; k < m_feedback->Length(); ++k) { + if (fabs(yBuffer[(m_bufferIndex - k) & (kBufferLength - 1)]) >= FLT_MIN) { + return false; + } + } + + return true; +} + +} // namespace blink diff --git a/dom/media/webaudio/blink/IIRFilter.h b/dom/media/webaudio/blink/IIRFilter.h new file mode 100644 index 000000000..5656d959a --- /dev/null +++ b/dom/media/webaudio/blink/IIRFilter.h @@ -0,0 +1,58 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef IIRFilter_h +#define IIRFilter_h + +typedef nsTArray<double> AudioDoubleArray; + +namespace blink { + +class IIRFilter final { +public: + // The maximum IIR filter order. This also limits the number of feedforward coefficients. The + // maximum number of coefficients is 20 according to the spec. + const static size_t kMaxOrder = 19; + IIRFilter(const AudioDoubleArray* feedforwardCoef, const AudioDoubleArray* feedbackCoef); + ~IIRFilter(); + + void process(const float* sourceP, float* destP, size_t framesToProcess); + + void reset(); + + void getFrequencyResponse(int nFrequencies, + const float* frequency, + float* magResponse, + float* phaseResponse); + + bool buffersAreZero(); + +private: + // Filter memory + // + // For simplicity, we assume |m_xBuffer| and |m_yBuffer| have the same length, and the length is + // a power of two. Since the number of coefficients has a fixed upper length, the size of + // xBuffer and yBuffer is fixed. |m_xBuffer| holds the old input values and |m_yBuffer| holds + // the old output values needed to compute the new output value. + // + // m_yBuffer[m_bufferIndex] holds the most recent output value, say, y[n]. Then + // m_yBuffer[m_bufferIndex - k] is y[n - k]. Similarly for m_xBuffer. + // + // To minimize roundoff, these arrays are double's instead of floats. + AudioDoubleArray m_xBuffer; + AudioDoubleArray m_yBuffer; + + // Index into the xBuffer and yBuffer arrays where the most current x and y values should be + // stored. xBuffer[bufferIndex] corresponds to x[n], the current x input value and + // yBuffer[bufferIndex] is where y[n], the current output value. + int m_bufferIndex; + + // Coefficients of the IIR filter. 
+ const AudioDoubleArray* m_feedback; + const AudioDoubleArray* m_feedforward; +}; + +} // namespace blink + +#endif // IIRFilter_h diff --git a/dom/media/webaudio/blink/IRC_Composite_C_R0195-incl.cpp b/dom/media/webaudio/blink/IRC_Composite_C_R0195-incl.cpp new file mode 100644 index 000000000..daffb114f --- /dev/null +++ b/dom/media/webaudio/blink/IRC_Composite_C_R0195-incl.cpp @@ -0,0 +1,449 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/** + * The sample data in the arrays here was derived for Webkit by Chris Rogers + * through averaging of impulse responses from the IRCAM Listen HRTF Database. + * The responses here are half the length of the Listen responses. This + * sample data has been granted to the Public Domain. + * + * This file is intended to be included in compilation of a single + * implementation file. + * + * Each elevation (p) contains impulse responses at a varying number of + * equally spaced azimuths for the left ear, ordered clockwise from in front + * the listener. 
+ */ + +#include "mozilla/ArrayUtils.h" + +using mozilla::ArrayLength; + +const int16_t irc_composite_c_r0195_p315[][256] = + {/* IRC_Composite_C_R0195_T000_P315.wav */ + {-37,37,-38,39,-39,40,-41,42,-42,43,-43,44,-44,44,-44,44,-43,42,-39,36,-31,23,-10,-10,5,-655,-410,-552,-353,-474,-1525,-758,656,-263,70,1414,-1528,-731,-1811,1646,1312,-1501,-407,8893,-1543,7696,8084,2629,-2452,-234,3799,1676,177,-1077,-474,-1325,3527,985,265,-884,373,971,1024,-412,507,-173,259,-799,635,-628,507,-344,394,-359,178,-276,349,-201,137,-249,377,-311,263,-404,284,-244,173,-243,330,-320,112,-150,164,-142,174,-300,158,-197,13,-141,85,-190,64,-122,41,-122,60,-195,125,-163,10,-67,-6,-122,77,-133,26,-71,-42,-156,48,-152,-12,-89,-120,-104,-37,-154,-57,-139,-80,-165,-95,-242,-81,-146,-111,-178,-109,-208,-48,-178,-131,-163,-68,-169,-94,-190,-139,-190,-118,-204,-160,-220,-140,-204,-171,-238,-126,-203,-114,-209,-138,-177,-124,-184,-130,-175,-170,-185,-180,-231,-189,-233,-210,-236,-245,-288,-208,-329,-246,-274,-199,-273,-189,-267,-208,-215,-199,-187,-209,-206,-210,-123,-197,-156,-173,-142,-97,-123,-97,-107,-73,-84,-39,-50,-66,-11,-50,-12,-51,8,-27,19,-48,-9,-18,5,-42,-15,-35,-31,-27,-27,-64,-33,-54,-1,-98,-47,-56,-7,-76,-47,-70,-42,-54,-65,-76,-43,-57,-9,-61,-39,-58,33,-39,3,-34,20,-19,4,-71,61,-22,10}, + /* IRC_Composite_C_R0195_T015_P315.wav */ + 
{81,-82,83,-84,84,-85,86,-87,87,-87,87,-87,87,-86,84,-82,78,-72,63,-49,23,33,-344,-481,-563,-443,-265,-1527,-713,251,-1453,939,2510,-1221,-1282,-1307,806,585,-990,-82,9029,-4621,9096,11230,4611,-3051,400,2105,749,1644,165,-1556,-1521,3009,1430,723,-902,933,187,28,480,951,-214,122,-730,-95,-137,573,-593,558,-692,-62,28,16,-505,426,-283,227,-320,210,-374,303,-435,127,-128,76,-349,106,-364,139,-348,184,-425,-36,-441,91,-413,93,-444,33,-257,-74,-414,93,-379,19,-327,-43,-366,79,-293,20,-199,-76,-207,149,-239,68,-247,66,-219,61,-232,38,-212,36,-209,-27,-209,-58,-227,-6,-309,-12,-225,-60,-199,-10,-277,19,-207,-69,-211,-30,-261,-24,-233,-102,-217,-131,-247,-144,-229,-111,-256,-104,-254,-130,-241,-66,-249,-88,-223,-144,-199,-148,-229,-101,-242,-189,-240,-163,-308,-147,-285,-217,-239,-224,-291,-173,-269,-220,-209,-208,-240,-180,-212,-201,-217,-169,-184,-174,-178,-146,-209,-108,-186,-96,-108,-90,-120,-44,-101,-49,-39,-22,-44,1,-58,3,-19,-21,-8,-6,-22,-9,18,-15,-7,-23,-12,-24,-11,-43,-33,-33,-31,-57,-5,-64,-22,-45,-16,-87,-8,-40,-45,-57,-14,-53,-5,-47,-21,-45,-8,-33,13,-16,3,-29,6,-18,23,-31,24,-41}, + /* IRC_Composite_C_R0195_T030_P315.wav */ + 
{9,-9,8,-8,8,-8,8,-7,7,-6,5,-4,3,-2,0,2,-5,9,-14,23,-127,-380,-922,-742,-548,257,-2362,972,-1653,1079,1193,1855,-423,-3573,-1945,3974,274,-2796,11656,-988,5085,9362,10293,-750,-3105,-1864,2111,2283,-318,-221,-1158,-110,2923,410,-981,900,122,-303,833,532,-152,20,-462,17,-48,336,235,63,-647,577,-362,-19,-259,102,168,-103,-338,-333,24,-9,-458,287,-470,-273,-216,46,-484,13,-220,-143,-392,-290,-340,-198,-301,-237,-241,-176,-489,-201,-271,-252,-335,-60,-395,-225,-412,-191,-198,-164,-259,-64,-380,-75,-287,-9,-159,-126,-100,-116,-155,-96,-113,-23,-100,-117,-115,-164,-180,8,-173,-57,-208,-89,-129,-73,-188,-102,-184,-93,-204,-81,-217,-128,-180,-66,-279,-127,-244,-206,-146,-178,-190,-135,-228,-154,-205,-100,-196,-143,-185,-140,-235,-135,-209,-144,-158,-250,-202,-234,-227,-157,-214,-201,-260,-180,-244,-184,-234,-151,-192,-198,-223,-199,-199,-202,-189,-164,-233,-143,-220,-175,-186,-107,-156,-84,-181,-106,-116,-72,-63,-29,-76,-11,-83,15,-65,30,-2,-4,-41,4,-49,38,-44,57,-2,9,-61,11,-29,35,-27,2,-50,-23,-17,1,-57,2,-26,-14,-67,-39,-9,-30,-30,-21,-44,-39,-75,-12,-27,-13,-32,13,-22,-16,5,-2,23,-25,48,-24,-32,-30}, + /* IRC_Composite_C_R0195_T045_P315.wav */ + 
{52,-54,57,-59,62,-64,67,-71,74,-78,82,-86,91,-95,100,-104,107,-105,80,-502,-332,-993,-162,-1907,1505,-2021,-314,756,-842,5057,-1148,-2263,-3462,3639,-1435,4503,-272,9367,494,8006,14353,933,-5033,-2453,2315,1073,2918,-1236,-3010,2416,371,1956,-531,-1294,-177,691,1104,-312,402,-1300,693,-586,315,-26,538,-594,294,-267,-119,-580,378,28,261,-1010,385,-342,8,-224,217,-399,43,-603,-1,-295,-252,167,-340,-419,-328,-348,-86,-497,-57,-294,-381,-499,-201,-454,-232,-518,-17,-717,-310,-542,-84,-458,-96,-313,-154,-496,-112,-226,-165,-242,-163,-234,-306,-270,-70,-238,-108,-139,-101,-251,-218,-175,-17,-215,-23,-205,-109,-221,-12,-131,-27,-205,-45,-264,-158,-151,-4,-169,-113,-251,-102,-247,-100,-151,-158,-212,-170,-190,-217,-220,-105,-182,-191,-216,-210,-208,-188,-144,-93,-288,-132,-248,-124,-281,-112,-217,-92,-321,-80,-343,-94,-222,-100,-231,-229,-216,-139,-209,-164,-138,-158,-204,-180,-171,-145,-125,-68,-150,-139,-133,-97,-43,-133,-83,-49,-109,-53,-101,-20,-84,23,-64,-10,-116,38,-50,15,-23,-2,-3,-27,-28,3,14,36,-13,-21,-18,-38,28,-26,9,-12,-28,-46,-29,-8,8,18,-21,-12,-47,16,-23,-21,-22,-28,-21,-46,-32,37,-14,-1,-31,11,-4,-17,51,-42,23,-30,55}, + /* IRC_Composite_C_R0195_T060_P315.wav */ + 
{-9,10,-10,10,-11,11,-11,12,-12,13,-13,14,-14,14,-15,15,-14,14,-237,-853,-211,-839,-537,-1171,1035,-1099,1039,294,-1596,6549,-2739,-2660,-4050,4749,2134,7195,4024,6882,-1377,13010,8996,-6905,-3319,-3088,4606,2892,2461,-4423,-1310,1787,1273,1672,-1868,-79,1190,70,-141,-131,222,-677,570,-820,675,-811,128,-382,165,-353,-183,-560,68,-440,-382,-163,-67,-467,-152,-570,177,-472,-46,-374,-58,-324,-179,-380,-114,-308,-223,-231,-266,-228,-188,-382,-93,-468,-172,-439,-277,-467,-233,-484,-158,-431,-177,-375,-153,-360,-208,-377,-90,-359,-252,-321,-261,-288,-236,-352,-117,-397,-126,-318,-103,-229,-203,-302,-132,-152,-113,-159,-188,-103,-160,-141,-121,-237,-158,-186,-215,-126,-180,-203,-129,-258,-131,-232,-90,-192,-139,-222,-58,-238,-81,-214,-111,-210,-119,-232,-97,-257,-79,-254,-124,-244,-169,-224,-170,-218,-187,-194,-208,-142,-212,-136,-216,-224,-142,-210,-181,-238,-144,-243,-81,-233,-107,-193,-82,-124,-113,-114,-86,-61,-72,-88,-74,-42,-62,-68,-57,-28,-20,-32,-42,-4,-55,-25,11,-34,-17,-21,12,-38,-17,-4,-21,-19,-51,-19,-40,-1,-58,-23,-65,-46,-5,-23,-26,-24,-22,36,-42,4,-20,73,-50,19,-61,15,-46,1,-36,-37,-6,-9,13,-34,18,-10,58,-13,38,32,29,4,16,-6}, + /* IRC_Composite_C_R0195_T075_P315.wav */ + 
{-13,13,-14,14,-14,14,-14,14,-14,13,-13,11,-9,5,2,-16,50,-741,214,-932,242,-1432,-236,-662,1347,-571,-1196,4105,-1805,3633,-3512,-1059,-3340,6144,8904,2949,-3056,13761,4639,6581,1016,-7994,-537,2069,9498,-3772,-2314,-3272,3945,434,437,-1200,-83,923,138,258,258,-7,455,-141,284,-478,398,-1090,528,-789,-29,-665,-287,-554,-73,-808,-317,-229,-409,-754,-201,-562,103,-767,-233,-393,90,-725,-54,-341,-112,-375,-320,-304,-39,-501,-232,-150,-220,-432,-164,-401,-81,-462,-293,-278,-139,-468,-172,-335,-180,-318,-233,-226,-103,-361,-214,-194,-168,-399,-129,-254,-92,-285,-156,-134,-28,-353,-136,-218,-68,-245,-164,-257,-128,-294,-188,-259,-264,-283,-216,-266,-192,-249,-243,-221,-193,-297,-159,-234,-247,-176,-224,-169,-180,-199,-220,-117,-192,-112,-197,-146,-176,-92,-190,-108,-218,-108,-210,-116,-237,-105,-215,-104,-222,-112,-224,-93,-273,-105,-266,-128,-293,-88,-283,-37,-279,-49,-211,-28,-227,20,-197,2,-189,32,-205,58,-176,59,-144,57,-167,132,-119,93,-104,104,-78,87,-109,101,-75,67,-98,89,-87,61,-85,56,-78,56,-103,66,-107,43,-101,53,-122,26,-89,31,-77,5,-71,25,-79,9,-93,21,-74,23,-83,51,-92,20,-72,56,-59,56,-36,38,19,39,4,62,-17,56,-23,68}, + /* IRC_Composite_C_R0195_T090_P315.wav */ + 
{87,-92,97,-103,109,-116,123,-131,140,-150,162,-174,189,-205,224,-244,266,-286,51,-859,535,-1364,139,-1195,1589,-390,911,-1048,4305,473,-5108,558,766,2725,10227,-95,2100,7357,10939,4034,-9033,1273,-876,7923,448,-413,-5710,1509,967,2067,-1395,-1318,853,624,-242,51,284,401,299,341,-114,602,-538,101,-460,-168,-515,-276,-591,-395,-537,-566,-206,-633,-470,-595,-421,-387,-497,-528,-554,-277,-415,-451,-399,-376,-198,-441,-287,-406,-190,-420,-196,-316,-218,-371,-230,-316,-251,-337,-304,-180,-217,-314,-203,-175,-197,-285,-220,-238,-109,-305,-167,-234,-146,-316,-92,-192,-70,-236,-136,-80,-80,-227,-175,-115,-77,-157,-162,-216,-61,-249,-168,-273,-182,-316,-207,-288,-178,-282,-287,-286,-206,-333,-257,-356,-226,-357,-253,-372,-144,-341,-201,-306,-120,-225,-145,-256,-69,-151,-103,-191,-78,-159,-72,-231,-86,-209,-102,-254,-75,-225,-131,-246,-122,-189,-129,-238,-123,-178,-85,-185,-80,-172,-78,-165,-42,-131,-62,-158,-35,-137,-15,-123,-23,-84,14,-68,23,-29,8,12,-7,21,17,-7,6,-12,18,-60,33,-67,51,-69,41,-34,41,-45,33,-37,31,-56,11,-29,-2,-76,4,-43,17,-73,-1,-63,19,-62,-23,-71,-23,-50,-30,-62,24,-59,23,-75,63,-53,35,-32,38,-9,16,4,53,-2,34,-15}, + /* IRC_Composite_C_R0195_T105_P315.wav */ + 
{-4,4,-4,4,-4,4,-4,4,-5,5,-5,6,-7,9,-11,17,-28,67,21,-444,-141,-224,-503,-492,56,581,659,-254,970,3243,-1085,-4435,2584,328,6386,5564,-809,5990,6762,10583,-5626,-2592,332,5216,4379,-2326,-2369,-2841,2364,794,414,-2336,841,434,185,-245,467,54,226,66,291,109,-14,-250,-37,-434,-55,-468,-301,-501,-528,-390,-260,-361,-561,-460,-370,-387,-354,-670,-414,-508,-325,-671,-240,-470,-287,-539,-267,-403,-309,-523,-407,-423,-444,-437,-347,-265,-394,-345,-296,-231,-221,-203,-113,-208,-173,-209,-71,-127,-120,-168,-119,-150,-164,-160,-171,-155,-129,-132,-133,-114,-84,-152,-145,-150,-50,-140,-100,-199,-121,-188,-145,-209,-221,-240,-266,-216,-273,-254,-311,-241,-309,-303,-306,-288,-304,-324,-325,-280,-286,-254,-285,-213,-282,-199,-235,-168,-198,-151,-152,-157,-138,-157,-155,-189,-176,-212,-179,-192,-145,-184,-171,-174,-110,-150,-113,-170,-84,-115,-76,-135,-71,-161,-62,-153,-49,-144,-73,-126,-80,-100,-82,-63,-82,-57,-74,14,-40,-10,-30,25,-21,31,-22,45,-26,43,-15,48,-18,20,-19,34,-32,26,-30,33,-39,33,-38,25,-62,21,-45,-12,-65,-6,-49,-13,-61,8,-71,9,-80,15,-87,37,-62,49,-58,33,-30,20,-31,-30,-19,-18,-22,12,-19,18,-18,62,-6,19}, + /* IRC_Composite_C_R0195_T120_P315.wav */ + 
{-8,8,-9,10,-10,11,-12,13,-14,15,-17,18,-20,22,-24,27,-30,33,-36,-6,-353,-241,59,-307,-656,81,435,931,-462,1098,2228,1185,-5297,2389,1039,4366,5054,563,5052,5065,10308,-3912,-1912,-658,6384,3010,-2420,-2503,-2288,1665,1300,-465,-1526,865,689,-244,192,664,-256,-73,265,-57,-312,-7,-322,-127,-354,-230,-494,-275,-378,-331,-253,-267,-433,-173,-259,-320,-294,-344,-567,-293,-505,-543,-515,-293,-398,-306,-402,-421,-360,-360,-466,-547,-514,-519,-327,-326,-417,-471,-219,-314,-177,-232,-161,-243,-135,-249,-154,-136,-89,-210,-64,-203,-123,-169,-90,-247,-100,-141,-68,-108,-41,-101,-97,-95,-145,-67,-56,-122,-193,-178,-180,-188,-181,-281,-271,-252,-234,-263,-302,-242,-343,-214,-391,-204,-407,-189,-457,-208,-383,-206,-352,-186,-336,-183,-268,-158,-243,-145,-229,-99,-205,-110,-223,-83,-256,-120,-241,-124,-258,-103,-238,-73,-205,-48,-194,-12,-168,-14,-161,-15,-200,-2,-184,-29,-168,-39,-203,-35,-143,-59,-138,-57,-120,-39,-78,-38,-71,4,-85,25,-41,42,-65,60,-35,71,-28,56,-41,66,-43,40,-26,-6,-23,-11,-1,-28,3,-16,-8,-25,-4,-23,-19,-21,-43,6,-38,-12,-49,4,-21,-1,-32,9,-8,22,-23,-1,-11,-14,-32,-12,-40,-9,-31,23,-39,25,-13,20,-8,9}, + /* IRC_Composite_C_R0195_T135_P315.wav */ + 
{-23,24,-24,24,-24,24,-24,24,-24,24,-24,24,-24,23,-23,22,-21,19,-17,12,2,-321,142,-352,-116,-71,-511,212,1241,-690,1132,1447,1818,-2325,-1459,2842,2322,3950,-401,7294,1799,9821,384,-3014,-458,4980,4067,-3265,-1817,-2557,1536,1050,141,-1408,467,945,477,-318,841,-225,-162,-191,333,-541,-289,-847,48,-224,-228,-453,-42,-332,-101,-393,-171,-405,-103,-444,-70,-591,-149,-459,-69,-570,-117,-584,-95,-396,-42,-496,-210,-651,-204,-618,-333,-581,-255,-540,-313,-471,-202,-369,-230,-385,-202,-278,-178,-259,-149,-205,-123,-195,-100,-150,-73,-188,-126,-225,-124,-193,-92,-182,-65,-172,-74,-168,-103,-186,-57,-178,-89,-280,-140,-220,-97,-251,-226,-268,-204,-268,-271,-286,-293,-290,-360,-269,-322,-274,-307,-261,-331,-275,-290,-250,-261,-307,-242,-239,-199,-225,-211,-141,-180,-148,-189,-147,-163,-178,-155,-195,-114,-193,-96,-201,-69,-171,-54,-158,-66,-130,-90,-112,-83,-108,-98,-108,-90,-89,-94,-103,-90,-117,-87,-105,-51,-97,-64,-70,-56,-23,-79,7,-88,34,-80,43,-77,63,-50,59,-68,43,-51,41,-54,60,-92,29,-102,48,-85,28,-79,9,-67,13,-66,37,-57,38,-64,47,-47,53,-38,54,-42,32,-33,27,-15,13,-24,-26,-34,-6,-41,23,-23,1,-31,23,-1,22,-48,-14}, + /* IRC_Composite_C_R0195_T150_P315.wav */ + 
{-14,14,-14,14,-14,14,-14,14,-14,14,-15,15,-15,15,-15,15,-15,15,-15,15,-14,13,-9,-299,105,-109,-149,-174,-101,123,928,331,409,1091,1957,-1219,-1800,2020,2641,4225,-365,6765,1276,7468,2095,-1421,-1941,2847,4183,-2838,-2013,-2585,1301,1369,1048,-1018,299,712,620,-358,419,-160,-391,-365,85,-244,-278,-735,101,-159,-46,-323,-112,-300,-73,-375,-177,-408,-241,-349,-232,-359,-171,-320,-131,-293,-2,-318,-62,-331,-78,-385,-166,-449,-212,-441,-266,-408,-251,-382,-246,-414,-254,-434,-274,-407,-251,-444,-252,-399,-189,-349,-176,-289,-151,-238,-106,-181,-45,-201,-17,-170,-8,-151,12,-150,-68,-196,-148,-187,-145,-232,-247,-286,-284,-266,-250,-295,-275,-287,-281,-302,-308,-288,-299,-267,-330,-280,-301,-267,-269,-255,-300,-276,-267,-240,-267,-229,-279,-186,-258,-159,-229,-176,-170,-148,-137,-189,-140,-182,-128,-193,-123,-144,-95,-99,-87,-75,-90,-47,-89,-71,-115,-103,-102,-112,-110,-131,-87,-120,-99,-104,-130,-68,-115,-50,-116,-33,-115,-23,-91,-20,-77,-32,-46,-36,-20,-19,-1,-1,-16,2,-30,-4,-10,2,-12,-25,-22,-46,-39,-40,-29,-51,-40,-76,-30,-47,-17,-48,-11,-52,8,-41,31,-26,41,1,54,-33,53,-2,35,-28,34,-23,25,-31,33,-25,-2,-7,10,-26,-60,-40}, + /* IRC_Composite_C_R0195_T165_P315.wav */ + 
{-17,17,-18,19,-19,20,-21,21,-22,23,-24,25,-26,27,-29,30,-32,33,-35,37,-39,42,-45,48,-52,67,-40,152,75,-119,-31,837,302,348,1063,1535,471,-104,-785,1551,1039,5803,545,3679,3043,5986,50,-1878,1830,-1451,2642,-2294,-1114,-2432,2146,1209,-135,-693,285,141,308,-50,-77,-237,196,-409,80,-447,-119,-467,-15,-415,-52,-429,-63,-216,-215,-423,-211,-250,-99,-343,-232,-231,-78,-380,-66,-225,10,-172,-61,-149,-128,-331,-281,-369,-348,-420,-336,-400,-303,-402,-320,-360,-377,-402,-338,-363,-356,-382,-392,-353,-371,-352,-344,-280,-280,-250,-233,-166,-154,-160,-136,-102,-71,-93,-69,-118,-111,-164,-179,-223,-230,-272,-255,-285,-312,-330,-270,-347,-306,-400,-338,-356,-324,-353,-315,-327,-313,-301,-321,-289,-309,-258,-317,-227,-285,-196,-252,-189,-237,-155,-182,-95,-165,-128,-123,-87,-127,-113,-150,-118,-122,-92,-114,-79,-90,-44,-70,-19,-82,-37,-76,-88,-84,-86,-94,-117,-109,-92,-108,-75,-136,-74,-119,-62,-120,-51,-126,-38,-97,-20,-110,-3,-105,29,-72,41,-66,43,-69,58,-54,55,-37,38,-45,18,-47,10,-73,-1,-104,15,-120,0,-103,12,-106,19,-73,20,-41,43,-9,42,-5,48,-4,46,3,29,-8,24,11,13,-8,11,55,13,32,16,31,9,-51,-42}, + /* IRC_Composite_C_R0195_T180_P315.wav */ + 
{24,-25,26,-27,28,-30,31,-33,34,-36,38,-40,42,-45,47,-50,53,-57,61,-65,70,-76,82,-90,100,-116,158,-120,110,-47,143,-200,288,-83,509,566,678,584,1717,-752,-218,-645,3620,1547,2553,2618,2850,3932,1154,2478,-3553,2655,-1012,521,-1943,-220,418,465,358,-445,32,572,410,189,111,242,-109,240,99,120,-178,-44,-146,-12,-251,-169,-110,-64,-56,-37,-65,38,137,-75,-42,-159,-127,-127,-11,-39,-7,-121,-97,-173,-207,-322,-357,-395,-311,-334,-329,-369,-303,-285,-259,-281,-323,-274,-364,-264,-415,-314,-459,-317,-463,-346,-399,-298,-323,-239,-217,-185,-167,-162,-146,-115,-142,-125,-194,-139,-196,-150,-224,-208,-224,-201,-206,-219,-254,-241,-239,-289,-306,-358,-320,-362,-312,-398,-353,-429,-362,-421,-352,-421,-332,-364,-272,-351,-188,-256,-150,-222,-109,-165,-57,-142,-72,-111,-54,-116,-66,-134,-87,-132,-39,-149,-55,-144,-3,-111,-37,-108,-57,-98,-89,-106,-109,-135,-117,-147,-114,-137,-99,-140,-85,-140,-56,-119,-68,-119,-57,-94,-53,-95,-61,-85,-42,-76,-22,-61,-39,-37,-44,-27,-31,-14,-37,-23,-16,-39,0,-62,-11,-92,2,-64,-27,-61,-33,-39,-34,-35,-26,-27,5,-20,14,-33,30,-28,12,-27,25,-7,7,29,-15,24,-9,67,32,52,-7,19,-10,-34}, + /* IRC_Composite_C_R0195_T195_P315.wav */ + 
{-3,3,-4,4,-4,4,-4,4,-4,4,-4,4,-5,5,-5,5,-5,5,-6,6,-6,7,-7,8,-10,16,-53,-21,71,-52,123,66,-67,138,16,149,471,552,491,879,834,185,-864,886,1624,2747,1377,2335,2675,2476,2941,-204,-424,-902,1080,-594,-652,-972,-15,737,158,15,-103,673,669,117,134,496,369,6,174,211,204,39,24,7,-16,8,-20,185,63,80,52,87,-18,-50,-107,-83,-58,-90,-149,-191,-215,-179,-280,-228,-426,-325,-403,-260,-377,-309,-454,-294,-358,-263,-350,-288,-312,-298,-376,-336,-392,-351,-440,-368,-423,-300,-356,-221,-309,-156,-237,-133,-233,-159,-209,-147,-233,-211,-247,-210,-236,-217,-261,-238,-270,-208,-309,-227,-327,-269,-364,-336,-391,-375,-368,-412,-407,-388,-417,-354,-411,-328,-386,-273,-319,-198,-244,-162,-205,-101,-178,-129,-177,-104,-161,-77,-156,-48,-139,-49,-168,-54,-132,-40,-129,-49,-113,-43,-97,-66,-144,-76,-174,-72,-177,-115,-186,-100,-175,-109,-149,-106,-124,-101,-86,-76,-56,-81,-27,-107,3,-87,-27,-99,-52,-77,-57,-80,-68,-65,-62,-77,-17,-71,-16,-67,-12,-47,-11,-33,-14,-33,-31,-4,-26,-4,-79,6,-73,-6,-74,-4,-61,-7,-62,10,-50,33,-27,0,-4,51,21,29,16,43,17,21,38,16,70,-26,47,-23}, + /* IRC_Composite_C_R0195_T210_P315.wav */ + 
{6,-6,6,-6,6,-6,6,-6,6,-6,6,-7,7,-7,7,-7,7,-7,8,-8,8,-8,9,-9,9,-9,9,-9,9,-6,-16,-81,-27,-64,-11,-116,37,-7,308,224,339,624,595,47,-450,456,1108,1630,1528,1730,2147,2381,2098,1287,-731,283,215,213,-551,-518,308,203,729,-194,652,579,759,441,763,571,528,360,442,343,364,229,431,125,293,81,275,88,238,45,215,155,198,82,61,12,5,-51,-58,-119,-156,-200,-208,-255,-244,-261,-244,-232,-236,-252,-249,-226,-258,-256,-283,-248,-281,-296,-308,-266,-304,-256,-333,-220,-281,-198,-262,-186,-220,-164,-184,-148,-198,-168,-234,-150,-270,-190,-317,-221,-358,-246,-358,-268,-356,-277,-357,-276,-375,-272,-357,-257,-384,-239,-370,-230,-374,-258,-374,-262,-363,-281,-344,-263,-317,-224,-292,-194,-243,-172,-216,-149,-196,-138,-151,-112,-138,-121,-140,-121,-158,-136,-149,-135,-124,-140,-124,-136,-125,-160,-126,-165,-139,-151,-144,-151,-125,-132,-116,-99,-80,-92,-84,-64,-97,-68,-84,-76,-83,-93,-75,-65,-93,-87,-89,-70,-103,-55,-102,-68,-112,-68,-70,-61,-55,-62,-34,-41,-24,-58,-48,-63,-63,-52,-80,-56,-81,-54,-68,-55,-48,-48,-24,-69,-21,-60,-6,-21,-10,-12,-6,14,-8,19,1,57,22,38,15,27,32,33}, + /* IRC_Composite_C_R0195_T225_P315.wav */ + 
{-7,7,-8,8,-8,8,-8,8,-8,8,-8,8,-8,8,-8,8,-8,8,-8,9,-9,9,-9,9,-9,9,-9,9,-6,33,28,58,51,52,71,102,56,94,137,218,326,455,483,632,249,92,428,904,1132,1053,1398,1725,1892,1761,996,419,-35,393,230,-426,83,187,561,443,405,624,567,778,322,714,220,555,133,272,57,175,33,-14,10,34,86,73,113,24,-32,-47,-92,-107,-159,-198,-236,-286,-255,-307,-290,-357,-323,-402,-335,-423,-329,-409,-335,-383,-288,-312,-274,-328,-286,-276,-227,-303,-252,-323,-243,-382,-269,-374,-281,-400,-300,-362,-289,-363,-292,-337,-287,-341,-275,-329,-275,-353,-270,-350,-272,-365,-283,-346,-284,-356,-310,-344,-298,-351,-295,-331,-289,-350,-307,-364,-293,-334,-305,-333,-280,-301,-259,-287,-211,-253,-202,-233,-185,-196,-178,-176,-188,-164,-153,-127,-135,-117,-145,-120,-134,-127,-143,-116,-126,-80,-113,-89,-119,-102,-89,-86,-92,-78,-108,-66,-94,-63,-86,-58,-73,-44,-52,-66,-55,-76,-50,-92,-33,-91,-19,-106,-44,-97,-52,-76,-62,-65,-52,-53,-56,-64,-38,-57,-27,-67,-32,-61,-26,-56,-44,-40,-36,-27,-22,-5,-13,-6,12,-15,8,-1,18,1,-3,6,-13,24,6,33,17,44,11,63,31,57,47,73,42,34,14,5}, + /* IRC_Composite_C_R0195_T240_P315.wav */ + 
{0,0,0,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,58,78,16,22,51,114,84,41,96,70,113,111,119,154,190,332,340,437,381,393,351,291,690,869,1203,1249,1386,1308,1507,1378,1185,595,424,365,626,420,458,252,382,637,446,207,255,488,305,321,49,221,53,63,79,-40,108,-82,101,-101,-13,-203,-152,-221,-220,-235,-289,-282,-281,-296,-224,-344,-241,-368,-252,-366,-268,-359,-308,-318,-288,-298,-317,-264,-285,-261,-303,-269,-298,-270,-320,-331,-336,-351,-327,-351,-348,-331,-347,-276,-351,-246,-372,-243,-370,-280,-365,-300,-410,-295,-413,-330,-447,-345,-440,-333,-430,-333,-415,-326,-403,-318,-379,-309,-372,-303,-357,-286,-300,-274,-299,-260,-274,-222,-258,-207,-249,-178,-228,-179,-202,-181,-164,-194,-132,-181,-100,-149,-105,-124,-85,-117,-92,-115,-104,-129,-96,-119,-72,-137,-65,-104,-51,-87,-75,-80,-56,-63,-66,-73,-81,-74,-88,-75,-87,-74,-68,-80,-63,-77,-50,-74,-36,-76,-42,-77,-46,-59,-47,-48,-54,-32,-62,-21,-50,-24,-43,-33,-23,-40,-10,-7,-13,-16,-4,2,7,17,-10,37,-7,35,11,27,-4,32,-1,15,-21,-1,-28,7,-9,16,21,30,38,52,37,48,38,34,3}, + /* IRC_Composite_C_R0195_T255_P315.wav */ + 
{-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-3,3,-3,3,-3,4,-6,74,75,79,61,128,72,79,70,94,189,182,258,204,285,366,406,480,409,503,603,776,875,1073,1366,1703,1601,1585,1344,1253,1054,708,209,184,245,471,220,82,328,234,143,-43,229,63,146,52,46,143,-26,108,-162,-81,-337,-290,-357,-257,-274,-266,-282,-295,-253,-242,-177,-217,-241,-242,-180,-187,-261,-282,-286,-285,-278,-318,-290,-350,-241,-329,-216,-356,-250,-344,-253,-301,-333,-307,-381,-310,-406,-310,-398,-327,-389,-328,-383,-321,-397,-364,-416,-386,-447,-400,-469,-437,-477,-423,-470,-435,-472,-439,-441,-374,-396,-361,-359,-340,-300,-318,-278,-293,-236,-257,-203,-214,-171,-197,-164,-178,-159,-160,-150,-162,-167,-187,-166,-177,-176,-193,-156,-159,-125,-174,-110,-158,-92,-129,-96,-113,-107,-94,-103,-101,-73,-98,-45,-93,-27,-108,-14,-122,-36,-134,-52,-131,-66,-121,-83,-114,-73,-107,-51,-91,-32,-79,2,-74,2,-68,4,-68,20,-45,9,-63,-6,-50,8,-15,-2,-22,-9,6,4,25,-11,21,-12,18,-6,-1,-16,-16,-15,-8,7,8,-10,14,-14,21,-9,18,-34,-11,0,0,38,9,28,24,33,41,32,56,10,45}, + /* IRC_Composite_C_R0195_T270_P315.wav */ + 
{5,-5,5,-5,5,-5,5,-6,6,-6,6,-6,6,-6,7,-7,7,-7,8,-8,8,-9,9,-10,10,-11,12,-13,14,-16,19,-24,35,-112,-87,-172,-147,-119,-127,-152,-113,-103,-89,-113,-68,-116,-46,22,42,151,321,392,548,700,898,1192,1367,1289,1342,1432,1348,1134,1055,823,716,745,988,819,628,418,468,438,382,415,357,462,457,454,331,457,286,349,199,216,110,272,103,141,96,107,157,39,151,90,143,187,135,145,132,119,90,29,67,9,33,-99,-4,-78,-31,-119,-118,-125,-133,-39,-116,6,-53,-9,-2,-28,-8,-68,-109,-175,-231,-243,-320,-296,-388,-312,-439,-323,-431,-308,-403,-341,-379,-320,-333,-304,-317,-322,-321,-302,-340,-277,-367,-233,-348,-216,-344,-197,-304,-195,-255,-199,-226,-173,-179,-167,-168,-175,-177,-169,-197,-168,-238,-156,-231,-167,-240,-168,-219,-192,-218,-202,-205,-205,-186,-197,-157,-172,-143,-163,-152,-143,-150,-149,-177,-148,-159,-149,-150,-163,-154,-183,-160,-189,-132,-190,-124,-156,-96,-125,-92,-99,-99,-74,-85,-61,-83,-67,-68,-74,-62,-91,-54,-91,-46,-108,-22,-86,-15,-70,-6,-63,-3,-43,-35,-76,-52,-91,-99,-89,-83,-72,-67,-70,-42,-53,-31,-59,-32,-49,-39,-55,-65,-43,-43,-36,-50,-25,-39,-34,-26,-31}, + /* IRC_Composite_C_R0195_T285_P315.wav */ + 
{0,0,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,3,-3,0,-53,-54,-56,-85,-47,-114,-135,-181,-153,-193,-267,-244,-55,-96,1,65,-104,-149,-27,77,318,544,620,776,1096,1388,1457,1232,925,941,734,587,918,901,616,762,669,803,637,587,517,641,585,459,366,454,473,472,377,381,369,477,434,303,311,258,424,217,330,203,348,239,206,163,147,190,93,131,3,88,29,61,25,-4,30,-29,-32,-79,-68,-48,-73,-75,-119,-13,-30,17,-48,5,-35,5,-39,-80,-142,-105,-167,-166,-226,-200,-281,-233,-317,-276,-334,-290,-353,-338,-366,-316,-342,-297,-343,-306,-273,-252,-266,-278,-281,-276,-239,-265,-258,-270,-226,-250,-191,-215,-181,-217,-149,-202,-131,-195,-119,-225,-148,-263,-146,-220,-148,-221,-170,-215,-156,-215,-188,-262,-202,-269,-194,-235,-202,-241,-188,-209,-153,-204,-154,-203,-167,-217,-151,-214,-172,-225,-158,-182,-145,-170,-150,-164,-121,-136,-87,-134,-90,-102,-41,-87,-63,-73,-53,-73,-60,-73,-67,-76,-98,-68,-64,-72,-69,-72,-35,-71,-32,-55,-37,-56,-47,-39,-69,-58,-90,-53,-99,-57,-83,-34,-50,-55,-58,-67,-57,-63,-70,-62,-74,-53,-70,-30,-49,-56,-37,-35,-21,-10,-19,-29}, + /* IRC_Composite_C_R0195_T300_P315.wav */ + 
{2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-3,4,-10,-133,-133,-170,-242,-249,-433,-299,-139,-159,192,133,-70,22,-303,18,-19,635,218,706,1507,1430,1383,2387,1796,1075,392,368,909,611,309,-168,353,2,566,420,570,464,600,533,643,640,611,592,536,352,641,435,676,299,466,377,487,301,349,330,262,326,234,228,152,89,163,56,54,-10,7,-78,-45,-66,-85,-68,-104,-18,4,-26,41,2,31,0,79,5,36,26,46,69,42,-19,-67,-82,-80,-133,-166,-222,-198,-222,-228,-274,-302,-279,-261,-261,-282,-253,-280,-237,-241,-240,-230,-249,-229,-231,-221,-230,-219,-245,-228,-234,-188,-202,-201,-222,-167,-182,-137,-203,-165,-172,-150,-180,-163,-208,-169,-194,-164,-229,-193,-245,-204,-252,-223,-246,-236,-257,-266,-272,-274,-271,-280,-267,-251,-243,-225,-207,-200,-174,-184,-165,-180,-160,-164,-160,-158,-167,-156,-169,-151,-138,-125,-125,-108,-104,-80,-94,-71,-82,-75,-85,-57,-68,-57,-49,-58,-52,-65,-51,-59,-53,-74,-44,-57,-44,-43,-49,-55,-42,-57,-55,-65,-60,-62,-53,-54,-78,-70,-64,-55,-66,-73,-66,-48,-68,-87,-49,-57,-32,-77,-35,-66,-33,-71,-35,-70,15,-17,-11,-33}, + /* IRC_Composite_C_R0195_T315_P315.wav */ + 
{-3,3,-4,4,-4,4,-4,4,-4,4,-4,5,-5,5,-5,5,-5,5,-6,6,-6,6,-6,6,-6,6,-5,3,2,-152,-177,-483,-235,-110,-653,-348,-150,-38,208,331,33,-385,-315,-27,364,811,-94,2504,888,2086,3574,3328,531,314,789,729,404,-337,56,36,119,405,418,656,613,503,327,557,477,459,703,343,212,405,546,432,317,280,459,304,364,271,334,99,204,199,155,38,34,17,94,27,75,-28,51,-48,69,-29,44,-56,45,-30,20,5,41,-28,41,31,56,13,68,19,90,-37,31,-65,21,-87,-51,-142,-103,-155,-98,-191,-137,-221,-116,-207,-139,-209,-136,-203,-147,-207,-168,-209,-164,-226,-176,-243,-198,-258,-194,-267,-197,-246,-196,-229,-191,-208,-190,-182,-197,-201,-171,-193,-170,-221,-151,-220,-141,-245,-167,-251,-184,-255,-210,-246,-238,-245,-223,-258,-247,-284,-254,-278,-249,-311,-255,-297,-217,-263,-193,-236,-176,-198,-176,-178,-155,-172,-140,-181,-132,-195,-91,-184,-92,-165,-90,-121,-68,-117,-88,-97,-47,-73,-55,-70,-16,-46,-13,-61,-26,-38,-50,-48,-41,-43,-40,-60,-49,-55,-58,-57,-47,-68,-57,-81,-49,-74,-61,-74,-29,-70,-35,-79,-39,-87,-35,-94,-49,-86,-55,-65,-49,-58,-43,-30,-32,-23,-33,-5,-23,19,-13,-17}, + /* IRC_Composite_C_R0195_T330_P315.wav */ + 
{-7,7,-7,7,-7,7,-7,7,-7,7,-7,7,-7,7,-7,7,-7,8,-8,8,-8,9,-9,9,-10,11,-11,-55,-296,-386,-360,-162,-486,-689,-594,323,-279,489,253,-225,-799,-208,-290,1653,-875,1421,3185,831,3972,4758,3089,-835,674,981,1685,-351,-114,-17,248,390,777,359,517,535,503,290,712,58,681,346,142,-203,198,199,331,-307,34,132,139,70,114,91,13,123,184,191,4,23,205,163,125,72,141,-40,14,124,-28,33,-51,40,-5,35,-58,73,-52,-16,56,78,-40,-37,45,32,27,55,-7,-15,-18,29,-29,22,-117,6,-57,-69,-79,-66,-162,-105,-133,-121,-164,-165,-210,-152,-236,-167,-217,-215,-247,-204,-244,-201,-239,-194,-273,-180,-249,-181,-223,-192,-224,-139,-212,-188,-198,-169,-226,-134,-219,-153,-208,-148,-204,-149,-219,-168,-214,-162,-245,-160,-257,-219,-229,-208,-279,-218,-292,-266,-282,-268,-275,-227,-298,-216,-236,-164,-238,-151,-226,-142,-217,-147,-203,-145,-177,-146,-138,-122,-131,-73,-151,-51,-110,-31,-107,-6,-86,-9,-40,-10,-36,-1,-49,-9,-42,-8,-74,2,-73,3,-80,-26,-79,6,-86,-19,-76,-29,-75,-32,-78,-38,-65,-46,-77,-46,-88,-55,-75,-74,-82,-34,-77,-58,-74,-44,-61,-16,-21,-21,-1,-32,25,-5,-9,-4}, + /* IRC_Composite_C_R0195_T345_P315.wav */ + 
{-3,3,-3,3,-3,3,-3,3,-3,3,-2,2,-2,2,-1,1,0,0,1,-2,3,-4,6,-9,13,-23,-205,-456,-570,-178,-609,-516,-1049,-349,515,157,-280,1141,-1675,-135,-1096,1502,703,-567,2409,4580,1615,5569,6660,-925,461,21,3064,-94,275,-939,176,137,1125,718,-145,338,911,169,330,365,552,148,369,-393,179,-127,204,80,-84,-339,8,81,-65,-288,145,50,-22,-24,61,62,45,-71,194,159,54,-19,226,-56,24,48,99,-31,-91,-8,53,24,-79,12,113,-69,-3,43,52,-53,25,22,42,-13,-5,-1,64,-132,-15,-42,-12,-176,-19,-103,-113,-146,-76,-99,-119,-177,-139,-96,-193,-229,-107,-210,-181,-214,-106,-219,-211,-156,-157,-151,-246,-153,-144,-151,-254,-173,-147,-195,-202,-155,-196,-154,-199,-151,-187,-147,-192,-144,-157,-216,-170,-139,-200,-196,-193,-158,-269,-172,-256,-190,-293,-226,-252,-229,-318,-238,-250,-265,-306,-166,-254,-191,-300,-137,-229,-129,-286,-136,-186,-156,-221,-108,-162,-109,-143,-76,-137,-51,-129,-13,-99,-24,-56,17,-62,-20,-41,40,-86,44,-52,39,-123,42,-65,39,-115,-16,-46,0,-101,-28,-69,-10,-115,-27,-101,20,-128,3,-110,13,-133,-3,-101,-28,-103,-3,-79,-10,-82,10,-56,27,-77,25,-50,57,-33,20,-49,39}}; + +const int16_t irc_composite_c_r0195_p330[][256] = + {/* IRC_Composite_C_R0195_T000_P330.wav */ + 
{-59,61,-63,65,-67,69,-71,73,-76,78,-81,83,-86,89,-92,95,-98,100,-102,103,-103,99,-90,66,27,-206,-447,-812,-1040,-532,-532,-221,-2282,2737,-1254,634,-40,577,-3131,-820,-289,5598,-4456,4514,11627,-957,5804,7393,108,-5260,1410,3318,1621,-1983,2287,284,1686,1238,2060,-1196,399,489,1551,51,439,-136,620,290,-51,26,548,-418,535,-429,119,-693,256,-317,191,-412,18,-258,-48,-331,42,-245,-151,-84,-244,-168,-112,-248,56,-243,-130,-346,3,-311,-140,-201,-205,-208,-164,-257,-153,-216,-142,-212,-32,-240,-169,-200,-80,-259,-56,-266,-49,-259,-153,-247,-131,-186,-180,-157,-174,-170,-168,-133,-130,-144,-118,-156,-112,-157,-115,-125,-52,-108,-95,-100,-113,-69,-143,-89,-121,-105,-124,-163,-161,-185,-102,-218,-93,-213,-136,-158,-144,-182,-122,-122,-161,-147,-153,-151,-116,-162,-100,-155,-94,-176,-98,-138,-94,-127,-119,-122,-158,-125,-151,-130,-178,-168,-144,-200,-186,-199,-181,-157,-192,-201,-216,-176,-230,-170,-197,-219,-207,-194,-203,-187,-195,-174,-204,-175,-205,-170,-207,-182,-206,-156,-195,-149,-193,-165,-200,-125,-166,-120,-182,-142,-113,-73,-137,-86,-97,-75,-88,-72,-83,-37,-82,-24,-72,-40,-53,24,-38,-23,-48,32,10,7,-19,18,7,43,7,34,32,65,53,56,21,94,55,42}, + /* IRC_Composite_C_R0195_T015_P330.wav */ + 
{-33,32,-32,31,-31,30,-28,27,-25,22,-19,15,-10,4,5,-16,31,-53,84,-136,240,-673,177,-174,-1030,-1321,-76,-1250,-372,289,-2348,2418,1442,-314,-2260,351,-1076,96,-4201,12653,-2793,350,16253,7416,-1734,373,1945,-1722,32,2367,1990,-2906,2200,2573,955,-397,1580,-424,684,371,1473,401,8,-1088,1156,-110,164,-526,361,-233,-337,-706,160,-330,-24,-130,132,-810,170,-491,242,-434,33,-622,355,-505,-308,-68,-61,-264,-71,-244,-294,-284,-179,-295,41,-462,-220,-369,-36,-507,-206,-424,-65,-416,-180,-392,-246,-316,-59,-245,-204,-339,-224,-205,-78,-425,-107,-254,-66,-399,-77,-350,46,-239,-96,-256,-101,-268,-138,-142,-190,-179,-136,-172,-84,-261,-40,-173,25,-283,-32,-282,9,-234,-87,-198,-101,-238,-154,-207,-159,-166,-106,-164,-136,-199,-109,-121,-73,-175,-65,-149,-73,-162,-108,-135,-78,-99,-124,-126,-179,-91,-133,-84,-188,-122,-167,-148,-155,-182,-183,-160,-158,-219,-207,-224,-181,-161,-192,-206,-207,-174,-243,-140,-189,-173,-194,-138,-203,-173,-192,-143,-155,-154,-216,-158,-170,-138,-202,-110,-182,-132,-154,-83,-201,-104,-130,-44,-129,-80,-139,-19,-72,-80,-88,-16,-110,-20,-46,-35,-88,15,-32,6,-67,-9,-16,52,-18,27,27,26,-2,60,51,76,28,98,40,77,33,94,11,62}, + /* IRC_Composite_C_R0195_T030_P330.wav */ + 
{15,-16,18,-19,21,-23,26,-29,32,-36,41,-47,54,-62,73,-86,104,-126,155,-102,601,-1085,-197,-1634,218,-1892,836,-1472,627,332,2248,1679,-4388,-1076,2778,-5228,3611,10385,-5976,10952,11266,9110,-4453,651,-313,-354,33,2898,-224,-1072,2571,1824,-115,539,940,-84,401,1263,337,3,-171,-426,685,-69,-27,-94,-916,-1001,131,-752,-458,-101,-74,-99,-305,-309,-166,-102,238,-512,98,-429,-43,-482,424,-183,-138,-249,-301,-240,-13,-418,-203,-184,-296,-341,-269,-474,-99,-447,-210,-601,-403,-560,-148,-418,-397,-430,-241,-495,-140,-391,-94,-358,-170,-431,-28,-368,-72,-337,-69,-376,-11,-267,-144,-348,-177,-200,-156,-317,-161,-307,-97,-238,-69,-237,-166,-271,-82,-187,-69,-237,-35,-248,-105,-213,-38,-207,-108,-284,-91,-256,-64,-223,-92,-227,-52,-205,-48,-178,-94,-174,-26,-226,-71,-202,-71,-184,-34,-207,-88,-154,-79,-180,-41,-222,-49,-215,-56,-245,-90,-251,-51,-260,-109,-246,-150,-229,-109,-239,-125,-253,-146,-228,-85,-219,-106,-252,-82,-250,-58,-194,-116,-232,-74,-193,-81,-184,-110,-195,-65,-162,-92,-193,-65,-199,-29,-203,0,-193,28,-151,-16,-159,-4,-109,-9,-90,-21,-99,36,-92,27,-82,30,-74,71,-74,64,-43,102,-28,93,18,88,22,98,29,116,21,126,-20,133,-40,143,-66}, + /* IRC_Composite_C_R0195_T045_P330.wav */ + 
{60,-60,60,-60,60,-60,59,-57,55,-51,47,-40,30,-15,-9,50,-138,582,-538,-783,-136,149,-1756,-848,-253,-449,1379,-797,1611,1075,2352,-3846,-4473,1829,1877,4778,13625,-4798,10293,11126,4895,-5087,-2315,-1117,-112,3354,2968,-606,-2642,2599,370,2356,-155,634,-118,50,-430,371,-366,83,83,846,-691,615,-552,-124,-1033,-101,-1482,300,-868,142,-849,-166,-356,234,-620,-26,-586,234,-687,38,-602,184,-537,-8,-502,109,-408,41,-516,144,-611,54,-513,-33,-440,-242,-381,-176,-498,-276,-575,-140,-578,-79,-706,-148,-475,-195,-498,-214,-378,-197,-385,-257,-315,-80,-443,-59,-376,-102,-363,-123,-440,-39,-442,-136,-304,-144,-309,-84,-271,-165,-242,-238,-200,-117,-273,-150,-258,-130,-283,-92,-309,-42,-296,-47,-253,-76,-218,8,-252,-78,-168,-84,-181,-86,-207,-54,-197,-48,-239,-19,-220,-19,-245,-20,-221,-34,-247,-31,-246,-93,-181,-96,-211,-94,-210,-85,-235,-118,-230,-101,-229,-107,-245,-109,-215,-112,-220,-113,-204,-100,-188,-108,-185,-89,-182,-62,-197,-34,-195,-28,-194,-18,-182,-55,-157,-81,-141,-90,-141,-91,-129,-74,-112,-85,-107,-64,-103,-64,-86,-50,-93,-43,-62,-35,-57,-57,-38,-24,-28,-39,-24,-20,-16,22,2,35,17,56,35,75,47,56,59,88,45,61,91,80,67,62,68,59}, + /* IRC_Composite_C_R0195_T060_P330.wav */ + 
{-16,18,-21,24,-27,30,-34,39,-45,51,-59,68,-79,93,-109,120,-488,-209,-843,-446,581,-2538,150,-1048,1674,-298,921,-422,4633,-1379,-3456,-5235,5897,-232,13802,4771,-1871,13790,9150,509,-7321,-2062,426,4376,2825,-66,-4120,2160,1014,1965,-182,10,-66,1012,-1092,605,-694,-103,-630,876,-1446,848,-616,147,-237,208,-605,430,-846,156,-639,-33,-577,-169,-800,-107,-681,-261,-491,-122,-613,-72,-646,-89,-523,-258,-835,-111,-630,-192,-684,-120,-546,-114,-589,-198,-484,-123,-592,-31,-441,-137,-356,-160,-262,-88,-337,-230,-206,-197,-286,-158,-371,-90,-386,-164,-357,-120,-396,-123,-371,-159,-394,-268,-385,-156,-411,-140,-387,-213,-351,-179,-344,-165,-326,-182,-213,-154,-216,-130,-224,-133,-226,-137,-212,-88,-294,-83,-283,-112,-241,-166,-252,-129,-207,-171,-148,-189,-146,-134,-197,-75,-135,-114,-111,-109,-99,-40,-137,-112,-65,-123,-140,-104,-208,-112,-191,-189,-188,-164,-205,-119,-255,-149,-161,-150,-183,-173,-162,-174,-126,-195,-97,-145,-155,-118,-129,-86,-145,-98,-141,-61,-119,-118,-63,-133,-28,-155,2,-161,-3,-128,-39,-100,-78,-65,-82,-72,-98,-47,-85,-70,-46,-75,-12,-101,21,-108,22,-100,40,-92,45,-75,34,-63,31,-40,44,-41,58,-23,74,-14,74,-19,131,-21,113,30,101,49,110,57,108,51}, + /* IRC_Composite_C_R0195_T075_P330.wav */ + 
{19,-20,21,-23,25,-26,28,-31,33,-36,39,-42,45,-49,49,-225,-118,-411,-201,-767,-125,-1198,-1086,1587,699,-114,366,4108,-2207,1045,-7292,822,4055,16836,-2188,1091,15052,6204,1571,-5363,-5126,3112,6714,709,-1466,-3132,1975,682,2154,-1147,580,-537,152,-77,-200,-771,391,-354,-354,-457,389,-921,324,-902,497,-455,-10,-125,532,-709,91,-249,-171,-384,-149,-591,-267,-649,-63,-714,-477,-400,-204,-742,-437,-554,-445,-510,-517,-561,-379,-611,-347,-592,-417,-611,-301,-482,-385,-503,-245,-302,-310,-258,-221,-225,-194,-239,-127,-248,-118,-185,-116,-277,-78,-132,-151,-167,-94,-212,-171,-287,-162,-301,-224,-259,-202,-317,-238,-263,-235,-226,-328,-285,-227,-274,-225,-257,-270,-272,-203,-280,-198,-229,-168,-195,-197,-135,-134,-188,-159,-143,-143,-184,-122,-175,-115,-188,-85,-144,-140,-133,-147,-139,-167,-127,-183,-140,-187,-127,-176,-160,-172,-135,-190,-179,-159,-144,-201,-142,-175,-140,-206,-117,-148,-146,-179,-121,-175,-129,-135,-92,-156,-126,-159,-68,-176,-108,-129,-91,-133,-75,-91,-67,-116,-63,-52,-31,-122,-11,-115,3,-129,15,-110,-22,-119,-11,-83,-62,-34,-39,-48,-27,-43,-19,-79,23,-48,-1,-86,40,-64,16,-37,23,-6,-6,9,18,18,23,24,45,17,82,40,87,28,105,40,116,77,81,80,85}, + /* IRC_Composite_C_R0195_T090_P330.wav */ + 
{6,-4,3,-1,0,3,-5,8,-11,16,-21,29,-39,57,-101,-332,89,-835,-16,8,-1022,-629,-474,1050,924,-984,2683,3640,-2207,-5863,2261,-3476,15168,2953,-3266,10727,14390,3174,-8455,-1966,1449,7129,3075,-3700,-2692,1997,423,1234,-1087,216,-471,1061,-1107,458,-1402,142,319,-88,-425,76,-1043,226,-230,12,-536,-207,-663,129,-455,253,-222,-284,-472,86,-636,265,-368,-261,-539,-160,-407,43,-778,-256,-683,-475,-586,-306,-628,-455,-647,-525,-513,-416,-500,-430,-812,-419,-489,-274,-452,-386,-597,-258,-363,-159,-236,-327,-241,-259,-214,-139,-145,-206,-216,-123,-79,-29,-96,-65,-217,-263,-177,-100,-202,-186,-272,-223,-240,-138,-180,-221,-352,-224,-206,-195,-213,-254,-233,-244,-227,-156,-224,-178,-249,-190,-199,-133,-154,-164,-183,-191,-143,-185,-101,-190,-166,-201,-98,-150,-133,-119,-165,-142,-178,-120,-115,-180,-149,-197,-152,-187,-75,-218,-143,-264,-128,-205,-139,-221,-148,-269,-159,-215,-126,-214,-123,-256,-105,-233,-71,-203,-97,-215,-79,-182,-53,-120,-82,-122,-49,-119,20,-120,24,-141,6,-133,24,-115,14,-116,-39,-71,-8,-71,6,-93,-11,-108,40,-59,43,-147,22,-98,28,-64,17,-87,-13,-67,63,-36,44,-53,62,-77,123,-52,145,-81,82,-33,141,-4,116,8,98,14,137,56,139,-7,138,4}, + /* IRC_Composite_C_R0195_T105_P330.wav */ + 
{61,-61,61,-61,62,-62,62,-62,63,-63,63,-64,64,-65,67,-301,294,-741,-246,-201,457,-1576,-144,-73,2001,-1275,1763,1153,6179,-10039,184,1999,2771,10917,-378,522,13686,11704,-7376,-5609,1748,7382,4547,-1426,-5565,-24,2068,630,-441,-1191,186,450,-243,220,-909,-111,-78,770,-727,124,-899,-156,-79,202,-685,-336,-650,-314,-474,-53,-350,-261,-756,191,-463,128,-417,-139,-484,-53,-226,-113,-375,-280,-491,-342,-457,-195,-589,-428,-623,-328,-572,-393,-551,-458,-577,-399,-453,-401,-526,-392,-496,-278,-442,-319,-518,-259,-377,-158,-346,-171,-366,-187,-308,-53,-156,-44,-233,-111,-203,-9,-156,-124,-286,-249,-248,-172,-147,-210,-273,-296,-215,-151,-154,-166,-209,-170,-207,-76,-118,-174,-253,-208,-154,-172,-145,-172,-242,-211,-169,-70,-176,-103,-198,-93,-133,-36,-100,-118,-191,-149,-134,-127,-138,-220,-229,-254,-179,-191,-192,-230,-211,-235,-161,-158,-161,-230,-195,-206,-174,-165,-144,-216,-190,-173,-127,-136,-156,-157,-179,-150,-128,-122,-138,-124,-152,-83,-109,-30,-73,-66,-103,-55,-77,-32,-75,-113,-125,-86,-47,-60,-39,-89,-29,-60,20,-18,-21,-32,-8,-50,24,-43,-1,-69,-6,-57,11,-53,21,-58,-2,-13,35,-10,30,5,44,12,60,31,79,-4,79,46,97,47,97,36,128,43,132,30,81,38}, + /* IRC_Composite_C_R0195_T120_P330.wav */ + 
{1,4,-8,14,-20,27,-35,44,-54,66,-79,95,-114,135,-159,177,-87,615,-830,163,-909,1304,-1626,-325,297,1206,69,-817,4405,1017,-2560,-616,-3770,4387,14092,-5772,3858,10124,9882,-2074,-2959,-3337,7410,7437,-1780,-6636,-2212,2639,1700,-1078,-1371,-262,1153,-566,417,-320,27,-39,-111,485,-441,-792,-288,-146,-108,-616,-333,-464,-419,-367,-303,-262,-308,-516,-133,-270,-200,-382,-324,-225,-347,-205,-250,-254,-201,-400,-249,-330,-242,-364,-366,-273,-444,-377,-352,-336,-480,-405,-375,-496,-436,-362,-461,-436,-395,-354,-445,-362,-274,-397,-353,-222,-328,-290,-351,-239,-329,-117,-69,-109,-182,-35,-73,-135,-239,-228,-363,-234,-203,-243,-333,-218,-233,-231,-229,-116,-190,-172,-177,-158,-144,-144,-172,-173,-215,-124,-180,-106,-203,-150,-185,-121,-123,-149,-106,-93,-132,-84,-116,-33,-138,-72,-186,-81,-187,-113,-205,-194,-255,-235,-238,-195,-263,-202,-264,-198,-268,-158,-241,-175,-267,-183,-249,-160,-199,-167,-203,-153,-184,-118,-137,-101,-164,-93,-143,-66,-128,-30,-127,-42,-113,-31,-78,-23,-59,-78,-78,-48,-79,-53,-70,-40,-106,-22,-46,-9,-51,-12,-37,-31,-54,-23,-49,-43,-68,-43,-55,-39,-14,-29,-16,-13,-8,32,0,28,5,31,33,68,33,69,42,71,38,89,64,82,64,81,94,57,103,62,51,87}, + /* IRC_Composite_C_R0195_T135_P330.wav */ + 
{13,-11,8,-5,2,2,-7,12,-18,26,-34,45,-58,75,-96,126,-173,263,-756,239,-382,327,-504,374,-988,304,-762,1963,32,1243,612,772,3408,-6680,-2100,10509,4890,-1044,6633,2426,10000,1888,-3554,-3952,7154,5846,-3202,-5792,-2133,1379,2382,-1044,-683,-846,1056,-24,511,18,303,-710,-25,-39,-85,-861,-206,-321,-34,-428,-245,-428,-30,-639,-166,-459,-198,-463,-354,-339,-255,-329,-348,-224,-286,-255,-287,-204,-181,-232,-210,-296,-199,-313,-262,-347,-268,-316,-307,-365,-310,-360,-308,-316,-324,-335,-371,-355,-377,-361,-310,-401,-283,-402,-244,-368,-238,-331,-274,-247,-165,-162,-178,-185,-197,-205,-193,-250,-203,-288,-187,-284,-187,-262,-183,-219,-206,-232,-196,-178,-177,-216,-242,-166,-214,-147,-219,-161,-195,-148,-178,-168,-190,-170,-141,-148,-164,-119,-116,-35,-121,-54,-95,-56,-120,-107,-112,-149,-152,-172,-185,-195,-227,-175,-253,-211,-280,-223,-250,-252,-243,-251,-235,-239,-213,-184,-218,-176,-225,-139,-205,-123,-184,-114,-154,-113,-98,-100,-89,-89,-91,-67,-61,-32,-65,-41,-68,-21,-70,-18,-87,-13,-97,-10,-79,-5,-41,-42,-23,-67,8,-74,15,-75,-43,-63,-41,-18,-81,-7,-69,6,-49,2,-9,-7,21,-8,24,-11,44,32,44,38,36,66,51,91,52,103,44,90,55,87,41,60,28,49}, + /* IRC_Composite_C_R0195_T150_P330.wav */ + 
{15,-18,20,-22,25,-28,31,-35,38,-42,47,-52,57,-63,70,-76,84,-91,97,-95,51,-395,348,-546,384,-515,230,-347,78,797,314,1274,475,2177,1227,-4216,-2348,10076,258,3084,3609,6149,5472,3566,-4049,-1019,5212,3739,-5321,-3637,-1580,2270,1066,-633,-1395,-136,1340,-32,75,-243,507,-628,158,108,-226,-702,-291,-112,-126,-404,-330,-254,-212,-362,-151,-436,-14,-424,-215,-583,-44,-508,-144,-379,-62,-360,-158,-295,-27,-305,-69,-339,-66,-367,-167,-326,-112,-361,-177,-468,-151,-402,-103,-407,-145,-397,-208,-387,-142,-366,-207,-368,-232,-411,-224,-349,-159,-358,-152,-366,-101,-313,-105,-307,-196,-327,-268,-341,-250,-319,-245,-350,-228,-325,-151,-267,-108,-294,-109,-251,-84,-251,-104,-207,-91,-253,-103,-244,-91,-252,-85,-272,-130,-311,-129,-241,-94,-173,-90,-147,-80,-140,-35,-126,-79,-186,-118,-197,-120,-210,-155,-272,-176,-269,-156,-268,-151,-277,-170,-275,-153,-283,-181,-265,-186,-257,-194,-217,-154,-209,-128,-189,-106,-201,-80,-156,-34,-154,-17,-151,12,-97,27,-70,9,-39,-39,-3,-46,-29,-50,-61,-28,-93,-9,-89,-8,-103,1,-74,-1,-70,-9,-35,-44,-51,-51,-29,-31,-30,-13,-35,10,8,7,-4,11,13,35,12,38,37,49,36,56,47,69,81,52,69,15,90,11,52,-17,56}, + /* IRC_Composite_C_R0195_T165_P330.wav */ + 
{-38,38,-39,39,-39,40,-40,41,-42,43,-44,46,-47,49,-52,55,-59,64,-70,79,-90,108,-143,381,-197,374,-312,378,-424,566,-428,1134,331,1309,501,1274,2077,-920,-3821,4175,5452,953,5098,2533,5399,3026,943,-4554,1848,1742,-912,-4176,-875,-267,1882,-418,-1283,-863,1021,217,-28,-198,445,-494,-277,-183,13,-551,-347,-206,-115,-218,-241,-334,-209,-134,-196,-318,-365,-189,-242,-376,-315,-159,-285,-247,-217,-119,-295,-192,-233,-63,-298,-72,-221,-78,-309,-142,-336,-207,-384,-220,-389,-223,-371,-217,-392,-165,-378,-175,-375,-156,-391,-200,-346,-196,-386,-169,-293,-171,-325,-127,-312,-180,-413,-244,-413,-275,-437,-268,-434,-258,-381,-194,-377,-183,-352,-126,-339,-126,-286,-92,-262,-50,-229,-59,-226,-70,-231,-92,-226,-106,-251,-108,-219,-79,-217,-73,-199,-48,-168,-54,-223,-113,-224,-121,-228,-134,-243,-180,-267,-148,-265,-176,-290,-166,-274,-160,-279,-151,-277,-159,-262,-146,-233,-125,-202,-129,-198,-90,-162,-74,-165,-69,-153,-33,-141,-13,-136,2,-114,40,-123,47,-102,43,-95,48,-108,50,-112,44,-100,32,-92,18,-95,22,-82,17,-82,23,-102,19,-84,22,-128,24,-87,44,-69,35,-66,63,-29,79,-26,54,-19,109,-9,110,-5,137,-8,111,4,120,34,92,-13,96,-16,93,-54,92}, + /* IRC_Composite_C_R0195_T180_P330.wav */ + 
{-26,26,-26,25,-25,25,-24,23,-23,22,-21,20,-19,17,-16,14,-11,8,-4,-1,7,-15,27,-46,89,61,-145,147,-71,284,-270,217,-379,526,177,1579,111,1304,-624,3003,-1570,-1854,2371,5215,2766,3717,2594,2071,4289,53,-1269,-2397,2282,-1207,-426,-1536,-96,41,749,-948,-416,181,838,-208,144,-15,-109,-323,76,15,16,-276,4,-8,-1,-123,-108,-209,-91,-130,71,-58,-162,-230,-119,-177,-166,-207,-161,-106,-120,-36,-71,-88,-115,-141,-23,-102,-161,-225,-242,-194,-306,-250,-347,-260,-382,-276,-340,-235,-319,-231,-283,-196,-267,-199,-293,-242,-296,-261,-227,-164,-163,-202,-268,-256,-325,-304,-440,-389,-451,-306,-367,-292,-374,-282,-281,-248,-233,-251,-211,-225,-190,-161,-223,-149,-241,-144,-229,-124,-173,-155,-158,-194,-95,-175,-83,-151,-123,-82,-138,-35,-195,-70,-203,-140,-178,-222,-181,-287,-200,-285,-259,-281,-289,-246,-302,-246,-260,-227,-227,-242,-189,-224,-180,-220,-169,-184,-146,-121,-121,-86,-121,-59,-86,-47,-78,-63,-71,-90,-66,-82,-53,-82,-82,-54,-72,-16,-90,-24,-88,-44,-31,-45,-32,-70,-43,-25,-59,-33,-76,-24,-59,-46,-55,-45,-56,-56,-64,-32,-54,-6,-57,-1,-22,23,2,9,-2,35,54,34,72,22,84,15,94,10,75,34,67,47,21,57,14,36}, + /* IRC_Composite_C_R0195_T195_P330.wav */ + 
{19,-19,19,-20,20,-21,21,-22,23,-23,24,-24,25,-26,26,-27,27,-28,29,-29,30,-30,30,-29,28,-25,18,14,179,64,-45,11,282,14,-136,-276,1166,711,738,-60,1508,162,166,165,449,1532,5355,3392,931,2171,2302,2446,-1456,-644,-2020,1267,216,-603,-1662,-126,427,-150,-158,310,15,459,-99,240,14,137,-140,123,54,92,-77,39,37,33,-11,20,88,136,-9,51,-50,-40,-148,-156,-244,-108,-193,-138,-117,-78,-113,-124,-103,-88,-193,-190,-214,-191,-267,-312,-314,-321,-363,-318,-371,-313,-329,-304,-279,-293,-244,-274,-270,-284,-268,-270,-226,-229,-218,-220,-244,-283,-323,-321,-343,-387,-412,-395,-369,-338,-385,-339,-361,-243,-279,-233,-263,-196,-226,-166,-214,-179,-210,-188,-205,-188,-194,-186,-176,-189,-187,-188,-151,-148,-132,-148,-138,-144,-161,-155,-165,-177,-165,-222,-217,-269,-237,-294,-251,-318,-235,-297,-234,-275,-203,-229,-195,-221,-176,-219,-151,-204,-120,-189,-136,-166,-97,-148,-79,-121,-68,-94,-95,-78,-72,-67,-80,-75,-79,-69,-70,-61,-76,-83,-71,-84,-52,-102,-40,-87,-65,-74,-59,-59,-44,-53,-35,-32,-38,-24,-30,-33,-41,-31,-32,-32,-53,-10,-53,7,-39,3,11,-3,30,29,44,32,63,46,79,39,81,48,51,38,73,36,50,22,77,-11}, + /* IRC_Composite_C_R0195_T210_P330.wav */ + 
{4,-4,4,-4,4,-4,4,-4,4,-4,4,-4,4,-4,4,-4,4,-4,4,-3,3,-3,2,-2,1,0,-1,4,-7,14,-37,-66,67,-14,-33,-186,91,-96,90,124,958,676,187,107,-283,310,1299,1265,1017,1358,3762,3924,898,885,85,1188,235,180,-1266,-91,662,-183,-222,-323,535,278,681,120,382,366,409,299,413,216,314,49,299,205,244,15,349,134,369,-25,355,37,155,-122,149,-90,17,-126,0,-119,-101,-98,-45,-101,-108,-137,-41,-165,-128,-190,-90,-220,-248,-213,-218,-183,-289,-234,-287,-213,-287,-181,-291,-252,-334,-216,-249,-215,-305,-231,-259,-234,-279,-267,-321,-303,-381,-272,-334,-278,-372,-241,-327,-217,-356,-191,-320,-190,-311,-163,-271,-146,-273,-141,-249,-181,-245,-176,-249,-177,-262,-171,-258,-201,-269,-174,-249,-165,-249,-157,-236,-157,-222,-150,-201,-185,-232,-186,-231,-212,-272,-199,-268,-197,-269,-185,-248,-203,-236,-188,-226,-202,-206,-202,-200,-198,-175,-174,-139,-154,-129,-144,-119,-116,-115,-116,-101,-118,-88,-110,-115,-124,-128,-104,-139,-117,-161,-110,-128,-96,-116,-100,-91,-78,-46,-54,-38,-54,-28,-28,-31,-22,-47,-22,-45,-45,-34,-47,-26,-63,-19,-69,15,-46,17,-34,36,-7,28,11,12,21,18,27,33,12,47,14,27,2,38,-2}, + /* IRC_Composite_C_R0195_T225_P330.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,2,-68,-59,-5,-96,48,-93,-175,-114,61,113,32,173,565,369,-5,6,42,629,908,1232,1079,1509,2842,2279,709,505,658,652,456,31,-132,16,714,22,392,243,724,581,610,769,414,780,295,589,257,440,139,342,210,326,138,213,205,269,65,206,39,192,-75,77,-72,119,-93,32,-98,-25,-124,-61,-115,-124,-100,-152,-32,-169,-66,-171,-60,-174,-142,-204,-153,-209,-250,-258,-282,-284,-268,-319,-270,-333,-243,-293,-243,-293,-258,-255,-244,-253,-268,-246,-247,-251,-255,-254,-239,-252,-237,-244,-241,-235,-232,-243,-208,-257,-193,-268,-204,-268,-227,-248,-235,-265,-257,-230,-233,-218,-240,-222,-215,-193,-210,-173,-175,-183,-176,-191,-180,-202,-196,-223,-223,-252,-229,-258,-254,-257,-250,-252,-220,-262,-227,-250,-215,-243,-223,-230,-208,-202,-158,-170,-134,-160,-123,-166,-123,-164,-100,-174,-109,-180,-112,-164,-109,-168,-131,-171,-106,-158,-104,-162,-121,-153,-114,-132,-112,-125,-95,-82,-74,-52,-73,-50,-54,-44,-32,-38,-20,-57,-17,-36,-14,-46,-36,-11,-10,-23,-34,-7,-16,7,-20,-30,-21,-13,-19,-3,-32,-10,-25,9,-16,17,18,-2,23}, + /* IRC_Composite_C_R0195_T240_P330.wav */ + 
{6,-6,6,-6,6,-6,6,-6,7,-7,7,-7,7,-7,7,-7,7,-7,7,-7,8,-8,8,-8,8,-8,8,-8,1,-47,-37,-124,-93,-95,-58,-38,-70,-58,-105,-121,-151,-76,14,175,104,51,147,291,191,-40,158,429,1090,1311,1373,1241,1326,1526,1181,419,583,828,657,621,656,466,551,572,739,747,685,612,423,667,583,601,619,480,483,323,428,299,276,192,162,105,83,89,89,51,98,-46,97,-2,117,-65,34,-69,1,-69,-69,-127,-170,-131,-173,-121,-145,-147,-134,-204,-102,-232,-83,-273,-123,-325,-159,-309,-183,-275,-249,-249,-252,-185,-266,-178,-245,-201,-259,-208,-248,-243,-270,-238,-253,-227,-254,-209,-249,-203,-269,-184,-271,-141,-258,-121,-230,-113,-188,-120,-198,-167,-178,-200,-214,-242,-233,-264,-244,-276,-244,-267,-267,-261,-267,-239,-274,-233,-272,-226,-277,-260,-268,-247,-282,-253,-289,-226,-270,-213,-255,-189,-247,-158,-192,-146,-173,-170,-171,-177,-143,-184,-151,-187,-146,-147,-141,-154,-121,-150,-139,-166,-139,-155,-135,-176,-141,-174,-144,-174,-160,-177,-162,-156,-152,-157,-114,-126,-89,-103,-86,-98,-78,-68,-62,-53,-84,-35,-53,-7,-51,-15,-38,-6,-34,-13,-34,-15,-32,-28,-35,-19,-32,1,-19,7,-27,9,3,4,-14,3,-5,36,2,41}, + /* IRC_Composite_C_R0195_T255_P330.wav */ + 
{-1,1,-1,1,-1,1,-2,2,-2,2,-2,2,-2,2,-2,2,-2,3,-3,3,-3,3,-3,4,-4,4,-5,5,-6,7,-8,10,-13,22,-83,-97,-106,-152,-99,-59,-90,-92,-128,-74,-117,-122,-12,68,86,137,156,199,190,329,533,767,1036,1233,1574,1651,1491,1169,954,781,906,1168,950,370,704,786,629,605,494,653,561,698,550,738,501,619,385,441,218,218,189,132,100,39,147,27,91,-38,77,-24,27,-36,-30,-17,-41,-95,-49,-122,-68,-169,-78,-186,-103,-225,-135,-197,-120,-153,-145,-183,-142,-167,-150,-231,-159,-244,-153,-214,-165,-219,-193,-224,-173,-215,-157,-238,-162,-231,-149,-238,-155,-243,-185,-203,-182,-175,-206,-184,-214,-165,-184,-162,-168,-165,-138,-160,-133,-192,-158,-236,-216,-272,-273,-290,-295,-306,-316,-279,-301,-261,-303,-259,-299,-287,-289,-292,-273,-302,-247,-283,-207,-253,-202,-230,-201,-189,-205,-172,-192,-162,-193,-165,-169,-166,-191,-174,-179,-167,-155,-172,-157,-151,-142,-148,-140,-156,-161,-162,-168,-167,-172,-156,-177,-164,-174,-152,-168,-156,-171,-153,-173,-152,-171,-139,-162,-124,-144,-85,-111,-53,-108,-40,-71,-33,-75,-21,-62,-20,-68,0,-50,5,-37,-3,-30,28,-23,3,-23,12,-19,28,-12,24,-5,41,17,24,-7,17,-24,23}, + /* IRC_Composite_C_R0195_T270_P330.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,-1,1,-1,1,-2,3,-127,-152,-188,-165,-160,-178,-195,-131,-72,-37,-53,-57,-34,48,106,135,127,188,385,765,884,1201,1389,1575,1756,1541,1264,1282,908,615,982,1053,498,403,667,706,647,681,663,732,712,700,422,527,505,426,401,192,156,184,190,100,26,-34,55,-50,19,-119,28,-59,-54,-94,-103,-24,-139,-24,-165,-13,-154,-5,-159,-81,-104,-118,-99,-136,-98,-118,-106,-89,-142,-101,-168,-112,-178,-178,-148,-180,-114,-217,-134,-255,-104,-252,-140,-238,-148,-235,-187,-236,-202,-242,-197,-183,-140,-137,-108,-143,-129,-142,-128,-211,-166,-246,-182,-269,-212,-271,-233,-292,-267,-287,-263,-282,-269,-312,-249,-304,-257,-293,-240,-287,-230,-257,-242,-248,-234,-224,-236,-219,-241,-188,-227,-176,-221,-206,-210,-185,-185,-166,-179,-163,-176,-164,-168,-164,-186,-168,-180,-154,-159,-141,-142,-150,-142,-147,-152,-145,-159,-150,-168,-167,-187,-177,-186,-176,-192,-163,-197,-167,-186,-164,-170,-159,-148,-137,-112,-102,-104,-88,-70,-56,-50,-28,-30,-11,-12,-16,-10,-21,-31,-4,-26,2,15,8,10,-3,-2,5,10,12,-9,0,-5,9,-13,-19,-29,-16}, + /* IRC_Composite_C_R0195_T285_P330.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,-1,1,-1,1,-1,2,-2,3,-6,13,-111,-184,-215,-184,-140,-139,-179,-220,-168,-247,-124,1,-91,-84,42,-80,17,-54,56,218,634,881,967,1181,1587,1636,1273,1643,1650,702,357,915,823,759,711,555,754,615,798,685,720,707,793,638,630,510,595,460,429,211,188,178,148,102,16,162,0,142,-40,140,-32,103,24,-7,-30,11,-24,-1,-87,12,-168,51,-103,64,-105,39,-118,10,-91,-91,-119,-125,-90,-149,-75,-130,-138,-153,-195,-185,-254,-205,-313,-223,-303,-243,-266,-224,-223,-230,-188,-216,-155,-167,-129,-140,-137,-102,-110,-74,-146,-82,-177,-89,-192,-110,-215,-160,-231,-232,-220,-242,-237,-262,-238,-271,-237,-259,-263,-234,-261,-228,-278,-222,-281,-252,-268,-270,-265,-263,-228,-244,-187,-230,-176,-214,-164,-194,-141,-175,-152,-173,-143,-161,-171,-179,-174,-184,-157,-194,-163,-211,-148,-196,-144,-205,-158,-219,-155,-190,-162,-178,-166,-174,-156,-186,-169,-200,-161,-200,-167,-208,-167,-218,-158,-186,-143,-155,-119,-123,-104,-117,-86,-101,-54,-82,-39,-73,-25,-68,-7,-55,-18,-42,23,-14,19,-21,7,-24,19,-4,2,-14,7,-10,12,-14,13,-7,26,-23,8,-14}, + /* IRC_Composite_C_R0195_T300_P330.wav */ + 
{-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,4,-4,4,-4,4,-4,4,-5,-62,-157,-83,-247,-284,-202,-303,-294,-235,-54,-305,15,156,241,-112,-479,-226,774,-232,495,1765,1236,1265,2221,2705,1763,850,505,963,731,757,546,519,206,795,441,708,577,742,596,545,491,526,666,413,377,307,300,457,188,260,34,250,133,251,58,87,249,132,194,-1,200,55,156,95,139,122,50,62,57,84,-3,-71,-62,-99,-113,-176,-122,-254,-180,-227,-166,-230,-202,-293,-196,-279,-248,-326,-256,-320,-217,-260,-211,-244,-154,-191,-125,-171,-113,-160,-81,-135,-55,-102,-70,-95,-24,-42,-53,-78,-73,-93,-89,-149,-139,-195,-180,-235,-192,-245,-233,-268,-234,-245,-224,-256,-221,-249,-225,-220,-233,-215,-250,-226,-218,-236,-221,-224,-181,-198,-176,-191,-173,-185,-156,-184,-162,-178,-171,-172,-156,-184,-183,-174,-192,-186,-179,-189,-180,-212,-184,-191,-167,-199,-217,-182,-203,-172,-210,-178,-224,-155,-220,-185,-224,-199,-194,-203,-201,-206,-192,-196,-188,-165,-158,-177,-147,-145,-112,-128,-98,-113,-74,-77,-54,-65,-67,-32,-44,-10,-54,-13,-29,-7,-15,-1,-23,19,-18,10,-27,11,-16,8,2,12,-12,4,-1,37,18,19,17,36}, + /* IRC_Composite_C_R0195_T315_P330.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-3,-227,-204,-310,-344,-179,-311,-505,-298,-88,-402,115,712,-76,-577,-211,9,558,-404,914,2627,1047,2118,4245,2542,908,788,1382,865,422,674,-11,248,192,1314,191,817,366,966,306,467,192,289,334,100,222,7,227,236,315,254,209,305,479,300,240,335,445,260,199,112,202,84,42,63,85,-45,-100,-80,-73,-157,-181,-120,-209,-167,-259,-161,-262,-234,-281,-209,-243,-208,-221,-245,-173,-195,-197,-241,-151,-201,-166,-240,-132,-182,-104,-150,-85,-104,-75,-148,-59,-141,-36,-144,-33,-133,-71,-89,-7,-47,-109,-70,-108,-44,-152,-113,-164,-122,-182,-216,-170,-212,-172,-263,-155,-228,-178,-252,-176,-207,-200,-242,-206,-220,-204,-208,-195,-218,-188,-193,-156,-217,-159,-200,-119,-211,-133,-181,-131,-177,-169,-158,-166,-171,-195,-175,-181,-214,-193,-224,-184,-218,-204,-219,-183,-222,-197,-222,-197,-226,-215,-204,-205,-207,-216,-212,-210,-224,-222,-213,-210,-205,-202,-194,-174,-155,-148,-134,-144,-126,-89,-101,-75,-94,-84,-69,-47,-37,-69,-67,-26,-35,-13,-29,-17,-23,-3,13,-18,-8,-26,22,-30,20,-15,30,-13,8,-11,23,49,30,39,22,36,16}, + /* IRC_Composite_C_R0195_T330_P330.wav */ + 
{9,-9,9,-10,10,-10,10,-11,11,-12,12,-12,13,-13,14,-15,15,-16,17,-17,18,-19,20,-22,23,-25,30,-50,-332,-388,-131,-617,-214,-313,-733,-659,255,-155,-66,699,-77,-372,-1255,-242,2041,-707,-6,4736,2036,2809,3545,4509,22,-72,1207,1973,-173,508,959,302,542,540,656,123,461,665,400,314,251,504,190,265,35,373,467,91,154,128,447,203,170,90,232,80,149,193,21,-6,-90,-101,-59,-80,-255,-144,-154,-358,-29,-238,-96,-237,-126,-195,15,-288,-107,-147,-89,-276,-120,-223,-134,-174,-181,-152,-158,-179,-71,-138,-116,-177,-93,-189,-60,-206,-162,-159,-113,-111,-150,-123,-93,-106,-83,-101,-52,-113,-43,-99,-10,-99,-66,-88,-58,-127,-64,-152,-99,-152,-144,-200,-127,-259,-116,-227,-129,-245,-98,-258,-94,-239,-152,-227,-154,-225,-147,-242,-169,-202,-172,-212,-140,-232,-141,-182,-139,-168,-130,-205,-127,-137,-140,-176,-175,-160,-166,-171,-205,-188,-193,-212,-199,-238,-185,-248,-173,-246,-186,-227,-206,-245,-184,-218,-210,-235,-210,-228,-175,-238,-206,-213,-189,-196,-171,-200,-159,-123,-146,-114,-128,-128,-81,-88,-83,-80,-52,-124,-70,-58,-40,-45,-69,-50,-10,-13,-16,-42,-4,-25,50,-37,11,-37,37,-4,24,-11,12,-7,33,10,69,29,60,20,87,-1}, + /* IRC_Composite_C_R0195_T345_P330.wav */ + 
{1,0,0,0,-1,1,-2,2,-3,4,-5,6,-7,8,-9,11,-13,15,-17,20,-24,28,-33,40,-47,52,-211,-235,-688,-550,-297,-690,-704,-444,713,-1568,989,470,-39,-1205,323,-714,276,-2380,5748,2929,602,8080,5302,-453,-722,3812,715,-74,-365,1626,369,1163,1544,781,-4,229,668,441,364,676,214,441,36,409,174,565,-210,423,-90,172,-132,46,-99,78,-186,159,-181,96,-244,13,-280,-20,-156,-41,-291,-109,-365,-55,-322,-117,-274,-136,-169,-159,-189,-140,-191,12,-178,0,-295,-70,-206,-88,-236,-75,-169,-78,-191,-95,-204,-87,-146,-66,-185,-107,-238,-113,-249,-119,-185,-98,-187,-131,-176,-95,-137,-31,-142,-17,-163,-50,-120,-40,-114,-19,-100,-43,-142,-64,-163,-81,-167,-107,-184,-140,-220,-122,-200,-146,-203,-155,-165,-178,-177,-198,-166,-175,-153,-160,-161,-178,-163,-171,-143,-138,-136,-139,-146,-131,-151,-132,-167,-143,-153,-146,-165,-157,-219,-159,-215,-151,-222,-190,-225,-204,-218,-208,-214,-189,-218,-191,-221,-190,-214,-185,-216,-196,-215,-178,-200,-217,-218,-194,-162,-182,-170,-189,-185,-152,-157,-123,-144,-122,-111,-99,-99,-89,-76,-63,-81,-56,-61,-31,-49,-85,-23,-30,-14,-9,-21,-45,-18,-1,16,3,5,-5,5,8,-5,64,39,55,37,73,42,56,31,84}}; + +const int16_t irc_composite_c_r0195_p345[][256] = + {/* IRC_Composite_C_R0195_T000_P345.wav */ + 
{2,-1,1,-1,1,-1,1,-1,0,0,0,0,-1,1,-2,2,-3,4,-5,7,-9,11,-15,21,-203,-88,-927,-196,-604,-22,-2141,-496,-650,1585,-548,803,-263,412,-5131,5346,-2103,-7267,12288,1672,1403,10338,7456,-3182,-874,2680,1965,-5296,1356,1865,68,-578,2523,1092,1554,2056,2530,1058,1432,379,1062,-82,-380,-971,369,-376,62,-56,669,-390,190,379,537,98,-46,-35,315,348,110,183,491,-38,19,211,170,-22,-231,-125,-221,138,-512,-336,-126,10,-387,-311,-283,-176,-241,-345,-432,-207,-330,-489,-386,-392,-351,-423,-274,-457,-352,-361,-311,-213,-391,-323,-302,-143,-325,-378,-206,-229,-190,-319,-334,-196,-236,-126,-387,-117,-339,-105,-264,-135,-207,-172,-176,-254,-160,-183,-177,-200,-218,-201,-189,-116,-205,-120,-224,-113,-159,-119,-241,-118,-127,-119,-188,-91,-119,-15,-186,-30,-132,28,-213,-19,-166,21,-190,-46,-185,-49,-159,-65,-168,-65,-190,-59,-218,-46,-217,-24,-231,-74,-204,-52,-181,-89,-211,-94,-196,-70,-219,-104,-200,-55,-181,-86,-186,-39,-155,-66,-178,-1,-187,-47,-188,6,-142,-11,-161,-5,-114,-9,-113,6,-145,-7,-123,-21,-161,-34,-116,-47,-173,-82,-145,-76,-157,-72,-137,-75,-186,-79,-163,-31,-131,-17,-156,-51,-99,26,-74,-5,-101,37,-82,48,-83,35,-130,20}, + /* IRC_Composite_C_R0195_T015_P345.wav */ + 
{-8,8,-9,9,-9,10,-10,11,-11,12,-13,14,-15,17,-19,21,-23,26,-30,34,-31,-453,-1036,-33,-665,328,-1202,-749,-1695,-16,154,1160,807,430,-250,202,-9659,9217,1210,-9352,17225,6331,1506,7124,7939,-4308,-4730,1188,3176,-2475,1424,-48,-469,790,3944,1309,680,1752,2484,1048,341,-282,654,-429,-824,-317,110,-114,-77,514,87,-336,314,152,794,-136,169,-632,188,-172,280,-21,-59,-628,-366,-701,125,-342,195,-484,14,-385,142,-203,-25,-63,-111,-438,-237,-144,-19,-603,-408,-283,-225,-622,-635,-157,-385,-331,-577,-30,-389,-404,-427,-106,-226,-443,-405,-162,-392,-396,-469,17,-462,-322,-529,-39,-465,-308,-396,-19,-390,-307,-364,11,-326,-213,-317,-16,-354,-213,-313,-60,-343,-187,-297,-56,-344,-144,-249,-9,-231,-112,-162,-24,-173,-102,-136,27,-150,-25,-210,56,-219,64,-254,58,-220,23,-244,-12,-203,-8,-244,-39,-226,-35,-259,-51,-231,-34,-263,-76,-247,-60,-275,-44,-227,-27,-295,-46,-242,-22,-251,-17,-192,-18,-252,-5,-165,49,-233,62,-170,73,-217,68,-124,45,-174,23,-102,4,-167,36,-98,28,-187,19,-138,2,-165,-15,-133,-54,-155,-42,-139,-76,-178,-15,-149,-36,-176,14,-154,-30,-174,22,-136,-10,-157,29,-146,8,-148,67,-147,48,-150,87,-144,95,-120,68}, + /* IRC_Composite_C_R0195_T030_P345.wav */ + 
{-5,4,-2,1,1,-3,5,-8,11,-14,18,-22,28,-33,40,-47,52,-40,-408,-1246,-239,-636,412,-1166,-641,-1047,-627,-510,85,1903,1495,1980,-2101,-8149,8335,-7570,2561,15248,-2261,7441,12350,8152,-6960,-897,-1312,-80,35,3139,-1223,-111,782,2694,1229,2248,830,1733,-78,616,211,239,-1106,180,-577,174,-80,366,-414,-235,48,899,-101,-343,251,6,-139,-259,84,-149,-439,-392,-377,-353,-641,-180,-347,-645,-512,18,-444,-412,-353,-372,29,-540,-286,-340,91,-270,81,-387,-257,-287,-105,-191,-258,-436,11,-476,-269,-411,-52,-449,-147,-462,-317,-370,-394,-207,-298,-370,-445,-321,-432,-282,-273,-429,-392,-579,-276,-340,-267,-422,-237,-397,-288,-363,-183,-311,-252,-339,-167,-369,-181,-301,-85,-308,-243,-297,-206,-198,-141,-225,-133,-188,-51,-81,-5,-156,39,-131,-42,-137,-12,-105,-59,-165,-33,-91,-103,-131,-61,-132,-15,-166,-70,-196,-74,-136,-77,-240,-119,-181,-167,-207,-153,-210,-116,-267,-128,-236,-136,-218,-68,-191,-86,-215,-66,-87,-41,-115,-57,-149,39,-73,32,-134,21,-60,58,-96,16,-33,13,-37,-10,-59,0,-62,-11,-108,-26,-126,5,-173,-25,-133,-22,-188,-62,-152,-27,-144,-75,-133,-61,-105,-49,-114,-43,-128,-43,-94,-30,-105,-18,-125,-14,-97,30,-123,23,-65,42,-66,35,-56}, + /* IRC_Composite_C_R0195_T045_P345.wav */ + 
{-8,9,-9,10,-11,12,-14,15,-17,18,-20,23,-26,29,-32,27,-914,-992,248,-1147,746,-600,-1411,-744,-565,-282,187,2409,2683,-221,-1093,-6934,4033,-4276,11276,-297,4328,20553,5592,-1738,-1835,48,-2700,3273,333,-1462,-612,3497,663,1061,1222,2492,754,1168,-36,273,-1174,-272,-1054,891,-844,132,-415,315,-117,257,-335,440,-662,37,-141,164,-657,106,-514,-245,-612,-181,-587,-119,-354,-272,-370,70,-460,-182,-631,-316,-708,-110,-803,-99,-670,-498,-427,-54,-498,-454,-369,-295,-361,-311,-286,-279,-189,-173,-337,-51,-132,-118,-321,-115,-360,-197,-296,-134,-359,-223,-370,-154,-278,-322,-354,-297,-436,-355,-372,-287,-465,-352,-358,-269,-478,-297,-336,-244,-461,-251,-336,-244,-377,-295,-277,-241,-357,-246,-212,-210,-283,-149,-224,-96,-233,-70,-163,-39,-261,-32,-98,-58,-219,-32,-96,-67,-134,7,-101,-50,-139,18,-115,-50,-164,-70,-125,-101,-200,-71,-185,-130,-181,-136,-176,-132,-203,-147,-173,-138,-199,-159,-176,-127,-141,-96,-163,-71,-143,-38,-126,-27,-77,-59,-103,5,-43,-32,-25,-36,-5,-42,40,-34,18,-45,1,-59,7,-47,-28,-49,-55,-63,-33,-55,-79,-59,-82,-109,-94,-40,-81,-89,-121,-82,-83,-49,-111,-62,-79,-97,-59,-52,-74,-97,-50,-98,-31,-116,11,-85,32,-106,43,-88,79,-70}, + /* IRC_Composite_C_R0195_T060_P345.wav */ + 
{2,0,-1,2,-4,7,-10,13,-18,24,-33,46,-69,124,-1066,18,-1103,-58,-112,-921,-201,357,-2572,-292,84,3712,705,1846,-3182,733,-1997,-2694,2428,-595,23701,7754,-2253,5078,3945,-1381,793,-3491,-1163,3714,3613,-2506,296,680,1730,1310,1740,569,508,99,-722,-392,-453,-871,66,-328,-669,-71,-484,-147,-35,146,49,-532,-397,-139,-139,-522,-373,-116,-571,-415,-445,-69,-274,-275,-429,-422,-271,-360,-287,-382,-22,-407,-385,-428,-388,-334,-422,-213,-696,-500,-450,-310,-488,-360,-442,-369,-341,-387,-436,-315,-312,-227,-524,-284,-360,-145,-448,-103,-329,-255,-335,-170,-226,-169,-275,-186,-277,-175,-313,-153,-328,-106,-340,-187,-359,-196,-395,-186,-410,-204,-401,-242,-445,-212,-399,-269,-333,-214,-369,-275,-249,-157,-224,-209,-240,-93,-196,-87,-134,-136,-170,-118,-141,-152,-108,-143,-171,-122,-121,-75,-167,-118,-171,-34,-161,-63,-166,-73,-182,-54,-115,-102,-170,-130,-176,-78,-151,-106,-187,-132,-212,-115,-143,-110,-184,-158,-155,-100,-103,-72,-126,-49,-120,-26,-63,20,-83,6,-50,25,-17,26,-29,39,-31,8,-37,34,-77,20,-77,4,-80,-26,-93,-19,-70,-86,-71,-70,-82,-94,-51,-114,-61,-80,-78,-73,-90,-68,-70,-42,-80,-51,-78,-111,-16,-79,-18,-107,-18,-114,-3,-27,-1,-36,-19,-21,-10,39}, + /* IRC_Composite_C_R0195_T075_P345.wav */ + 
{13,-13,13,-13,13,-13,13,-14,14,-14,14,-14,13,-149,-486,-602,-335,-455,-212,-518,0,-1171,-540,-311,1579,3683,3360,-5772,-426,-1232,-715,5798,-5995,21017,15656,-2523,-3222,5350,1893,1313,-2953,-885,4326,1209,-279,-1161,869,861,1767,953,-85,210,-139,-663,-17,-553,268,-673,-215,-760,-236,-646,-139,-393,-427,-462,-55,-562,-131,-287,-193,-567,-273,-625,-345,-491,-150,-487,-111,-512,-283,-603,-50,-418,-328,-443,-111,-346,-355,-349,-35,-305,-248,-516,-491,-547,-144,-443,-470,-467,-394,-484,-216,-365,-370,-505,-259,-440,-318,-386,-196,-476,-376,-474,-304,-362,-195,-334,-380,-413,-292,-310,-213,-289,-263,-333,-155,-255,-179,-252,-180,-265,-153,-285,-190,-264,-147,-236,-237,-304,-248,-194,-182,-204,-186,-250,-173,-172,-144,-229,-114,-172,-167,-183,-136,-161,-106,-122,-161,-186,-139,-156,-91,-182,-139,-216,-134,-193,-139,-180,-115,-198,-155,-201,-90,-175,-115,-207,-160,-200,-119,-149,-119,-195,-93,-130,-96,-135,-84,-130,-121,-106,-85,-107,-61,-110,-53,-77,2,-45,-15,-51,-20,4,33,-23,-40,-36,1,-53,16,-46,-46,-69,1,-46,-34,-47,-35,-46,-45,-73,-11,-76,-35,-107,-21,-105,-57,-70,-68,-101,-73,-74,-59,-95,-48,-78,-69,-89,-82,-50,-61,-44,-75,-23,-52,-44,10,-43,-23,-47,63,-13,31,19}, + /* IRC_Composite_C_R0195_T090_P345.wav */ + 
{-40,45,-52,60,-69,79,-92,108,-129,156,-197,267,-462,-149,208,-635,-826,268,-390,-920,244,-62,-1260,-320,2937,4521,-564,-4829,2706,-6796,9962,-6783,6356,23745,4526,-4606,-155,5413,3814,-1343,-3207,4343,2939,-733,-2665,998,738,1892,332,-416,-415,112,-618,265,-680,-48,-750,405,-729,216,-673,-99,-730,-121,-855,-310,-693,-146,-680,-198,-695,-260,-522,-283,-523,-143,-597,-333,-173,-342,-516,-211,-519,-364,-344,-136,-671,-213,-411,-202,-398,-103,-578,-290,-294,-138,-306,-230,-402,-283,-321,-289,-337,-289,-473,-414,-279,-358,-413,-275,-431,-453,-313,-307,-448,-301,-341,-461,-354,-322,-417,-358,-269,-383,-284,-242,-302,-305,-257,-285,-308,-182,-268,-314,-270,-212,-228,-190,-249,-276,-158,-168,-143,-161,-146,-207,-91,-103,-158,-110,-126,-88,-73,-13,-110,-35,-46,-106,-57,-116,-140,-215,-98,-197,-197,-174,-246,-265,-202,-185,-227,-181,-218,-214,-125,-146,-175,-178,-153,-217,-80,-87,-137,-151,-98,-122,-94,-88,-136,-141,-168,-115,-121,-70,-143,-102,-56,-76,-36,6,-27,-49,15,48,-26,23,-19,19,-21,34,-59,8,-22,-47,2,-38,-69,-64,-5,-74,-105,-59,-66,-97,-94,-96,-78,-97,-52,-113,-64,-142,-56,-66,-39,-129,-40,-66,-56,-70,5,-96,-30,-55,4,-41,13,-55,20,-26,32,-22,71,-18,17,30}, + /* IRC_Composite_C_R0195_T105_P345.wav */ + 
{-67,70,-74,77,-81,86,-90,96,-101,108,-115,124,-133,69,-792,-387,271,-596,-717,859,-864,-953,-174,2178,-1005,1282,2249,6443,-10856,1162,-3444,7735,12493,-6470,12789,13090,-1110,-7098,1720,6128,6522,-3157,-2418,823,2649,-1545,-82,75,412,176,-227,-1102,-27,27,-108,-705,430,-492,-144,-623,458,-659,-400,-488,-145,-709,-561,-722,-446,-592,-392,-773,-320,-376,-219,-435,-230,-433,-344,-257,-282,-422,-405,-416,-298,-443,-368,-500,-217,-519,-166,-433,-279,-366,-130,-440,-244,-224,-238,-388,-222,-308,-238,-178,-169,-397,-297,-264,-256,-355,-298,-417,-426,-330,-305,-333,-364,-344,-434,-356,-283,-355,-382,-364,-318,-364,-236,-292,-356,-334,-239,-297,-263,-218,-274,-335,-226,-222,-269,-272,-264,-249,-207,-170,-177,-194,-173,-129,-105,-104,-107,-136,-46,-4,64,55,23,-30,-30,-40,-113,-234,-240,-226,-142,-197,-242,-304,-222,-141,-127,-177,-202,-148,-92,-95,-154,-222,-191,-141,-114,-142,-174,-129,-161,-67,-128,-34,-186,-89,-116,-46,-141,-77,-118,-122,-94,-41,-91,-117,-59,-17,-48,-43,-49,-43,-26,26,1,-33,-44,-8,1,24,-53,-52,-67,-16,-53,-12,-77,-55,-119,-25,-52,-50,-119,-50,-99,-48,-93,-38,-125,-90,-97,-23,-111,-77,-96,-23,-117,-28,-57,-25,-82,-14,-37,-3,-37,48,-10,30,-29,94,17,67}, + /* IRC_Composite_C_R0195_T120_P345.wav */ + 
{-3,4,-6,7,-9,12,-14,18,-21,26,-32,40,-51,68,-105,-237,-115,-196,-240,-710,503,-404,-299,-610,1573,-1052,1538,3156,4734,-10274,2428,47,3883,7208,-4454,16464,10228,-4264,-5316,5989,6995,2607,-3203,-668,1640,904,-2642,-469,203,1050,-729,-1586,-33,324,-227,138,-165,-76,50,-374,-903,385,-637,-751,-371,7,-958,-172,-379,-347,-638,-287,-600,-251,-397,-596,-514,-222,-363,-323,-357,-509,-371,-223,-433,-356,-378,-472,-223,-149,-381,-362,-256,-360,-249,-245,-414,-343,-215,-318,-296,-253,-281,-260,-186,-267,-334,-184,-218,-221,-298,-323,-280,-235,-266,-346,-291,-364,-250,-269,-330,-407,-337,-288,-297,-291,-367,-331,-313,-227,-277,-310,-324,-354,-249,-294,-307,-326,-246,-243,-266,-239,-219,-134,-170,-168,-181,-178,-179,-104,-105,-182,-117,-16,21,-1,-16,-123,-97,-70,-125,-190,-191,-224,-174,-191,-187,-240,-111,-186,-142,-146,-92,-134,-84,-176,-166,-110,-109,-146,-139,-231,-119,-73,-70,-193,-83,-143,-72,-63,-63,-163,-69,-90,-96,-97,-84,-157,-37,-91,-88,-116,-29,-99,-31,-73,-68,-76,24,-65,-8,-90,-71,-32,33,-119,-46,-74,-20,-92,37,-144,-45,-100,37,-86,3,-132,2,-42,-29,-104,-4,-152,-16,-85,-19,-175,14,-94,-9,-106,-24,-72,42,-77,-27,-22,2,-33,78,-29,-21,21,111,-5}, + /* IRC_Composite_C_R0195_T135_P345.wav */ + 
{-8,8,-9,9,-9,10,-10,11,-11,11,-12,12,-12,13,-12,12,-14,-199,-345,194,-292,-20,-363,219,-924,441,900,798,-558,3720,1651,-3208,-514,-4991,13814,1350,-1342,10873,8019,1098,-3160,1031,6880,5706,-3917,-4208,1661,708,-1088,-1854,-201,229,-177,-273,-648,-82,475,5,-734,169,-151,-581,-653,471,-491,-236,-587,-109,-475,-248,-581,-317,-480,-514,-555,-293,-524,-197,-571,-212,-393,-202,-577,-116,-375,-344,-418,-218,-355,-263,-348,-305,-348,-210,-371,-214,-394,-179,-361,-184,-347,-189,-348,-251,-234,-293,-267,-213,-252,-242,-269,-147,-370,-183,-325,-201,-333,-191,-269,-230,-243,-283,-228,-273,-293,-252,-342,-267,-359,-215,-428,-224,-365,-239,-330,-269,-348,-257,-251,-286,-250,-246,-272,-177,-242,-155,-277,-116,-236,-94,-218,-96,-109,-104,-71,-63,-76,-143,-84,-111,-151,-139,-218,-149,-229,-159,-185,-165,-192,-165,-156,-158,-137,-140,-159,-114,-167,-123,-164,-97,-148,-122,-112,-140,-90,-115,-78,-111,-104,-86,-114,-49,-142,12,-166,-28,-168,-17,-113,-58,-89,-81,-116,-67,-58,-65,-96,-68,-92,-33,-99,-37,-87,-67,-90,-37,-68,-114,-65,-87,-50,-124,-30,-83,-1,-96,-4,-55,-19,-87,13,-80,-10,-90,-5,-84,-42,-79,4,-67,-63,-38,-27,-36,-23,-7,11,-34,14,-18,51,-74,41,9,19,-7}, + /* IRC_Composite_C_R0195_T150_P345.wav */ + 
{9,-10,11,-12,13,-14,16,-18,20,-22,25,-28,32,-36,41,-48,57,-69,92,-187,-93,-65,148,-471,61,62,-108,-513,1146,660,197,907,4643,-2481,-3069,2384,-436,10331,-1444,3529,10790,4133,-3114,1101,5334,2231,175,-4639,-668,1364,-942,-1832,-179,106,-201,-621,-288,256,-14,7,-70,10,-62,-163,-538,-397,250,-662,-393,-549,-98,-597,-308,-388,-216,-452,-393,-291,-335,-411,-150,-445,-160,-290,-89,-416,-142,-299,-403,-360,-239,-271,-373,-246,-311,-338,-233,-221,-259,-317,-192,-311,-255,-207,-237,-276,-272,-275,-277,-233,-252,-250,-181,-293,-170,-245,-207,-274,-163,-258,-249,-201,-205,-239,-243,-229,-292,-303,-216,-288,-278,-312,-241,-345,-256,-284,-302,-346,-258,-265,-246,-271,-236,-265,-215,-277,-207,-240,-196,-255,-168,-178,-155,-144,-107,-152,-110,-104,-74,-180,-121,-180,-122,-196,-144,-245,-129,-207,-127,-182,-107,-225,-82,-192,-124,-217,-97,-227,-156,-196,-108,-174,-119,-141,-77,-130,-30,-126,-49,-162,-24,-161,-3,-167,-53,-117,-53,-120,-68,-81,-70,-85,-45,-100,-52,-108,-40,-95,-39,-123,-50,-123,-64,-117,-58,-114,-101,-106,-61,-84,-90,-69,-81,-96,-40,-52,-52,-64,-50,-43,-17,-37,-37,-34,-28,-67,-18,-49,-26,-48,-2,-23,-34,-24,7,4,-14,13,9,-1,-21,7,5,3,-11,-9}, + /* IRC_Composite_C_R0195_T165_P345.wav */ + 
{-16,16,-16,16,-16,17,-17,17,-17,18,-18,18,-18,19,-19,19,-19,19,-19,19,-18,15,-25,-180,39,144,-199,84,-114,-19,-188,1254,741,544,587,3703,-1657,-2617,2350,59,8499,1392,3068,7765,4121,-1476,-112,3753,-281,908,-3882,-912,-13,232,-1645,225,-231,30,-284,242,364,101,-416,-192,-215,-224,-211,-588,-413,94,-532,-410,-203,-224,-476,-149,-162,-206,-299,-157,-323,-203,-258,-155,-285,-195,-259,-160,-310,-161,-188,-219,-325,-228,-252,-329,-249,-318,-288,-289,-215,-279,-273,-237,-265,-253,-240,-166,-308,-223,-268,-204,-257,-185,-200,-214,-215,-169,-225,-233,-270,-224,-251,-229,-254,-245,-245,-239,-242,-271,-250,-255,-257,-319,-229,-320,-280,-334,-233,-335,-241,-307,-225,-319,-212,-265,-239,-293,-245,-235,-213,-218,-190,-158,-146,-114,-164,-138,-176,-126,-194,-163,-222,-185,-202,-181,-221,-210,-185,-167,-169,-169,-149,-149,-121,-122,-91,-166,-97,-138,-90,-161,-94,-134,-93,-167,-101,-146,-89,-171,-96,-130,-90,-122,-75,-116,-77,-102,-73,-86,-102,-91,-92,-79,-122,-82,-99,-73,-113,-89,-86,-78,-89,-91,-110,-85,-90,-61,-78,-110,-68,-88,-12,-123,-18,-120,12,-117,12,-119,3,-74,28,-74,-4,-63,7,-46,0,-67,7,-52,21,-52,5,-33,17,5,-5,-18,-6,3,-14,-17,-13,-5}, + /* IRC_Composite_C_R0195_T180_P345.wav */ + 
{14,-14,15,-15,15,-16,16,-17,18,-18,19,-20,20,-21,22,-23,25,-26,28,-31,33,-37,43,-53,84,-38,163,-71,-158,103,194,-92,53,482,91,607,1972,2332,-1572,-66,-260,1180,4049,-804,6508,7047,3077,-817,1693,1744,560,329,-3303,313,-242,-704,-831,249,-87,-229,183,93,110,39,-28,-382,-360,-250,-254,-387,-156,-146,-568,-151,-222,7,-192,-113,-168,-1,-224,-13,-332,-98,-241,-68,-295,-42,-210,-57,-298,-57,-251,-128,-317,-192,-299,-254,-305,-255,-341,-262,-251,-237,-260,-298,-266,-251,-226,-202,-261,-186,-262,-164,-227,-165,-247,-142,-218,-142,-250,-192,-303,-264,-330,-282,-325,-281,-325,-258,-295,-215,-312,-214,-308,-229,-274,-251,-309,-306,-294,-311,-271,-299,-256,-284,-250,-220,-205,-223,-287,-266,-239,-195,-135,-147,-134,-149,-99,-176,-199,-275,-263,-263,-286,-264,-295,-223,-247,-187,-176,-146,-128,-120,-103,-115,-67,-85,-89,-116,-143,-100,-117,-73,-127,-58,-97,-32,-121,-65,-147,-77,-157,-103,-172,-126,-140,-135,-140,-152,-146,-132,-154,-119,-152,-105,-161,-118,-130,-105,-117,-95,-88,-108,-66,-84,-63,-104,-55,-48,-36,-55,-20,-46,-8,-40,5,-55,-25,-61,-13,-59,-44,-51,-47,-33,-43,-29,-37,-41,-40,-27,-26,-28,-32,-23,-2,-7,24,-14,-21,-16,-7,-26,-52,-44}, + /* IRC_Composite_C_R0195_T195_P345.wav */ + 
{6,-6,6,-7,7,-7,7,-7,7,-7,8,-8,8,-8,9,-9,9,-10,10,-11,11,-12,14,-15,17,-21,38,-16,75,-220,-59,106,197,-177,5,-30,-104,721,1540,1146,-1007,1149,558,-528,530,189,5406,5771,1722,1227,2989,2251,-549,199,-1093,649,351,-1192,-797,86,474,-276,161,-98,513,-55,324,-259,35,-250,203,-294,-48,-254,-20,-107,43,-102,74,7,58,5,31,16,-81,-113,-13,-32,-61,-116,-90,-161,-57,-141,-149,-200,-221,-127,-211,-175,-301,-256,-303,-211,-249,-283,-281,-255,-186,-218,-164,-228,-189,-224,-141,-185,-133,-235,-194,-211,-160,-216,-234,-256,-252,-243,-263,-310,-294,-327,-270,-308,-298,-307,-298,-251,-283,-236,-306,-260,-308,-239,-300,-271,-297,-271,-265,-262,-234,-272,-265,-257,-272,-222,-275,-166,-216,-116,-201,-132,-209,-165,-253,-247,-300,-289,-266,-286,-274,-274,-215,-180,-166,-157,-142,-89,-78,-66,-105,-65,-95,-58,-92,-79,-99,-64,-63,-57,-120,-89,-112,-118,-168,-135,-190,-134,-199,-134,-197,-155,-189,-166,-196,-156,-193,-139,-163,-148,-164,-142,-130,-130,-110,-101,-105,-93,-75,-95,-30,-101,-23,-80,-12,-54,-3,-54,-21,-40,-15,-43,-40,-44,-53,-61,-41,-51,-45,-65,-44,-54,-43,-56,-70,-49,-64,-4,-42,4,-66,5,-19,28,-23,-34,-32,-54}, + /* IRC_Composite_C_R0195_T210_P345.wav */ + 
{-2,2,-2,2,-2,2,-2,1,-1,1,-1,1,-1,0,0,0,1,-1,1,-2,3,-3,4,-6,7,-9,13,-20,43,-89,-222,10,103,-107,-34,-36,142,-400,-30,464,582,998,189,-102,357,928,-496,3,2407,3014,4127,2156,97,2572,2367,192,-1077,553,225,383,-471,-511,-191,662,222,22,339,294,362,131,306,286,107,75,262,201,100,126,189,137,172,10,156,70,103,-4,-19,-25,-76,-86,-32,-105,-27,-153,-34,-185,-5,-211,-79,-248,-114,-227,-127,-231,-155,-259,-155,-256,-173,-269,-198,-265,-178,-210,-163,-209,-150,-214,-137,-236,-119,-268,-155,-295,-185,-296,-225,-287,-248,-275,-270,-274,-255,-294,-292,-305,-257,-323,-287,-335,-263,-321,-272,-309,-253,-288,-250,-283,-256,-298,-279,-264,-275,-235,-286,-206,-259,-185,-242,-173,-250,-179,-242,-182,-246,-189,-235,-187,-222,-180,-217,-154,-198,-159,-175,-140,-165,-133,-123,-114,-152,-125,-126,-116,-132,-139,-111,-130,-90,-128,-110,-154,-128,-148,-134,-149,-163,-162,-154,-157,-161,-181,-160,-162,-162,-166,-161,-175,-145,-160,-146,-183,-159,-151,-121,-142,-111,-108,-118,-82,-70,-76,-70,-81,-45,-58,-32,-55,-61,-50,-56,-13,-58,-19,-65,-23,-55,-34,-79,-47,-53,-75,-77,-59,-49,-49,-45,-23,-65,-15,-50,-1,-35,-44,-51,-18}, + /* IRC_Composite_C_R0195_T225_P345.wav */ + 
{4,-4,4,-4,5,-5,5,-5,5,-5,5,-5,5,-5,5,-6,6,-6,6,-6,6,-6,7,-7,7,-7,8,-8,8,-8,9,-22,-68,-102,-39,-33,-2,-125,-43,-65,-81,-88,504,491,271,136,50,242,413,575,246,1154,3270,2343,1173,1360,1386,1335,693,41,-16,626,497,-208,385,-141,534,407,815,196,540,538,608,534,497,553,348,395,314,320,191,167,89,75,-22,80,-26,77,-33,113,-103,86,-85,66,-164,-34,-138,-32,-150,-66,-143,-81,-162,-134,-151,-126,-180,-176,-206,-176,-215,-184,-242,-174,-257,-171,-240,-144,-234,-156,-238,-145,-213,-186,-241,-198,-236,-200,-304,-237,-309,-223,-315,-256,-320,-273,-306,-276,-282,-301,-281,-293,-281,-297,-281,-281,-278,-300,-296,-308,-282,-258,-291,-257,-276,-228,-231,-194,-203,-185,-194,-181,-173,-165,-183,-152,-208,-152,-227,-158,-213,-156,-222,-165,-206,-169,-192,-153,-170,-166,-179,-180,-153,-149,-139,-152,-153,-126,-111,-90,-107,-114,-128,-119,-123,-144,-150,-167,-156,-182,-180,-204,-160,-191,-173,-198,-185,-178,-167,-163,-174,-180,-175,-186,-163,-152,-140,-147,-132,-122,-102,-97,-72,-79,-75,-66,-54,-46,-62,-28,-64,-26,-69,-23,-69,-28,-68,-48,-80,-55,-84,-64,-71,-62,-66,-82,-71,-59,-36,-53,-39,-61,-45,-48,-32,-64}, + /* IRC_Composite_C_R0195_T240_P345.wav */ + 
{1,-1,1,-1,1,-1,1,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,3,-3,3,-3,4,-4,5,-7,11,-87,-91,-69,-34,-17,-83,-130,-72,-103,-104,-134,114,-4,-17,85,373,202,-49,-101,219,365,876,782,785,1628,2056,1505,913,728,668,1183,655,491,465,893,831,430,584,712,1020,649,708,723,730,613,504,566,414,315,187,278,133,226,116,259,129,186,116,185,45,88,-32,30,-121,-4,-138,-59,-242,-77,-196,-88,-164,-95,-154,-158,-144,-123,-166,-155,-131,-163,-193,-126,-158,-197,-221,-177,-191,-199,-207,-207,-232,-259,-213,-257,-223,-245,-210,-232,-214,-199,-250,-237,-259,-286,-308,-300,-276,-316,-306,-323,-315,-283,-322,-259,-334,-262,-308,-246,-279,-237,-252,-210,-235,-204,-211,-204,-216,-219,-187,-208,-184,-188,-172,-182,-147,-178,-171,-209,-148,-207,-140,-214,-153,-202,-132,-178,-141,-158,-129,-140,-132,-148,-104,-170,-94,-160,-103,-179,-127,-186,-148,-195,-164,-196,-178,-176,-178,-191,-206,-200,-201,-208,-201,-208,-200,-195,-166,-167,-160,-164,-138,-176,-171,-170,-145,-157,-125,-127,-91,-104,-77,-96,-65,-75,-71,-96,-65,-84,-60,-84,-57,-72,-49,-85,-51,-85,-52,-82,-51,-93,-48,-69,-42,-81,-54,-69,-69,-69,-62,-61,-61,-54,-82,-67}, + /* IRC_Composite_C_R0195_T255_P345.wav */ + 
{1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,3,-3,5,-29,-81,-145,-126,-102,-79,-99,-83,-32,26,-25,-104,-85,-9,-133,-67,-57,165,465,613,590,447,814,1074,1050,1231,1480,1384,1691,1545,721,693,1027,967,911,897,961,823,595,850,817,872,522,568,476,335,222,147,215,189,255,142,248,142,175,30,4,8,-59,-17,-77,-20,-108,-60,-97,-148,-132,-187,-115,-138,-127,-111,-127,-113,-156,-101,-162,-102,-180,-108,-179,-120,-179,-195,-199,-229,-185,-270,-215,-278,-237,-258,-241,-261,-210,-244,-198,-260,-213,-287,-247,-340,-255,-344,-240,-343,-221,-330,-201,-299,-199,-288,-250,-262,-234,-258,-219,-245,-213,-243,-209,-226,-210,-206,-229,-185,-199,-171,-204,-168,-186,-151,-175,-154,-197,-147,-168,-130,-180,-149,-171,-122,-158,-129,-152,-132,-156,-139,-160,-164,-170,-174,-154,-162,-126,-151,-134,-162,-148,-177,-191,-199,-212,-200,-209,-220,-215,-220,-217,-209,-214,-187,-188,-163,-169,-145,-176,-134,-160,-141,-157,-132,-139,-118,-136,-115,-131,-104,-132,-107,-134,-96,-102,-76,-89,-80,-83,-80,-72,-65,-58,-64,-74,-61,-66,-46,-79,-66,-94,-77,-85,-74,-86,-77,-80,-67,-80,-78,-81,-61,-58}, + /* IRC_Composite_C_R0195_T270_P345.wav */ + 
{1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,2,-2,2,-3,3,-3,3,-3,4,-4,5,-5,7,-13,-9,25,107,159,235,223,228,197,135,148,180,104,249,332,345,533,679,812,953,1029,929,1077,1230,1424,1718,1696,1725,1631,997,986,1012,146,-166,111,353,347,-6,-43,73,193,90,62,-93,-92,-338,-310,-405,-336,-407,-441,-401,-511,-347,-531,-317,-432,-349,-454,-400,-327,-440,-322,-442,-318,-433,-382,-424,-384,-375,-393,-396,-458,-428,-480,-383,-434,-382,-464,-371,-432,-337,-394,-307,-399,-335,-417,-322,-442,-324,-436,-303,-449,-304,-409,-298,-363,-284,-317,-307,-308,-323,-288,-319,-293,-322,-285,-281,-248,-239,-242,-269,-244,-286,-239,-275,-228,-281,-236,-270,-215,-230,-183,-189,-147,-168,-142,-140,-138,-148,-145,-122,-112,-122,-100,-101,-92,-130,-111,-155,-125,-154,-111,-187,-117,-190,-95,-171,-106,-163,-109,-148,-117,-153,-132,-173,-135,-163,-114,-185,-122,-191,-116,-168,-92,-150,-108,-141,-94,-122,-95,-150,-97,-145,-88,-132,-65,-125,-53,-97,-33,-83,-12,-48,-5,-27,7,-11,35,-15,26,-18,26,-29,3,-32,-9,-39,15,11,7,13,12,6,12,11,20,5,2,4,3,-12,-24,-23,-5,-3,-22,4,5,1,4}, + /* IRC_Composite_C_R0195_T285_P345.wav */ + 
{0,0,0,0,0,0,0,0,0,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-2,2,-2,2,-2,3,-3,3,-4,5,-6,8,-11,22,-196,-173,-113,-87,-175,-179,-137,-214,-236,-209,-197,-203,-89,115,94,63,76,-21,-123,-58,550,837,1139,1631,2133,1768,1607,1992,1501,200,482,1172,950,840,796,772,845,1106,940,1271,764,758,561,601,592,456,196,274,22,269,50,133,94,119,18,4,153,30,97,24,85,20,33,14,-38,-26,-152,-46,-172,-60,-155,-115,-148,-198,-89,-195,-112,-261,-100,-214,-212,-183,-197,-150,-219,-140,-179,-164,-230,-178,-187,-230,-226,-257,-158,-281,-201,-228,-189,-261,-210,-220,-259,-267,-256,-233,-276,-267,-249,-227,-294,-254,-243,-276,-267,-297,-257,-292,-260,-305,-245,-279,-264,-243,-230,-212,-214,-151,-148,-130,-127,-88,-74,-103,-75,-88,-67,-122,-69,-96,-106,-123,-96,-146,-128,-172,-140,-152,-152,-190,-154,-179,-153,-191,-155,-209,-127,-197,-114,-197,-134,-193,-131,-195,-130,-172,-126,-176,-158,-191,-169,-206,-154,-203,-183,-230,-172,-205,-185,-204,-179,-196,-157,-166,-116,-150,-115,-126,-82,-123,-71,-85,-83,-83,-68,-72,-63,-78,-79,-49,-75,-79,-92,-70,-98,-66,-95,-75,-105,-104,-85,-82,-92,-105,-73,-107,-87,-82,-75,-79,-91}, + /* IRC_Composite_C_R0195_T300_P345.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,3,-3,3,-3,4,-4,5,-6,8,-10,16,-33,4,109,115,258,119,96,128,286,309,258,544,795,778,461,339,836,601,406,1682,1975,1154,2276,2834,1223,701,931,69,-668,-254,489,635,-539,173,339,531,396,94,191,-102,150,-127,-75,-251,-176,-409,-489,-562,-686,-623,-666,-571,-644,-520,-569,-434,-437,-442,-489,-355,-376,-438,-545,-399,-497,-467,-538,-479,-549,-412,-479,-420,-458,-466,-435,-392,-372,-360,-342,-413,-353,-363,-348,-419,-344,-451,-370,-415,-367,-468,-389,-500,-406,-491,-367,-474,-360,-481,-350,-435,-296,-421,-285,-402,-267,-363,-222,-350,-221,-334,-215,-270,-177,-235,-146,-214,-118,-175,-89,-140,-81,-133,-96,-92,-54,-91,-56,-86,-58,-64,-32,-51,-40,-76,-57,-55,-36,-113,-96,-83,-105,-91,-129,-152,-109,-131,-124,-142,-150,-156,-119,-136,-143,-152,-125,-140,-130,-126,-129,-99,-125,-119,-110,-121,-109,-118,-102,-117,-98,-117,-101,-93,-114,-105,-94,-69,-112,-75,-86,-60,-49,-25,-59,6,-33,-1,-8,8,-2,17,23,0,29,13,17,24,18,21,18,23,-13,11,-8,15,5,10,-10,8,-24,7,1,-11,1,27,-4,48,5,53,24,86,43,68,50,99}, + /* IRC_Composite_C_R0195_T315_P345.wav */ + 
{1,-1,1,-1,1,-1,1,-1,1,-1,1,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-1,0,6,-137,-110,-314,-483,-264,-184,-467,-482,89,-336,204,277,141,-93,-770,-71,819,-837,1577,3127,1312,2383,3512,2495,-80,350,890,699,71,561,954,294,1113,1117,1259,495,984,849,877,553,746,524,537,393,441,428,542,429,296,210,204,231,133,-55,33,-7,-111,-11,-91,34,-190,-56,-131,39,-14,66,-79,58,-96,105,-60,169,-64,84,-143,118,-100,-84,-157,-91,-304,-233,-224,-201,-251,-259,-309,-306,-307,-286,-299,-265,-407,-357,-301,-345,-348,-313,-294,-342,-334,-338,-298,-266,-288,-261,-298,-254,-268,-227,-236,-260,-244,-223,-227,-260,-233,-195,-202,-171,-228,-156,-170,-114,-170,-107,-146,-105,-89,-96,-83,-40,-73,-59,-66,-65,-71,-53,-107,-74,-75,-92,-122,-102,-105,-87,-130,-86,-139,-115,-142,-131,-148,-147,-160,-144,-151,-139,-173,-139,-186,-126,-170,-147,-194,-137,-189,-141,-184,-162,-186,-151,-208,-180,-195,-172,-186,-165,-204,-165,-157,-155,-139,-132,-141,-108,-126,-89,-110,-98,-101,-113,-94,-111,-133,-99,-126,-121,-126,-97,-115,-103,-122,-84,-114,-58,-120,-106,-109,-97,-103,-75,-128,-91,-104,-73,-91,-53,-59,-46,-63,-33,-22,-14}, + /* IRC_Composite_C_R0195_T330_P345.wav */ + 
{-9,9,-9,9,-9,9,-9,9,-9,9,-9,9,-9,9,-9,9,-9,9,-9,10,-10,10,-10,10,-10,10,-10,10,-7,-378,-458,118,-356,-539,-320,-251,-977,-150,272,171,262,290,-552,-354,-2031,2393,-54,-667,4888,3245,1575,3792,4173,-210,-907,432,1658,-334,338,623,940,723,1598,1314,1216,764,1038,976,401,116,250,473,239,334,528,294,340,65,340,194,348,152,304,92,279,123,93,-24,89,-192,-125,-56,-130,68,194,-86,77,174,220,128,98,-234,-216,-168,-160,-251,-283,-410,-399,-240,-227,-363,-327,-361,-302,-370,-271,-312,-309,-428,-402,-318,-223,-266,-304,-253,-264,-272,-246,-203,-278,-299,-319,-254,-236,-260,-281,-188,-203,-212,-220,-185,-220,-178,-230,-207,-215,-227,-248,-182,-197,-163,-179,-101,-143,-112,-138,-84,-115,-82,-111,-83,-102,-83,-51,-69,-49,-89,-18,-67,-73,-108,-72,-63,-79,-111,-167,-94,-130,-99,-166,-109,-181,-121,-156,-124,-154,-131,-162,-169,-165,-170,-136,-151,-159,-185,-163,-122,-139,-141,-175,-141,-148,-148,-172,-157,-130,-154,-150,-148,-142,-153,-141,-116,-159,-97,-155,-91,-162,-92,-141,-53,-134,-97,-132,-68,-111,-97,-127,-98,-128,-105,-137,-81,-141,-76,-120,-73,-136,-97,-105,-48,-102,-73,-108,-56,-97,-41,-75,-56,-70,-20,-33,-9,-51,-32}, + /* IRC_Composite_C_R0195_T345_P345.wav */ + 
{1,-2,2,-3,4,-4,5,-6,7,-8,9,-10,11,-13,14,-16,17,-19,21,-22,24,-25,25,-22,13,15,-152,-764,140,-602,18,-900,-519,-1092,-545,489,261,706,-1198,1895,-1912,-2689,2900,30,-3177,8188,3071,2405,4243,6073,615,-3233,507,1858,-510,-537,1688,110,230,1724,2185,1547,1263,1871,1145,573,342,-23,261,-614,-9,-121,805,-137,38,285,272,-29,312,280,184,234,346,-32,603,80,194,153,323,-216,458,198,10,-275,28,-275,-40,-248,-266,-416,-305,-349,-213,-336,-338,-460,-267,-400,-247,-403,-303,-379,-264,-460,-298,-347,-227,-420,-309,-405,-246,-238,-250,-319,-267,-250,-266,-221,-240,-318,-184,-236,-175,-305,-161,-225,-161,-213,-190,-219,-158,-206,-155,-198,-165,-211,-169,-178,-141,-180,-148,-191,-154,-191,-120,-222,-141,-171,-68,-131,-104,-142,-95,-72,-93,-71,-91,-94,-90,-92,-83,-97,-91,-117,-108,-109,-102,-101,-101,-116,-95,-82,-119,-115,-158,-119,-137,-108,-151,-151,-163,-123,-125,-148,-167,-118,-168,-149,-196,-146,-169,-130,-179,-147,-164,-153,-114,-109,-119,-128,-121,-82,-111,-89,-98,-81,-102,-105,-71,-85,-85,-116,-85,-98,-76,-124,-102,-118,-107,-84,-126,-141,-114,-78,-80,-110,-127,-121,-93,-112,-96,-109,-98,-90,-55,-53,-66,-54,-41,-26,-18,-19,-5,-33,-57,-33}}; + +const int16_t irc_composite_c_r0195_p000[][256] = + {/* IRC_Composite_C_R0195_T000_P000.wav */ + 
{1,-1,1,-1,0,0,0,0,0,1,-1,2,-2,2,-3,4,-5,6,-7,9,-11,14,-20,33,-756,-534,-348,102,-1032,-582,-1506,65,-959,133,-210,1928,576,-291,-5368,5774,-6962,-2265,14308,-3483,7090,10599,2076,-2731,3149,-192,-3412,-1652,3866,548,-1226,-1088,2012,2077,1960,-209,1087,1404,1871,1150,1665,536,626,1200,1268,592,-114,-160,-95,133,-37,-246,158,-144,-190,-548,421,96,105,-192,181,129,158,143,73,-164,-261,-64,-147,-128,-55,-166,-60,98,349,-253,126,-172,110,-391,-118,265,-317,256,-97,416,-62,274,-244,-355,-104,-427,-357,-741,-368,-489,-365,-418,-498,-150,-384,-361,-530,-257,-297,-381,-491,-511,-393,-429,-294,-395,-373,-408,-292,-342,-260,-297,-286,-427,-324,-291,-291,-276,-319,-244,-386,-221,-241,-141,-260,-273,-231,-153,-86,-203,-202,-152,-95,-140,-176,-127,-110,-156,-237,-148,-139,-94,-248,-190,-202,-85,-155,-205,-223,-169,-158,-183,-216,-167,-164,-146,-219,-103,-162,-71,-220,-10,-169,7,-134,32,-76,-42,-100,-66,-50,-44,-71,-88,-122,-40,-75,21,-78,-18,-125,-33,-69,53,-77,-22,-138,-41,-62,-55,-92,-83,-99,-115,-150,-17,-53,-12,-184,-50,-96,28,-81,-1,-78,-41,-118,-14,-66,38,-82,-29,-104,29,5,34,-62,14,7,47,-61,67,-43,51,-145,61}, + /* IRC_Composite_C_R0195_T015_P000.wav */ + 
{123,-125,127,-129,131,-133,135,-137,138,-140,141,-141,141,-140,137,-131,122,-106,76,-10,-238,-830,-319,-1069,17,-859,165,-1424,-309,-2002,526,695,3378,-155,-423,-6705,9134,-12025,1719,14823,-7846,16119,11510,-1243,-4202,5454,-1293,-2996,-1156,2894,-853,-440,-59,2319,814,827,428,2620,1829,1181,817,1748,469,1133,599,1385,-490,316,-1523,369,-621,391,-835,160,-740,77,-149,751,-195,-103,-622,191,-100,342,-303,-90,-465,82,62,259,-409,-385,44,87,-50,21,54,-467,-207,-79,-324,-108,-90,-152,-620,71,-110,47,-749,-161,-523,-153,-446,-157,-336,40,-137,-86,-507,-177,-322,-14,-430,-277,-557,-360,-326,-296,-274,-463,-359,-502,-263,-388,-316,-369,-353,-459,-464,-335,-293,-324,-333,-369,-324,-380,-288,-268,-287,-294,-317,-270,-395,-181,-204,-149,-383,-214,-212,-113,-188,-174,-211,-188,-99,-166,-173,-203,-111,-134,-148,-226,-166,-126,-86,-216,-199,-234,-119,-193,-118,-250,-172,-245,-101,-156,-114,-172,-153,-162,-98,-70,-71,-106,-70,-81,-14,-14,37,-38,-36,-107,-3,-9,52,-91,-63,-116,20,-50,18,-151,-37,-125,57,-97,31,-149,5,-133,4,-141,9,-132,-7,-168,9,-124,33,-144,22,-166,15,-117,46,-156,11,-127,36,-124,23,-58,69,-65,71,-34,110,-36,130,-45,96,-68,96,-59,38,-71}, + /* IRC_Composite_C_R0195_T030_P000.wav */ + 
{-63,66,-69,73,-76,80,-84,89,-93,98,-102,107,-110,111,-108,95,-54,-154,-785,-302,-1153,268,-1139,240,-1422,320,-2283,272,-629,4046,237,3112,-7137,2285,231,-13166,20036,-5068,11914,16533,2581,-5753,2916,1267,-3077,-1961,1642,-123,210,665,1080,1069,485,1122,1846,1948,901,1486,921,1132,470,865,-186,-57,-358,-153,-489,-118,-724,-365,-517,-156,-335,155,-255,96,-315,376,-169,22,-435,5,-544,-35,-484,-12,-457,280,-453,-177,-399,435,-423,-188,-484,161,-217,-38,-732,-416,-157,73,-480,-440,-302,-140,-260,-335,-490,-420,-470,-291,-617,-112,-315,20,-559,-210,-412,154,-162,-102,-453,-225,-205,-23,-247,-203,-409,-147,-366,-147,-367,-150,-477,-326,-502,-222,-390,-294,-498,-249,-423,-313,-510,-239,-382,-176,-516,-351,-496,-92,-337,-150,-462,-212,-366,-74,-288,-94,-322,-110,-334,-53,-240,11,-306,-108,-332,-45,-271,-52,-295,-76,-308,-99,-319,-57,-226,-50,-303,-130,-254,-54,-183,-89,-261,-98,-193,-6,-184,-12,-164,45,-175,27,-120,129,-69,82,-89,120,-26,113,-48,59,-77,18,-85,-3,-107,0,-78,-29,-159,0,-62,67,-139,-9,-138,19,-136,-3,-124,-27,-149,-30,-136,-11,-97,-7,-137,-19,-96,20,-84,15,-64,28,-92,9,15,121,-12,24,-32,74,36,98,-9,3,-30,36,-42,4}, + /* IRC_Composite_C_R0195_T045_P000.wav */ + 
{72,-74,77,-79,82,-86,91,-96,102,-111,121,-134,152,-179,228,-635,490,-1252,-114,-1202,742,-1779,495,-1563,532,-2895,2675,1167,4718,-4709,4646,-5559,-6673,7608,-8298,21201,11442,4596,920,5308,-1818,-1353,-1082,-283,-725,2349,-984,-140,614,1696,1859,2003,557,2080,1067,416,1112,525,38,-87,-851,-336,38,-375,-495,-291,-403,-90,-299,-393,-130,-647,-37,-270,-227,-182,-2,-485,-21,-380,-221,-179,-364,-244,-343,-247,-161,-213,-443,-201,-386,-349,-198,-453,-401,-479,-187,-349,-331,-486,-40,-343,-132,-248,-346,-150,-185,-498,-200,-280,-512,-271,-152,-472,-405,-421,-235,-444,-272,-369,-299,-285,-209,-483,-192,-140,-287,-241,-120,-328,36,-232,-275,-345,-26,-488,-78,-319,-205,-488,22,-390,-294,-391,-131,-450,-198,-416,-347,-391,-95,-530,-266,-359,-162,-403,-129,-438,-172,-313,-124,-327,-146,-338,-102,-245,-121,-328,-124,-244,-79,-277,-128,-320,-47,-241,-144,-306,-70,-274,-85,-274,-147,-219,-71,-212,-78,-214,-60,-127,0,-182,-21,-106,67,-97,35,-113,84,16,65,-67,99,-24,79,7,29,-85,99,-35,17,-91,57,-100,58,-112,-18,-130,73,-160,-25,-148,6,-136,45,-217,2,-142,79,-180,64,-198,81,-156,114,-200,98,-162,123,-120,152,-170,176,-85,130,-107,189,-99,158,-113,138,-127,161,-140,111,-221}, + /* IRC_Composite_C_R0195_T060_P000.wav */ + 
{123,-132,142,-153,166,-180,196,-215,238,-264,296,-338,397,-545,-26,-1070,548,-1189,312,-1132,-540,-593,703,-1932,23,1834,2821,3650,-4896,2244,-8990,3547,-3667,11688,19991,4002,-67,5265,-1066,-618,257,-237,-93,2490,-1554,-1096,1601,944,1501,1048,1374,1754,1792,566,335,-350,932,-271,-1525,-771,-646,-466,146,-610,113,-1035,510,-222,-23,-608,-443,-725,203,-887,-434,-589,-19,-438,-40,-610,14,-467,-137,-372,-214,-629,-55,-580,-263,-517,-350,-563,-317,-499,-281,-660,-233,-369,6,-533,-180,-457,-90,-331,-140,-395,-150,-246,-124,-225,-134,-388,-151,-322,-330,-363,-280,-394,-241,-503,-138,-532,-169,-402,-263,-433,-255,-363,-244,-372,-271,-364,-268,-337,-126,-472,-58,-441,6,-373,-75,-345,-91,-290,-171,-369,-128,-311,-189,-312,-183,-241,-230,-309,-78,-303,-168,-337,-178,-236,-164,-270,-238,-313,-113,-261,-195,-270,-208,-224,-136,-312,-153,-332,-79,-287,-193,-257,-149,-227,-124,-297,-82,-209,-107,-181,-87,-173,-18,-146,9,-126,28,-74,-22,-25,22,-50,40,-41,25,-17,-14,5,9,-67,23,-34,57,-46,5,3,37,-39,6,-36,11,-54,-39,-69,-61,-79,-35,-99,-38,-98,-29,-88,-12,-86,-19,-71,-19,-37,-12,-58,7,-41,34,-37,47,-22,42,-26,97,2,68,-13,74,7,35,-6,34,-1,0,-38}, + /* IRC_Composite_C_R0195_T075_P000.wav */ + 
{30,-35,41,-47,55,-63,72,-84,97,-113,134,-162,202,-472,-304,-372,274,-1176,-371,-433,-535,-54,364,-1802,1909,3073,5773,-6217,-2323,-1631,-4047,10163,-533,23315,9574,-7064,764,4451,279,-686,938,2910,-1594,-781,31,2302,-241,1013,1336,1127,1162,699,28,761,380,-426,-314,-917,-750,-202,-732,-632,-472,-517,-73,-213,-19,-699,-215,-440,-368,-595,-354,-716,-262,-604,-448,-306,-492,-226,-481,-404,-393,-315,-472,-290,-442,-401,-227,-459,-350,-606,-194,-594,-308,-438,-436,-423,-217,-461,-220,-298,-300,-185,-218,-342,-120,-282,-142,-260,-82,-276,-46,-282,-219,-258,-93,-447,-193,-418,-296,-377,-249,-479,-177,-358,-304,-416,-185,-366,-179,-486,-287,-406,-134,-512,-167,-454,-201,-352,-195,-386,-169,-289,-223,-334,-186,-280,-123,-302,-167,-268,-36,-301,-139,-264,-133,-195,-111,-276,-131,-175,-123,-195,-148,-226,-77,-211,-165,-231,-123,-257,-132,-285,-135,-289,-147,-278,-141,-274,-166,-238,-154,-189,-134,-212,-101,-127,-6,-142,-21,-106,41,-81,41,-105,54,-54,6,-41,-3,-62,27,-57,-25,-42,29,-39,10,-90,56,-38,43,-106,38,-63,27,-58,-21,-110,-19,-82,-36,-124,-57,-83,23,-113,9,-73,55,-109,34,-55,45,-13,62,-54,27,-33,52,12,35,-34,72,-19,33,-32,74,-59,39,-25,53,1,10,-3}, + /* IRC_Composite_C_R0195_T090_P000.wav */ + 
{-3,3,-4,5,-6,7,-9,11,-13,17,-23,36,-967,38,-432,430,-1179,327,-1275,279,-1297,1913,-1800,360,826,8131,-4106,182,-6660,-1308,8763,-6147,21004,13661,-1493,-3341,815,6070,1330,-1696,3689,487,-871,-2812,2923,1220,319,-441,2019,161,1147,-214,819,-1063,179,-1133,125,-1120,701,-1109,-441,-308,-3,-1227,135,-661,-529,-162,-375,-951,-262,-371,-533,-415,-491,-644,-459,-451,-508,-551,-360,-465,-434,-399,-416,-458,-204,-423,-358,-363,-290,-434,-346,-447,-174,-475,-424,-400,-239,-439,-175,-468,-418,-188,-90,-409,-214,-233,-225,-147,-140,-439,-299,-104,-157,-229,-145,-201,-232,-109,-193,-310,-284,-271,-306,-318,-342,-330,-320,-362,-208,-295,-346,-295,-212,-394,-231,-297,-357,-348,-177,-356,-301,-275,-291,-319,-184,-255,-307,-178,-245,-228,-206,-194,-280,-137,-218,-191,-183,-203,-186,-105,-193,-151,-164,-132,-164,-77,-203,-113,-167,-192,-164,-75,-239,-194,-160,-166,-186,-77,-232,-182,-148,-63,-204,-92,-164,-69,-54,4,-140,-18,-43,36,-61,41,-116,-2,-92,42,-126,-61,-150,-8,-117,-15,-62,-69,-114,61,-14,10,-74,8,-25,119,-37,0,-69,10,-56,37,-94,-111,-86,30,-107,-71,-119,4,-92,38,-125,29,-50,58,-57,64,-84,92,-4,67,-73,54,-8,88,-67,71,21,64,-36,123,-7,23,-36,115}, + /* IRC_Composite_C_R0195_T105_P000.wav */ + 
{67,-63,58,-51,43,-33,19,-1,-24,61,-121,244,-927,183,-686,327,-496,-330,-486,-340,-314,396,182,-1212,1259,6063,-556,-2378,-2582,-3838,9614,-8512,18557,14219,-782,-2750,529,5714,4180,-1314,2811,874,-1413,-2611,2334,2590,-537,-811,436,1284,515,-112,-879,-106,-221,-558,-955,-184,-84,-312,-553,-219,-379,-172,-456,-790,-576,-350,-708,-606,-553,-357,-404,-422,-537,-414,-291,-515,-518,-625,-263,-417,-492,-719,-360,-360,-368,-573,-481,-304,-142,-306,-536,-356,-164,-154,-398,-429,-388,-147,-288,-315,-532,-144,-267,-143,-527,-186,-329,6,-447,-226,-375,20,-329,-159,-444,-77,-329,-143,-450,-93,-327,2,-390,-87,-374,55,-418,-96,-445,-37,-436,-129,-505,-118,-444,-152,-487,-172,-395,-135,-432,-170,-368,-96,-414,-145,-394,-32,-326,-123,-434,-83,-276,-94,-355,-149,-242,-56,-256,-166,-269,-75,-228,-119,-313,-87,-207,-58,-304,-97,-234,-23,-285,-126,-259,-11,-242,-91,-247,-1,-204,-10,-242,-5,-194,74,-169,102,-92,170,-62,135,-93,108,-134,5,-190,-8,-203,-47,-223,16,-187,1,-174,58,-131,78,-113,75,-130,98,-75,44,-121,101,-58,53,-146,47,-70,70,-128,-19,-108,21,-89,-23,-119,-27,-39,31,-124,-51,-37,65,-45,0,4,66,6,17,11,14,30,64,44,27,-16,79,-9,-11,-60,55}, + /* IRC_Composite_C_R0195_T120_P000.wav */ + 
{-76,75,-75,74,-72,69,-65,59,-49,35,-14,-21,85,-263,347,27,-533,-67,-385,-42,-335,645,-2065,1305,1033,1922,-2255,6706,-3555,1838,-9048,1510,15767,522,6485,7416,2582,-1635,-1123,7704,6890,-3746,-2349,-815,4058,-1305,633,-1081,2060,-1086,490,-1435,1038,-465,-103,-1558,594,-576,-246,-797,-411,-729,216,-627,-444,-642,122,-670,-147,-796,-255,-906,76,-717,-449,-584,-119,-685,-337,-606,-352,-326,-478,-502,-420,-468,-445,-428,-466,-351,-293,-513,-155,-443,-243,-371,-82,-573,-58,-388,-209,-303,-195,-394,-165,-220,-376,-192,-262,-255,-289,-234,-278,-284,-271,-328,-227,-325,-185,-358,-162,-289,-168,-301,-148,-278,-221,-237,-227,-212,-236,-164,-236,-223,-147,-209,-256,-222,-209,-360,-125,-328,-206,-254,-219,-307,-162,-296,-276,-174,-324,-187,-268,-155,-286,-159,-317,-134,-252,-195,-181,-236,-204,-162,-148,-226,-141,-227,-132,-141,-218,-189,-165,-114,-288,-111,-256,-76,-252,-50,-308,-80,-158,-63,-217,-126,-102,12,-10,-77,13,74,33,49,-22,50,-56,57,-177,-13,-201,19,-144,-101,-181,59,-32,-49,-45,-72,-16,32,-40,-48,-33,-42,-45,64,-145,8,-69,33,-144,44,-92,56,-118,-2,-60,8,-77,-3,-91,-37,-73,-11,-53,0,-49,56,-45,17,-73,100,6,64,-7,125,-47,94,22,33,-55,12,-30,7}, + /* IRC_Composite_C_R0195_T135_P000.wav */ + 
{-47,46,-46,44,-43,40,-37,33,-28,21,-12,-1,20,-48,94,-206,95,-290,-258,153,-592,203,-293,297,-1153,1348,644,1791,-1433,6094,-3856,-260,-5383,6042,11744,-1699,5991,8208,1865,-2201,2510,6226,4590,-5126,-1361,782,2433,-2030,854,-833,853,-1117,568,-1205,680,-1070,-374,-797,196,-738,-381,-460,54,-509,80,-809,-117,-538,-17,-916,203,-876,-170,-815,-257,-782,-178,-520,-324,-433,-344,-353,-383,-382,-403,-522,-240,-512,-312,-523,-189,-533,-114,-477,-247,-366,-173,-363,-247,-204,-351,-273,-220,-239,-329,-247,-257,-330,-207,-331,-212,-342,-200,-298,-246,-261,-277,-228,-264,-213,-328,-212,-273,-219,-256,-244,-234,-265,-159,-264,-191,-244,-157,-229,-190,-221,-203,-203,-265,-198,-238,-189,-235,-164,-189,-220,-183,-216,-189,-257,-194,-227,-199,-234,-209,-214,-214,-195,-212,-179,-255,-200,-207,-151,-257,-182,-194,-172,-200,-169,-219,-215,-208,-180,-207,-192,-215,-173,-203,-137,-219,-101,-232,-121,-175,-4,-148,-45,-92,5,-35,0,-5,-45,0,8,10,-53,-107,-5,-81,6,-157,3,-130,19,-128,39,-138,5,-65,57,-90,7,-52,45,-79,13,-78,29,-109,14,-79,25,-126,7,-61,-11,-75,1,-51,-46,-64,-35,-74,-40,-48,36,-46,3,-64,53,-59,33,-26,105,4,63,13,64,-28,24,-1,3,-80,-21}, + /* IRC_Composite_C_R0195_T150_P000.wav */ + 
{-5,6,-7,7,-8,9,-10,11,-12,13,-15,16,-17,19,-20,20,-17,1,-234,227,-421,117,-50,-175,-40,33,-222,178,1388,586,910,1027,4228,-7050,1223,2017,6153,4669,1121,8628,6102,-3597,1339,6488,2001,-1618,-2670,715,665,-9,-1202,163,-501,22,-977,268,-483,330,-922,182,-469,62,-791,-223,-334,-215,-485,-347,-491,-176,-429,-178,-696,-63,-705,-191,-727,-100,-715,-18,-605,-161,-531,-137,-475,-197,-485,-201,-433,-135,-408,-232,-404,-136,-441,-131,-469,-151,-475,-144,-469,-155,-431,-165,-416,-207,-381,-199,-342,-208,-344,-172,-301,-189,-339,-193,-297,-159,-303,-169,-283,-144,-295,-190,-323,-198,-312,-172,-324,-214,-279,-169,-264,-159,-230,-157,-232,-160,-206,-167,-240,-200,-232,-191,-234,-175,-238,-189,-225,-142,-206,-214,-206,-200,-152,-232,-176,-232,-184,-217,-152,-205,-194,-188,-192,-160,-199,-163,-211,-186,-234,-194,-218,-212,-270,-194,-202,-181,-219,-182,-202,-169,-194,-142,-192,-142,-138,-58,-121,-63,-104,-7,-62,-12,-40,0,-55,-16,-33,-41,-87,-38,-71,-48,-89,-7,-81,-22,-59,5,-41,-28,-47,-20,-14,-23,-20,-10,-31,-22,-25,-11,-55,-34,-32,-33,-44,-53,1,-30,-56,-58,-29,-61,-47,-27,-44,-54,-52,-32,-37,-63,-16,18,1,-3,18,72,0,21,9,42,-23,-7,-40,-42}, + /* IRC_Composite_C_R0195_T165_P000.wav */ + 
{-31,31,-31,31,-30,30,-30,29,-28,27,-25,23,-20,16,-12,5,5,-18,40,-78,187,-74,-49,55,-110,112,-199,218,-425,530,199,1304,62,2352,168,2823,-5315,837,4953,2252,4624,4623,6361,3213,-1540,938,5698,-601,-1840,-1321,415,-514,-55,-892,3,-75,186,-753,471,-260,192,-752,21,-345,-220,-374,-258,-349,-282,-436,-427,-398,-253,-570,-291,-449,-37,-524,-88,-564,-148,-493,-85,-435,-73,-317,-105,-359,-181,-335,-176,-321,-195,-365,-206,-391,-180,-383,-187,-437,-200,-456,-199,-434,-221,-404,-185,-335,-239,-304,-221,-288,-247,-317,-222,-256,-192,-283,-227,-290,-193,-268,-185,-298,-185,-261,-149,-305,-194,-283,-162,-272,-207,-271,-209,-214,-185,-206,-201,-195,-156,-200,-178,-267,-179,-273,-180,-271,-149,-245,-175,-193,-143,-182,-204,-213,-175,-213,-191,-252,-204,-249,-193,-210,-188,-202,-194,-140,-186,-146,-188,-134,-188,-186,-187,-199,-210,-191,-202,-211,-250,-183,-225,-176,-254,-154,-188,-96,-150,-85,-102,-69,-80,-63,-71,-81,-74,-55,-81,-81,-69,-57,-79,-51,-37,-34,-57,-26,-18,6,-12,5,-5,22,-15,19,-13,6,-49,9,-38,-32,-76,-25,-65,-43,-75,-1,-82,-14,-96,-21,-95,-25,-47,-4,-67,-76,-41,-67,-39,-53,-5,-25,-37,30,14,61,-21,72,-39,30,-59,25,-48,-21}, + /* IRC_Composite_C_R0195_T180_P000.wav */ + 
{9,-9,9,-10,10,-11,11,-12,13,-13,14,-15,16,-17,18,-19,20,-21,22,-24,25,-26,25,-17,104,-114,196,-98,54,-126,232,-41,337,459,418,1258,2024,1306,-3081,2067,-322,1935,2599,4829,7530,3384,-1993,2649,3619,-256,-653,-863,-84,-381,-1124,-459,826,-56,-554,-375,247,362,403,-767,-54,-307,253,-528,-72,-499,-251,-538,-224,-365,-429,-387,-270,-98,-240,-189,-323,-221,-114,-200,-172,-265,-34,-233,-42,-325,-228,-392,-171,-360,-298,-341,-265,-231,-220,-299,-332,-289,-282,-307,-286,-295,-293,-265,-194,-288,-280,-341,-219,-319,-258,-367,-240,-308,-190,-294,-215,-308,-212,-271,-223,-283,-231,-230,-192,-212,-231,-187,-195,-184,-197,-212,-220,-239,-198,-224,-201,-254,-209,-229,-205,-256,-239,-265,-231,-229,-194,-202,-206,-179,-140,-169,-205,-189,-208,-194,-220,-219,-227,-219,-190,-210,-179,-230,-143,-194,-136,-216,-131,-181,-147,-210,-176,-226,-209,-222,-193,-214,-206,-190,-190,-205,-213,-200,-179,-148,-109,-85,-53,-73,-40,-100,-69,-150,-91,-144,-106,-152,-99,-85,-54,-38,-16,3,2,19,74,35,28,11,42,13,8,-18,-32,-36,-29,-19,-47,-86,-72,-66,-42,-83,-71,-83,-83,-72,-54,-20,-67,-68,-58,-74,-64,-79,-16,-60,-46,-42,-7,11,22,53,51,32,-13,33,28,3,-22,-46}, + /* IRC_Composite_C_R0195_T195_P000.wav */ + 
{12,-12,12,-12,12,-12,12,-12,12,-12,12,-12,12,-12,12,-12,11,-11,10,-9,8,-6,4,-1,-5,16,-58,-48,-46,77,37,-65,-49,-7,-27,197,332,255,848,1561,1608,-2167,729,-55,1458,1975,2964,6210,4289,99,1163,3083,300,-110,-1053,97,-72,-273,-635,580,371,-146,-5,150,225,128,22,74,-75,62,-60,-62,-162,-184,-359,-146,-99,33,-264,-53,-91,-68,-97,-68,-107,-168,-116,-102,-205,-156,-272,-180,-287,-225,-284,-228,-198,-203,-177,-240,-212,-239,-265,-245,-245,-239,-269,-232,-261,-248,-259,-262,-292,-271,-282,-251,-272,-265,-298,-265,-285,-267,-290,-263,-263,-239,-279,-219,-264,-196,-232,-191,-206,-170,-181,-179,-220,-200,-244,-184,-248,-231,-259,-234,-254,-249,-268,-279,-250,-255,-219,-236,-226,-171,-197,-163,-222,-176,-205,-187,-189,-200,-188,-195,-184,-172,-221,-174,-245,-154,-259,-156,-238,-169,-237,-202,-213,-230,-237,-242,-223,-236,-210,-202,-201,-217,-219,-206,-204,-158,-180,-128,-112,-64,-38,-58,-71,-95,-94,-116,-117,-160,-118,-117,-105,-102,-88,-47,-31,-21,-9,0,24,-11,6,-19,6,1,-16,-22,-27,-22,-10,-73,-58,-79,-51,-76,-65,-82,-99,-73,-70,-36,-82,-58,-110,-76,-105,-83,-46,-122,-54,-86,17,-33,15,14,-1,0,13,44,23,-6,2,-42}, + /* IRC_Composite_C_R0195_T210_P000.wav */ + 
{6,-6,6,-6,7,-7,7,-7,7,-7,7,-7,8,-8,8,-8,8,-9,9,-9,10,-10,10,-11,12,-12,13,-13,-3,-140,-62,16,74,-75,-81,-9,-154,52,75,214,374,933,1063,-50,-424,1215,-28,199,1671,2854,5292,2238,1086,1897,2061,-301,-116,-17,89,220,-368,-4,403,517,47,242,331,363,284,327,289,188,152,291,134,170,136,286,56,186,15,53,-103,44,-142,-179,-241,-86,-171,-94,-229,-133,-165,-58,-222,-146,-225,-113,-173,-105,-202,-191,-235,-184,-216,-202,-258,-257,-241,-211,-214,-256,-271,-278,-250,-239,-276,-254,-285,-245,-277,-265,-277,-285,-295,-284,-275,-264,-248,-242,-249,-215,-225,-190,-221,-208,-231,-195,-210,-191,-211,-203,-222,-199,-222,-228,-234,-247,-238,-233,-235,-232,-214,-216,-211,-201,-215,-188,-250,-191,-224,-183,-218,-210,-195,-212,-196,-217,-202,-221,-224,-225,-263,-219,-268,-220,-277,-225,-254,-223,-243,-232,-228,-229,-221,-217,-194,-205,-169,-170,-122,-124,-95,-89,-83,-76,-91,-74,-103,-96,-96,-80,-91,-101,-107,-123,-107,-115,-88,-113,-70,-93,-25,-70,-18,-56,-11,-38,-15,-23,-20,-23,-29,-31,-55,-70,-58,-57,-52,-93,-74,-95,-46,-80,-63,-97,-83,-108,-85,-90,-88,-102,-101,-88,-79,-54,-29,-30,-33,-9,-5,-3,24,-9,7,-23}, + /* IRC_Composite_C_R0195_T225_P000.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-2,2,-2,2,-3,4,-4,6,-9,17,-16,-98,-60,-77,48,3,-81,-121,-43,-55,1,19,93,305,537,631,131,-260,545,567,27,1207,2128,2827,2558,1188,1325,1915,744,102,304,255,211,393,327,58,643,377,590,456,611,727,425,704,395,620,277,569,148,252,218,128,12,-134,-32,-106,-54,-94,-7,3,-19,-31,-59,-78,-49,-114,-79,-187,-72,-177,-103,-203,-112,-200,-151,-256,-183,-276,-182,-258,-181,-256,-202,-239,-241,-240,-281,-205,-284,-252,-257,-288,-259,-319,-259,-309,-239,-302,-232,-305,-231,-282,-235,-251,-227,-244,-183,-227,-177,-216,-169,-213,-156,-216,-165,-230,-183,-214,-199,-211,-207,-215,-203,-236,-236,-213,-269,-197,-284,-219,-271,-227,-256,-210,-245,-209,-247,-203,-231,-195,-235,-194,-248,-194,-245,-212,-237,-203,-241,-198,-226,-200,-216,-221,-205,-209,-210,-188,-216,-146,-202,-108,-186,-110,-157,-108,-129,-121,-125,-129,-118,-125,-104,-120,-104,-95,-101,-78,-84,-57,-71,-28,-65,-16,-83,-25,-68,-41,-63,-42,-59,-29,-46,-33,-54,-48,-58,-50,-85,-80,-93,-89,-117,-98,-103,-82,-104,-94,-118,-105,-87,-86,-85,-96,-100,-64,-69,-48,-63,-65,-45,-15,-39,-27,-55,-31}, + /* IRC_Composite_C_R0195_T240_P000.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,1,-1,1,-1,1,-1,2,-2,2,-3,5,-9,-97,-133,-196,-147,-157,-47,-93,37,-29,1,0,59,-116,-55,150,274,295,91,361,1004,1112,1011,1324,1700,2156,1559,331,574,1032,523,614,629,882,990,1087,1066,943,977,621,691,470,536,382,281,280,209,295,117,203,196,393,300,250,185,141,127,-15,-42,-136,-73,-208,-111,-261,-80,-157,-42,-115,-38,-121,-105,-155,-167,-199,-209,-235,-206,-227,-259,-253,-267,-251,-281,-214,-260,-213,-271,-234,-259,-192,-277,-204,-294,-201,-341,-226,-327,-238,-309,-206,-263,-189,-240,-185,-208,-186,-211,-192,-209,-158,-223,-155,-231,-165,-251,-159,-233,-168,-259,-189,-266,-206,-255,-200,-268,-211,-290,-218,-286,-233,-302,-227,-283,-216,-289,-207,-262,-193,-248,-196,-236,-182,-219,-173,-221,-200,-230,-207,-207,-205,-216,-209,-217,-217,-212,-207,-184,-207,-178,-189,-131,-147,-115,-129,-101,-99,-95,-93,-92,-108,-91,-105,-77,-114,-73,-104,-68,-90,-52,-74,-36,-70,-50,-80,-45,-81,-54,-78,-26,-78,-29,-94,-58,-114,-94,-144,-115,-134,-101,-99,-82,-78,-81,-65,-58,-71,-110,-125,-130,-133,-109,-102,-88,-107,-78,-90,-39,-58,-31,-64,-17,-39,-13}, + /* IRC_Composite_C_R0195_T255_P000.wav */ + 
{2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-3,3,-3,3,-3,3,-3,3,-3,4,-4,4,-5,5,-6,9,-19,-71,-126,-111,-131,-223,-213,-179,-73,-136,-101,-19,30,101,69,89,95,323,406,515,721,768,1140,1389,925,919,1504,1350,1027,1508,1800,2083,1870,1354,950,539,490,494,392,378,293,344,461,473,522,375,344,271,237,162,56,42,11,83,-5,55,-30,81,-3,71,-18,-50,0,-151,-66,-240,-143,-251,-198,-256,-250,-192,-239,-205,-271,-195,-240,-195,-267,-238,-257,-227,-242,-241,-251,-226,-256,-185,-246,-212,-252,-224,-227,-235,-216,-249,-192,-222,-170,-209,-167,-231,-184,-255,-188,-250,-181,-283,-185,-279,-146,-264,-164,-273,-161,-261,-181,-298,-216,-272,-226,-251,-181,-242,-222,-248,-216,-260,-249,-302,-271,-289,-255,-276,-256,-246,-225,-218,-198,-202,-222,-210,-223,-219,-210,-230,-209,-218,-149,-175,-141,-182,-152,-139,-146,-160,-169,-152,-140,-120,-129,-130,-102,-120,-59,-92,-67,-121,-84,-93,-62,-69,-69,-85,-101,-60,-94,-80,-123,-92,-126,-73,-83,-80,-90,-79,-71,-42,-42,-54,-59,-77,-88,-83,-86,-100,-126,-130,-119,-130,-124,-145,-127,-148,-111,-124,-92,-85,-75,-85,-70,-68,-66,-66,-59,-78,-67,-59,-65}, + /* IRC_Composite_C_R0195_T270_P000.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,3,-4,9,-91,-159,-163,-111,-130,-163,-90,-102,-89,-163,-184,-188,-171,-83,30,36,169,298,65,214,405,559,898,896,1271,2043,2157,2404,2912,2261,1344,1103,1268,1220,756,484,263,283,379,505,398,413,455,527,338,482,334,406,255,187,211,119,189,4,182,-16,145,-56,26,-74,-83,-100,-148,-133,-197,-139,-191,-198,-217,-194,-192,-215,-224,-197,-182,-180,-242,-199,-243,-153,-261,-180,-264,-151,-250,-164,-239,-176,-235,-178,-231,-191,-258,-227,-259,-218,-280,-188,-268,-159,-259,-154,-260,-137,-244,-185,-245,-228,-251,-260,-250,-247,-239,-233,-209,-205,-224,-262,-274,-268,-248,-262,-263,-277,-234,-255,-222,-233,-226,-262,-221,-257,-211,-267,-218,-251,-186,-221,-179,-224,-195,-213,-161,-226,-193,-212,-178,-219,-191,-204,-183,-190,-175,-182,-165,-164,-140,-131,-97,-107,-80,-102,-45,-85,-60,-81,-66,-74,-59,-82,-82,-91,-88,-102,-100,-120,-85,-122,-72,-129,-98,-137,-55,-101,-62,-69,-41,-72,-56,-83,-69,-90,-53,-98,-81,-105,-80,-107,-83,-112,-102,-115,-97,-125,-111,-136,-123,-145,-118,-131,-95,-114,-62,-70,-59,-73,-48,-57}, + /* IRC_Composite_C_R0195_T285_P000.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-3,3,-4,6,-9,19,-100,-184,-163,-233,-161,-194,-211,-190,-252,-368,-121,58,37,-12,35,111,-96,-223,-19,464,559,1425,2382,1927,1709,2204,1586,984,1062,1267,1442,1113,1078,1182,1130,1148,848,575,534,584,541,405,456,300,346,294,405,279,183,240,258,176,160,100,-27,100,-24,93,-139,0,-270,60,-268,20,-249,-123,-250,-178,-127,-185,-84,-288,-109,-237,-116,-161,-179,-137,-193,-134,-189,-164,-211,-174,-211,-244,-187,-259,-227,-263,-160,-220,-206,-234,-213,-214,-205,-225,-212,-266,-182,-254,-229,-238,-166,-272,-223,-282,-263,-305,-287,-272,-261,-288,-239,-247,-227,-247,-224,-236,-226,-259,-230,-256,-220,-217,-226,-224,-232,-214,-225,-205,-261,-251,-252,-219,-235,-230,-230,-236,-236,-209,-203,-226,-221,-222,-180,-160,-190,-161,-159,-154,-156,-97,-111,-129,-127,-107,-66,-95,-69,-92,-64,-55,-64,-43,-78,-65,-103,-71,-87,-110,-101,-114,-93,-117,-90,-112,-88,-109,-72,-82,-69,-68,-63,-65,-90,-69,-67,-77,-62,-56,-47,-91,-59,-73,-34,-101,-70,-91,-61,-105,-91,-128,-154,-149,-128,-150,-140,-142,-93,-111,-108,-78,-62,-77,-59,-62}, + /* IRC_Composite_C_R0195_T300_P000.wav */ + 
{4,-4,4,-4,4,-4,4,-4,4,-5,5,-5,5,-5,5,-5,5,-6,6,-6,6,-6,6,-7,7,-7,8,-8,8,-9,10,-11,13,-16,24,-75,-192,-243,-206,-121,-200,-413,-335,-275,-203,-85,-38,193,-95,-382,283,-596,-531,1085,630,1024,2620,2194,1518,1499,1392,607,112,846,1164,950,675,1005,1305,1174,1521,1142,1208,859,1074,771,735,552,498,679,274,454,160,457,127,176,172,280,101,64,67,-27,98,11,3,-150,-68,-72,-105,-163,-160,-139,-135,-141,-133,-136,-55,-184,-109,-200,-81,-104,-78,-213,-147,-100,-188,-136,-214,-162,-281,-139,-265,-190,-228,-184,-320,-138,-205,-130,-238,-158,-209,-100,-297,-175,-328,-233,-318,-199,-385,-256,-355,-254,-336,-269,-310,-262,-286,-237,-272,-206,-279,-217,-262,-212,-280,-260,-296,-269,-321,-249,-293,-297,-294,-263,-252,-259,-248,-231,-189,-223,-204,-205,-214,-202,-172,-186,-170,-216,-162,-160,-149,-167,-137,-119,-139,-138,-127,-91,-90,-111,-123,-105,-94,-88,-92,-101,-104,-94,-87,-104,-82,-106,-68,-106,-75,-113,-53,-76,-65,-86,-59,-75,-81,-73,-84,-59,-75,-52,-59,-73,-55,-69,-45,-87,-49,-62,-47,-76,-53,-57,-57,-74,-83,-72,-109,-94,-135,-114,-127,-112,-128,-125,-121,-104,-114,-94,-97,-102,-114,-98,-73,-61,-46,-72}, + /* IRC_Composite_C_R0195_T315_P000.wav */ + 
{-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-4,4,-4,4,-4,4,-5,5,-5,6,-7,8,-10,14,-29,-234,-230,-266,-204,-305,-247,-524,-359,-150,55,142,71,572,-570,-1004,861,-803,-374,3189,1120,2120,3144,2428,636,1026,1269,59,-187,571,1369,181,332,532,1375,1240,1554,1033,1045,866,1172,1026,1022,606,720,597,671,349,376,201,259,47,222,146,93,-124,20,-35,-174,-23,6,-91,-108,-18,6,38,-2,-118,67,-77,-79,-121,-96,-206,-171,-74,-193,-141,-167,-54,-168,-65,-180,-137,-281,-209,-164,-172,-178,-158,-127,-188,-174,-11,-152,-142,-222,-182,-348,-203,-338,-287,-380,-316,-332,-319,-347,-297,-281,-364,-362,-314,-304,-376,-347,-321,-343,-330,-367,-276,-306,-278,-290,-268,-295,-267,-229,-264,-248,-290,-244,-263,-201,-223,-197,-193,-189,-154,-159,-137,-178,-126,-217,-115,-163,-129,-166,-130,-150,-147,-107,-147,-129,-143,-117,-141,-126,-115,-135,-99,-146,-90,-112,-81,-119,-52,-80,-79,-84,-83,-64,-59,-53,-89,-91,-74,-101,-29,-121,-49,-139,-64,-94,-23,-57,-36,-29,-38,-50,-50,-47,-30,-53,-56,-81,-58,-97,-62,-69,-56,-82,-74,-103,-94,-111,-122,-150,-117,-148,-135,-117,-104,-87,-95,-77,-107,-55,-89,-56,-82,-65,-70,-36,-13}, + /* IRC_Composite_C_R0195_T330_P000.wav */ + 
{-15,15,-15,15,-15,15,-15,16,-16,16,-16,16,-16,16,-16,16,-16,16,-16,16,-16,15,-15,14,-13,11,-8,5,0,-7,12,-293,-526,-71,-505,-401,-1003,-83,-138,277,-121,450,638,-529,-2538,1974,-1021,-928,6018,1862,2117,4170,3177,-540,80,773,465,-382,760,707,287,-96,895,1260,1490,749,709,1138,1758,1405,855,490,661,833,888,382,366,310,491,203,204,155,246,122,-32,-122,-37,50,94,-114,-242,-173,94,-97,-233,-239,44,3,91,-64,-78,-19,148,9,2,-196,62,-119,-154,-277,128,-102,-103,-182,-189,-136,-49,51,-260,-285,-17,-46,-344,-260,-264,-310,-481,-356,-385,-299,-442,-338,-388,-433,-413,-352,-281,-444,-379,-419,-361,-361,-316,-320,-407,-371,-354,-283,-325,-263,-281,-264,-300,-242,-200,-218,-231,-208,-283,-185,-218,-137,-254,-173,-210,-124,-178,-189,-155,-197,-150,-148,-83,-183,-134,-157,-104,-154,-134,-133,-118,-172,-161,-120,-119,-156,-145,-96,-143,-172,-117,-71,-107,-120,-113,-53,-111,-85,-65,-51,-97,-63,-15,-77,-94,-67,-18,-45,-126,-60,-101,-21,-115,3,-114,-45,-90,-5,-69,-55,-89,-42,-99,-38,-100,-24,-95,-21,-116,-37,-113,-13,-85,-64,-143,-57,-122,-61,-126,-85,-123,-78,-94,-71,-113,-55,-72,-37,-93,-41,-42,-10,-34,-37,-13,-34}, + /* IRC_Composite_C_R0195_T345_P000.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-2,2,-3,-196,-727,-30,-506,-136,-1125,-612,-692,-135,283,619,802,-51,-2221,1398,-964,-5308,8059,961,2013,8066,4451,-858,1438,1808,-955,-2183,475,1387,742,458,140,184,1030,1768,1316,965,1001,1155,1147,1222,1301,1124,461,289,297,592,383,273,-220,-259,-53,323,264,-32,-200,42,191,421,77,-195,-257,18,-154,-38,-140,-135,-290,-47,107,49,-29,-98,-94,-50,49,264,36,153,-148,86,-24,287,-156,101,-86,125,-186,-182,-278,-236,-501,-636,-484,-401,-330,-428,-463,-554,-283,-366,-318,-494,-441,-375,-318,-358,-465,-416,-438,-337,-400,-344,-393,-247,-308,-313,-356,-181,-229,-204,-374,-214,-320,-200,-307,-179,-339,-189,-252,-152,-246,-143,-210,-166,-196,-69,-139,-172,-216,-103,-73,-127,-214,-116,-118,-96,-247,-135,-187,-78,-264,-166,-235,-95,-173,-142,-167,-169,-129,-117,-114,-125,-125,-103,-137,-47,-110,-26,-121,-21,-72,-15,-71,-68,-34,-53,-38,-72,-96,-96,-90,-11,-126,-33,-166,-22,-92,18,-75,-35,-69,-60,-67,-82,-72,-61,-96,-68,-103,-26,-100,-19,-108,0,-110,-34,-106,-39,-54,-75,-76,-83,-35,-42,-69,-51,-59,-38,-55,19,-49,-3,-43,27,-14,-5,-46,-24}}; + +const int16_t irc_composite_c_r0195_p015[][256] = + {/* IRC_Composite_C_R0195_T000_P015.wav */ + 
{4,-4,4,-4,4,-4,4,-4,4,-4,4,-5,5,-5,4,-4,4,-4,3,-2,1,2,-5,-101,-665,-424,-897,-170,-507,-460,-1001,-828,-356,47,474,980,404,2103,-3700,-5968,7765,-9133,9741,10867,71,6925,3456,-1455,-2458,2718,-170,-1201,-1751,1809,507,632,-1165,1945,1684,1705,-450,1152,360,1527,269,952,644,931,727,1576,725,835,741,915,186,162,327,384,20,24,-311,60,225,484,-123,-28,-210,118,15,76,-335,-153,-249,49,-120,24,-200,-169,-213,-175,-235,-158,-280,-399,-410,-304,-104,-355,-319,-311,-79,-366,-75,-79,-120,-487,147,-42,-198,-175,135,-242,55,233,123,-314,-58,-179,-96,-352,-26,7,11,-105,-129,125,-110,-293,-276,-461,-507,-406,-422,-497,-481,-446,-459,-325,-336,-443,-420,-320,-391,-360,-309,-407,-375,-288,-289,-385,-315,-272,-292,-271,-256,-318,-173,-220,-185,-329,-142,-239,-76,-219,-117,-277,-107,-204,-117,-233,-199,-220,-177,-238,-171,-170,-187,-285,-154,-168,-145,-192,-131,-239,-194,-170,-109,-205,-155,-164,-163,-187,-126,-85,-138,-195,-140,-189,-84,-134,-22,-177,-51,-128,-26,-105,26,-144,-65,-123,-28,-136,19,-69,-43,-112,40,-43,59,-67,32,-35,97,-21,99,-27,90,23,141,21,38,39,61,12,10,-1,37,-3,7,2,41,-19,-12,-12,16,-53}, + /* IRC_Composite_C_R0195_T015_P015.wav */ + 
{100,-101,102,-102,102,-102,102,-102,101,-99,96,-93,88,-81,71,-58,37,-6,-46,154,-587,-280,-52,-712,-530,-842,-228,-637,-217,-1535,-463,274,2688,1298,-390,-2760,4349,-16532,13572,-2117,2580,22789,-850,559,1346,2618,-4202,397,316,686,-1654,1356,-848,1484,164,1873,376,1720,39,1069,309,1623,748,807,488,1192,547,1364,679,594,48,334,-179,362,98,-149,-587,-275,-81,371,-430,-372,-369,412,-126,-39,-294,169,-117,61,-209,-100,-271,-40,-262,-244,-406,12,-243,-253,-484,54,-243,-283,-575,-138,-358,-262,-307,-61,-567,-206,25,51,-696,-10,-132,-245,-388,187,-132,-194,-316,-92,-71,-291,-374,27,-295,-84,-4,145,-576,122,-217,-153,-724,53,-337,-126,-502,-305,-401,-138,-372,-296,-502,-318,-375,-140,-645,-283,-375,-217,-529,-172,-316,-324,-287,-239,-309,-305,-230,-228,-310,-332,-289,-168,-302,-286,-241,-197,-266,-181,-156,-260,-272,-185,-148,-225,-221,-239,-178,-223,-142,-203,-226,-260,-124,-148,-200,-259,-124,-203,-182,-207,-78,-186,-163,-136,-74,-179,-123,-45,-87,-180,-103,-28,-117,-135,-64,-29,-148,-90,-13,-28,-111,-40,7,-42,-98,-26,-4,-57,-22,22,-38,-72,76,67,1,-21,80,49,39,63,66,26,23,62,97,29,39,49,28,-1,52,74,-6,-4,19,15,-15,-12,17,-50}, + /* IRC_Composite_C_R0195_T030_P015.wav */ + 
{-101,102,-103,105,-105,106,-106,106,-104,102,-97,90,-79,62,-35,-12,109,-431,-649,311,-951,117,-1193,10,-1439,943,-1996,57,-1866,3892,-160,3817,-4940,7695,-14389,320,6811,-6312,29122,1548,2820,2754,2735,-5703,-20,838,313,-2387,2394,-1371,1731,262,1937,34,1456,-294,1713,221,997,660,1535,549,1179,928,928,293,321,-76,273,-164,-356,-622,24,-278,-26,-520,-116,-436,-61,-412,-21,-436,-263,-534,-25,-24,127,-328,-77,-189,107,-200,-313,-487,-278,-245,-413,-269,-216,-100,-434,-149,-238,-270,-527,-94,-144,-244,-529,-94,-120,-125,-544,-205,-477,-214,-375,-52,-404,-184,-361,-14,-189,-53,-346,-89,-276,-127,-233,-126,-417,-169,-225,-263,-470,-227,-238,-276,-329,-339,-304,-226,-209,-212,-255,-261,-275,-96,-168,-289,-298,-134,-296,-334,-291,-248,-272,-268,-362,-278,-286,-108,-360,-215,-330,-76,-394,-179,-365,-104,-449,-206,-420,-128,-409,-167,-434,-175,-416,-105,-318,-125,-408,-157,-270,-45,-277,-160,-326,-56,-220,-114,-280,-69,-199,-100,-242,-24,-143,-39,-214,3,-104,20,-184,20,-140,-7,-183,31,-135,-21,-164,13,-128,42,-188,49,-113,71,-189,90,-97,102,-87,84,-61,147,-63,149,-94,170,-108,183,-108,208,-150,179,-163,250,-98,215,-137,235,-107,214,-135,246,-126,201,-171,186,-149,135,-194,121}, + /* IRC_Composite_C_R0195_T045_P015.wav */ + 
{-146,153,-161,170,-179,189,-200,212,-224,238,-252,266,-278,280,-241,-262,-1208,369,-877,618,-1865,344,-1624,1098,-2418,1166,-2204,3338,-287,6351,-5670,4490,-9471,-5595,10415,-3658,26422,6163,571,640,3020,-3458,345,262,-17,-2379,1504,-915,2155,96,1778,-36,1408,800,1533,117,618,355,1940,474,1443,116,436,-13,320,-248,-589,-590,-422,-164,-365,-343,-251,-474,20,-343,-59,-619,-421,-460,-72,-443,-399,-435,-238,-311,-325,-256,-210,-285,-282,-222,-283,-212,-374,-178,-386,-213,-343,-35,-317,-276,-390,-79,-244,-300,-445,-222,-296,-233,-458,-263,-403,-259,-462,-188,-423,-283,-371,-78,-225,-208,-380,-203,-156,-140,-307,-202,-208,-86,-293,-179,-233,-185,-390,-305,-381,-170,-416,-344,-426,-229,-331,-201,-370,-183,-273,-88,-257,-116,-206,-60,-298,-72,-233,-115,-304,-131,-336,-187,-271,-44,-375,-190,-355,-35,-321,-189,-400,-136,-365,-198,-383,-200,-395,-244,-401,-188,-398,-184,-393,-179,-365,-150,-314,-118,-327,-118,-291,-73,-272,-87,-247,-52,-258,-27,-226,25,-210,8,-202,51,-136,62,-162,35,-135,61,-156,-12,-167,55,-165,15,-174,47,-131,28,-125,76,-91,69,-71,126,-60,104,-40,141,-51,117,-33,141,-58,103,-38,158,-48,95,-52,149,-45,86,-47,120,-68,88,-39,116,-45,74,-35,89,-68,62,-90,53}, + /* IRC_Composite_C_R0195_T060_P015.wav */ + 
{29,-34,39,-45,52,-59,69,-80,93,-110,131,-160,205,-308,-459,-920,-171,388,-674,-719,-1064,350,-1149,552,-1891,2109,-28,5773,-3032,5413,-11840,194,-1388,1646,23561,8535,2547,1107,-66,257,913,517,-131,-1162,-856,344,2136,-879,1332,-592,2048,385,1977,174,2217,-131,2091,-69,1390,-1343,963,-905,515,-1176,-81,-1557,250,-301,-119,-467,-479,-337,138,-523,-373,-525,-22,-558,-74,-857,-193,-765,-208,-1034,-15,-947,-96,-803,45,-665,15,-523,-1,-312,49,-446,-141,-311,-126,-246,-344,-467,-27,-388,-328,-427,-198,-495,-118,-356,-277,-493,-134,-525,-71,-411,-262,-445,-175,-367,-96,-400,-267,-383,-49,-313,-260,-205,-179,-166,-56,-272,-194,-146,-166,-310,-204,-405,-207,-321,-211,-468,-223,-383,-150,-326,-281,-312,-146,-220,-208,-241,-286,-189,-157,-134,-243,-213,-136,-145,-92,-188,-205,-244,-73,-253,-131,-321,-224,-277,-194,-364,-238,-342,-254,-284,-301,-272,-246,-239,-245,-226,-236,-207,-220,-191,-251,-189,-226,-188,-154,-195,-171,-163,-122,-121,-97,-111,-91,-108,-20,-120,-59,-33,-136,-42,-55,-62,-49,-97,-100,16,-97,-13,-104,7,-74,-20,-54,27,-6,45,-12,56,35,24,86,33,48,24,68,72,-3,50,22,52,12,43,-32,109,-68,106,-66,49,-61,102,-74,44,-72,28,-60,21,-71,-4,-76,16}, + /* IRC_Composite_C_R0195_T075_P015.wav */ + 
{19,-16,12,-7,2,4,-11,18,-27,36,-47,59,-68,37,-553,36,-916,-89,-13,-916,-389,-1224,572,-284,1453,853,1354,3254,1573,-10498,-1005,-5881,20609,10333,4130,9878,-3085,-1702,1001,5191,773,-2104,-1269,-41,1645,-349,309,387,447,1304,620,1533,360,1697,867,1545,-94,149,-359,473,-1422,-432,-1225,-246,-886,186,-914,-144,-892,309,-602,60,-603,-112,-520,-150,-750,-486,-681,-262,-736,-448,-1017,-103,-734,-305,-903,-61,-752,39,-644,-19,-565,33,-324,-70,-458,-205,-229,-182,-403,-322,-245,-211,-302,-350,-295,-222,-298,-293,-385,-263,-357,-235,-379,-252,-470,-139,-429,-84,-457,-178,-385,-120,-380,-232,-339,-192,-273,-205,-196,-146,-164,-174,-129,-218,-203,-145,-229,-290,-273,-262,-283,-222,-302,-215,-314,-150,-291,-151,-311,-154,-281,-177,-260,-179,-190,-177,-222,-159,-209,-215,-215,-185,-191,-224,-214,-221,-182,-209,-269,-260,-254,-234,-284,-241,-293,-201,-269,-218,-264,-188,-203,-199,-218,-207,-118,-151,-146,-184,-126,-92,-92,-129,-108,-109,-42,-120,-57,-144,-45,-102,-23,-164,-63,-98,-36,-106,-81,-3,-74,-55,-79,-49,-60,-59,-60,-31,-36,-35,57,-37,99,-7,135,-26,115,14,154,-6,78,28,119,-3,53,-48,80,-38,55,-56,13,-65,74,-45,19,-79,50,-39,24,-95,40,-41,15,-61,55,-129}, + /* IRC_Composite_C_R0195_T090_P015.wav */ + 
{106,-108,111,-113,116,-119,121,-123,125,-125,121,-102,-150,-386,-21,-846,-117,203,-762,-759,-425,-20,26,535,183,3678,866,180,877,-12284,4742,-888,21444,10158,-2546,4612,-145,-630,3778,3819,1940,-4361,-1372,2229,1116,-1596,-423,1713,840,536,1040,2123,618,888,-302,898,-618,52,-845,240,-1279,-435,-1143,105,-929,-50,-1077,-180,-541,98,-930,-161,-671,-158,-656,-338,-774,-263,-620,-260,-792,-238,-787,-172,-874,-235,-797,-199,-767,-178,-702,-101,-607,-63,-479,-72,-504,-49,-400,-118,-387,-113,-404,-73,-442,-100,-429,-15,-496,-79,-480,-58,-494,-80,-498,-112,-411,-138,-447,-136,-406,-145,-485,-166,-440,-131,-485,-87,-417,-104,-413,-102,-426,-31,-350,-62,-299,27,-272,-36,-302,-22,-320,-120,-364,-124,-307,-111,-382,-185,-231,-108,-275,-163,-225,-123,-157,-158,-254,-183,-208,-146,-274,-183,-229,-148,-301,-148,-275,-212,-388,-177,-304,-193,-358,-180,-265,-195,-243,-171,-184,-212,-200,-212,-156,-173,-149,-232,-145,-162,-100,-147,-119,-102,-57,-53,-87,-18,-36,-11,-87,-46,-46,17,-89,-82,-57,6,-100,-61,-101,-38,-60,-46,-108,-40,-41,19,-104,-21,-14,77,-24,51,22,136,25,103,19,102,3,92,-42,26,-51,25,-38,-15,-65,-12,-35,-8,28,24,-31,-1,36,92,2,6,8,52,-33,-7,-25,-27}, + /* IRC_Composite_C_R0195_T105_P015.wav */ + 
{16,-19,23,-28,34,-40,48,-57,68,-82,100,-121,33,-823,199,-561,-96,-481,741,-1752,-67,-480,1840,-966,591,1601,5280,-3652,770,-7901,214,10553,2446,18816,1377,-3359,-1411,6633,5595,-115,-232,1823,-2575,-717,-242,2719,-2255,509,511,2747,225,1585,549,951,-478,-34,-731,-13,-1246,-512,-881,-341,-1127,207,-442,-495,-702,-517,-566,35,-620,-983,-450,-179,-571,-441,-658,-297,-211,-340,-730,-514,-334,-479,-562,-662,-463,-524,-336,-537,-522,-336,-226,-483,-343,-332,-364,-181,-306,-365,-271,-175,-316,-213,-283,-326,-115,-221,-216,-290,-146,-290,-98,-334,-301,-279,-138,-349,-278,-318,-250,-285,-237,-388,-326,-257,-206,-416,-256,-293,-207,-350,-156,-397,-149,-314,-125,-323,-153,-327,-64,-258,-145,-241,-10,-249,-51,-197,-108,-234,33,-286,-138,-267,-26,-273,-117,-347,-77,-229,-117,-301,-119,-229,-104,-247,-175,-311,-162,-227,-248,-287,-181,-293,-170,-318,-206,-285,-159,-334,-167,-232,-161,-235,-76,-246,-154,-204,-10,-221,-106,-229,14,-182,-12,-251,30,-154,32,-190,61,-152,90,-145,48,-122,71,-96,29,-121,6,-57,66,-80,40,-49,59,-112,-8,-129,54,-18,76,-21,97,37,171,23,59,-61,75,-37,21,-127,1,-154,68,-112,-12,-180,86,-58,91,-120,128,-28,129,-100,167,-17,106,-84,94,-49,76,-37}, + /* IRC_Composite_C_R0195_T120_P015.wav */ + 
{39,-39,39,-39,39,-38,38,-37,35,-32,27,-19,0,-223,40,-675,329,-841,457,-348,-274,-1647,1770,432,-494,3,4260,289,887,-4454,-3947,7446,-2378,15614,7773,95,-2028,2409,7171,2953,950,1439,-1677,-1118,-1770,1575,491,137,-1226,1181,2451,1325,-149,862,100,-196,-613,-302,-1148,-1325,-632,-731,-1011,-688,174,-67,-430,-378,-168,-383,-445,-570,-934,-576,-71,-333,-939,-576,-180,-299,-551,-542,-536,-312,-421,-506,-398,-463,-360,-423,-342,-463,-390,-485,-299,-416,-291,-519,-227,-313,-272,-376,-199,-241,-176,-354,-259,-245,-94,-289,-288,-266,-79,-203,-210,-332,-200,-224,-227,-331,-258,-315,-219,-293,-232,-358,-231,-306,-201,-298,-234,-413,-216,-295,-220,-417,-228,-272,-126,-326,-197,-254,-104,-275,-161,-280,-75,-215,-110,-276,-33,-130,-19,-261,-8,-171,30,-242,-55,-229,-65,-254,-74,-218,-134,-301,-99,-211,-155,-300,-156,-234,-131,-322,-164,-311,-142,-349,-229,-334,-148,-297,-165,-289,-133,-215,-100,-254,-101,-185,-51,-234,-38,-186,-6,-168,0,-111,-29,-125,29,-96,-23,-153,22,-59,-32,-103,28,-118,11,-64,83,-103,41,-112,28,-109,99,-41,39,-86,157,43,121,-15,157,-11,121,-30,90,-113,25,-109,38,-172,-47,-151,15,-102,21,-97,16,-33,104,12,59,-55,121,-5,63,-116,114,-30,115,-85}, + /* IRC_Composite_C_R0195_T135_P015.wav */ + 
{15,-17,19,-21,24,-28,32,-37,43,-50,60,-73,90,-115,157,-297,361,-673,287,-116,-598,-544,1021,-902,-263,1113,1563,-848,3395,810,-1816,-1445,-4755,10984,3295,5362,6262,2589,-529,1444,7698,4758,-2233,-2138,-1622,980,-226,100,-517,871,952,682,983,1138,-542,-341,-164,-377,-526,-992,-910,-828,-413,-830,-380,-392,-124,-316,-225,-315,-25,-678,-691,-468,-412,-545,-395,-676,-301,-467,-356,-548,-300,-516,-260,-553,-295,-485,-232,-525,-220,-421,-217,-488,-227,-455,-268,-426,-269,-474,-232,-395,-199,-378,-219,-372,-176,-401,-202,-354,-114,-340,-124,-267,-86,-322,-166,-298,-181,-316,-179,-307,-163,-290,-175,-321,-186,-325,-234,-372,-200,-335,-202,-352,-199,-268,-180,-319,-236,-296,-200,-331,-209,-300,-142,-273,-118,-226,-91,-203,-97,-250,-91,-176,-72,-201,-65,-156,-22,-171,-59,-200,-36,-210,-93,-201,-48,-197,-73,-185,-73,-195,-95,-266,-166,-294,-169,-299,-216,-315,-187,-265,-167,-287,-146,-275,-175,-265,-144,-282,-127,-249,-66,-222,-39,-146,-32,-126,-20,-76,5,-92,18,-56,21,-91,13,-119,-23,-127,37,-71,16,-116,10,-105,12,-96,55,-69,43,-76,137,-20,86,-3,123,28,100,8,49,-58,60,-31,-10,-117,-15,-44,-19,-89,2,-51,-18,-51,62,-39,31,-46,59,-59,67,-57,112,-72,62,-22}, + /* IRC_Composite_C_R0195_T150_P015.wav */ + 
{22,-23,24,-24,25,-26,27,-27,28,-28,28,-28,26,-24,19,-9,-13,134,-205,5,10,-146,-122,-75,-78,-152,345,-100,1711,198,1882,367,2730,-6333,870,5933,2367,6468,4276,4587,970,719,5822,3692,-1207,-2971,-1058,797,482,-202,-740,507,536,875,480,668,-204,-211,-690,500,-1107,-874,-841,-223,-910,-122,-465,-407,-494,-192,-244,-202,-337,-294,-451,-519,-512,-353,-580,-345,-407,-307,-608,-176,-450,-303,-475,-205,-453,-277,-342,-237,-284,-212,-314,-272,-380,-283,-368,-307,-417,-297,-337,-321,-374,-329,-283,-379,-283,-310,-317,-249,-232,-217,-269,-190,-252,-183,-270,-238,-227,-187,-286,-191,-260,-230,-245,-233,-289,-230,-246,-218,-241,-208,-260,-204,-276,-211,-286,-243,-284,-211,-281,-227,-258,-199,-238,-185,-241,-133,-220,-133,-189,-80,-190,-65,-146,-88,-167,-80,-129,-110,-166,-124,-142,-139,-178,-111,-146,-116,-143,-102,-118,-138,-137,-152,-173,-190,-174,-190,-193,-230,-208,-208,-216,-246,-203,-231,-209,-216,-167,-210,-170,-158,-128,-148,-142,-127,-104,-88,-98,-82,-75,-58,-53,-46,-85,-53,-56,-1,-14,17,-80,-58,-41,-57,-6,-70,6,-78,37,-14,58,10,69,35,20,45,42,27,3,3,-29,-5,-10,26,-54,-21,-21,39,-58,-58,-23,10,-16,-35,6,-22,2,-8,54,-13,7,-2,51,6}, + /* IRC_Composite_C_R0195_T165_P015.wav */ + 
{20,-21,21,-22,23,-23,24,-25,26,-26,27,-28,29,-29,30,-30,29,-28,22,-5,163,-209,156,-276,329,-325,157,-151,120,93,1138,522,872,2060,255,503,-4749,3937,3841,3248,5654,4851,3110,1024,1230,4380,981,-2433,-2102,345,461,292,-696,50,937,951,-195,227,100,-111,-682,-242,-295,-695,-542,-371,-167,-471,-237,-441,-313,-257,-355,-428,-338,-330,-347,-256,-340,-455,-298,-395,-419,-365,-319,-336,-313,-224,-306,-223,-245,-277,-258,-289,-268,-318,-289,-331,-299,-363,-319,-390,-363,-384,-327,-342,-339,-372,-297,-284,-259,-319,-264,-296,-233,-297,-258,-289,-255,-235,-239,-231,-238,-221,-219,-255,-237,-261,-205,-265,-219,-253,-180,-250,-196,-250,-200,-248,-207,-278,-194,-276,-219,-261,-199,-283,-219,-241,-187,-217,-180,-196,-141,-180,-121,-175,-110,-181,-79,-171,-84,-195,-62,-164,-99,-185,-115,-180,-164,-155,-140,-152,-147,-164,-113,-154,-117,-178,-132,-201,-137,-183,-162,-227,-171,-200,-151,-199,-165,-161,-150,-178,-144,-157,-158,-179,-144,-176,-163,-168,-98,-168,-122,-152,-52,-148,-86,-125,-9,-56,-37,-107,-79,-44,-26,-30,-92,-55,-14,29,14,24,62,67,48,30,48,10,57,-9,40,-55,25,-77,17,-96,25,-64,21,-101,-1,4,58,-21,25,24,70,3,30,9,36,-16,6,-21,-39,-15}, + /* IRC_Composite_C_R0195_T180_P015.wav */ + 
{-9,9,-9,9,-10,10,-10,10,-11,11,-11,11,-12,12,-13,13,-14,15,-15,16,-17,19,-21,50,-66,25,121,28,-120,155,-168,335,416,671,222,2210,806,1236,-2882,612,2593,898,4860,6139,4465,870,-196,3886,2840,-2197,-1283,-67,115,-435,2,221,731,-37,-52,179,744,-513,-450,-510,328,-437,-225,-503,1,-204,-76,-530,-331,-379,-166,-435,-279,-284,-151,-266,-225,-384,-293,-305,-154,-293,-219,-273,-134,-244,-264,-371,-311,-317,-245,-345,-336,-343,-237,-329,-370,-441,-342,-336,-321,-387,-336,-319,-260,-281,-304,-299,-268,-249,-331,-318,-306,-253,-254,-267,-280,-263,-244,-262,-278,-297,-251,-213,-235,-246,-260,-197,-223,-206,-272,-188,-228,-176,-268,-213,-279,-202,-229,-233,-258,-216,-166,-186,-223,-208,-202,-136,-198,-161,-225,-147,-174,-123,-192,-165,-169,-119,-164,-157,-169,-110,-156,-134,-190,-131,-196,-117,-175,-127,-176,-95,-111,-96,-149,-112,-153,-144,-212,-161,-218,-171,-217,-170,-191,-155,-154,-114,-156,-141,-134,-77,-145,-152,-168,-124,-167,-164,-158,-156,-173,-143,-139,-75,-112,-48,-151,-85,-139,-30,-81,-70,-106,-105,-40,-41,29,15,57,54,103,128,65,65,35,45,-4,-23,-56,-53,-39,-44,-11,-66,11,-23,53,-3,44,26,45,23,36,66,32,13,3,39,11,-14,8,-12}, + /* IRC_Composite_C_R0195_T195_P015.wav */ + 
{3,-3,4,-4,4,-5,5,-5,6,-6,7,-7,8,-9,9,-10,11,-13,14,-16,19,-22,27,-33,44,-87,114,-132,102,-154,155,-97,-20,-37,-29,363,412,474,853,1336,1092,-788,-1724,2098,1099,2640,4844,4905,2895,-39,1681,2632,175,-1400,-248,325,22,-206,196,419,561,41,192,277,401,-173,2,-114,106,-178,-24,-176,30,-91,54,-173,-183,-124,-65,-136,-127,-106,-163,-59,-115,-115,-267,-191,-291,-145,-314,-229,-397,-205,-300,-229,-373,-215,-360,-218,-346,-234,-397,-220,-360,-257,-382,-254,-389,-213,-322,-234,-324,-214,-345,-227,-381,-255,-363,-174,-354,-207,-318,-230,-299,-263,-316,-271,-243,-219,-239,-195,-263,-183,-262,-181,-310,-210,-258,-227,-229,-243,-215,-263,-194,-269,-182,-235,-184,-209,-147,-196,-148,-196,-147,-207,-155,-221,-187,-223,-184,-221,-208,-205,-186,-173,-165,-148,-132,-141,-127,-151,-120,-154,-131,-136,-104,-119,-115,-117,-108,-145,-140,-188,-146,-212,-174,-199,-183,-203,-176,-190,-174,-178,-155,-183,-156,-192,-139,-178,-158,-185,-162,-172,-198,-158,-136,-126,-116,-127,-125,-157,-109,-154,-85,-152,-60,-135,-26,-117,10,-26,16,-15,36,69,64,91,41,52,-5,31,-40,-16,-60,-3,-25,-20,-13,18,23,6,15,15,42,21,45,37,32,17,30,46,26,13,23,-19}, + /* IRC_Composite_C_R0195_T210_P015.wav */ + 
{5,-5,6,-6,6,-6,6,-6,6,-6,7,-7,7,-7,7,-8,8,-8,8,-9,9,-9,9,-9,9,-9,7,-1,-45,-76,21,-111,31,-35,-9,-141,17,-81,104,351,414,314,1167,1284,-698,-334,-392,1578,2039,2332,5286,3230,840,735,2013,886,-250,-668,388,293,-154,194,485,539,373,247,362,554,379,14,200,206,243,111,161,197,127,193,183,265,-4,123,-60,64,-188,-26,-316,-89,-293,-150,-287,-153,-256,-136,-244,-221,-246,-144,-274,-195,-276,-224,-307,-223,-340,-289,-302,-287,-323,-301,-317,-303,-243,-292,-298,-294,-312,-280,-306,-243,-309,-226,-276,-214,-262,-251,-287,-252,-261,-239,-268,-241,-251,-220,-263,-226,-289,-222,-266,-235,-299,-243,-268,-249,-232,-227,-224,-212,-188,-201,-205,-192,-215,-147,-197,-162,-188,-158,-182,-181,-170,-210,-185,-204,-179,-201,-174,-187,-176,-157,-173,-157,-158,-147,-138,-131,-123,-128,-126,-106,-147,-126,-173,-126,-184,-147,-199,-168,-203,-173,-188,-192,-200,-201,-192,-187,-199,-212,-196,-185,-179,-192,-178,-197,-173,-188,-148,-169,-132,-157,-149,-165,-140,-138,-136,-123,-135,-83,-122,-54,-79,-22,-60,-30,-15,-6,29,-8,28,-22,45,-13,39,-33,30,5,20,-21,0,10,30,-5,10,-9,35,17,55,16,50,34,50,43,21,55,18,41}, + /* IRC_Composite_C_R0195_T225_P015.wav */ + 
{3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-2,2,-2,2,-1,1,0,-5,-60,-132,-64,-20,-85,-7,-176,-5,-52,-74,-72,84,430,320,474,759,338,-353,-244,418,1855,1977,2351,3573,2234,1049,638,1145,830,98,-47,162,541,334,496,426,623,595,701,586,611,652,468,517,289,478,320,343,117,277,121,176,-24,22,-90,-30,-74,-66,-98,-115,-117,-110,-81,-130,-60,-133,-100,-189,-113,-212,-176,-266,-224,-323,-264,-344,-286,-346,-280,-345,-296,-322,-294,-296,-283,-287,-282,-269,-247,-271,-247,-275,-227,-292,-244,-283,-265,-275,-259,-269,-255,-263,-256,-260,-233,-264,-234,-273,-263,-267,-260,-261,-252,-268,-235,-241,-202,-245,-183,-233,-168,-240,-165,-221,-162,-203,-155,-192,-162,-177,-157,-183,-162,-190,-153,-205,-157,-215,-152,-221,-147,-199,-142,-182,-144,-171,-142,-156,-148,-173,-154,-163,-157,-167,-170,-162,-167,-179,-174,-184,-178,-180,-174,-206,-178,-198,-188,-207,-204,-201,-199,-190,-214,-206,-229,-200,-212,-173,-192,-170,-215,-166,-189,-131,-174,-137,-168,-105,-105,-64,-83,-43,-48,2,-25,-6,-28,14,-21,8,-16,16,3,33,17,37,28,49,34,62,28,60,13,34,6,35,-8,13,-2,6,11,8,6,-3,12}, + /* IRC_Composite_C_R0195_T240_P015.wav */ + 
{0,0,0,0,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,3,-3,3,-4,4,-5,6,-7,9,-12,19,-47,-65,-42,-103,-56,-86,-61,-84,-136,-84,-125,-76,46,139,205,243,514,311,-296,-329,431,1034,1553,1739,2266,2575,1097,709,1340,1199,680,407,592,719,747,950,891,924,905,835,742,679,646,246,443,284,396,76,270,79,360,141,234,132,176,91,44,66,-33,37,-24,-26,-52,-140,-134,-171,-129,-209,-205,-236,-225,-189,-236,-238,-284,-228,-319,-232,-313,-237,-314,-261,-304,-270,-301,-338,-291,-284,-273,-275,-283,-246,-253,-222,-299,-253,-305,-231,-286,-251,-299,-224,-259,-244,-287,-247,-273,-229,-277,-233,-278,-222,-256,-210,-244,-213,-226,-197,-200,-180,-187,-177,-191,-166,-184,-157,-198,-181,-196,-166,-179,-163,-184,-173,-192,-155,-187,-160,-198,-161,-183,-151,-173,-150,-191,-183,-210,-175,-205,-187,-208,-163,-185,-168,-170,-162,-180,-159,-180,-156,-158,-160,-186,-181,-225,-214,-217,-218,-234,-238,-231,-223,-221,-238,-230,-217,-206,-178,-170,-207,-185,-183,-154,-152,-154,-148,-132,-105,-76,-64,-31,-49,-9,-20,6,-12,13,19,25,27,32,34,31,31,36,25,9,26,16,20,-7,0,-5,22,12,4,3,18,14,9,-1,-12,-43}, + /* IRC_Composite_C_R0195_T255_P015.wav */ + 
{1,-1,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,3,-3,3,-3,3,-4,5,-6,10,-30,-120,-103,-93,-24,-84,-108,-106,-140,-67,-76,-73,-121,-133,-119,68,29,241,313,294,61,72,517,913,1238,1676,2099,2348,2019,1198,861,1666,1655,972,1040,933,753,835,866,579,456,471,608,472,520,250,352,304,298,249,242,223,199,145,120,235,93,122,49,3,-39,-15,-81,-71,-213,-81,-243,-130,-266,-181,-248,-213,-210,-258,-180,-292,-166,-311,-258,-301,-258,-288,-304,-246,-283,-255,-296,-284,-260,-336,-252,-351,-234,-370,-214,-328,-219,-303,-234,-296,-242,-261,-235,-272,-252,-263,-194,-230,-174,-222,-179,-253,-165,-264,-178,-236,-165,-217,-158,-210,-155,-187,-164,-190,-169,-218,-178,-196,-162,-199,-188,-233,-170,-224,-161,-189,-146,-207,-166,-198,-168,-188,-182,-169,-203,-189,-199,-180,-192,-203,-183,-200,-212,-208,-215,-216,-240,-188,-209,-178,-223,-162,-198,-167,-209,-211,-239,-196,-203,-168,-214,-197,-239,-145,-209,-149,-231,-167,-214,-131,-185,-129,-161,-129,-137,-105,-119,-97,-124,-84,-109,-56,-30,29,13,51,35,66,39,42,23,8,-3,12,8,29,13,32,3,26,10,21,-16,-7,-1,7,2,-5,-3,-3,-1}, + /* IRC_Composite_C_R0195_T270_P015.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-2,2,-2,3,-5,38,41,81,155,185,109,43,168,195,276,233,321,381,601,818,813,1005,694,582,706,813,1380,2408,2859,3417,3192,1234,164,601,379,145,-151,-609,-543,-195,49,-120,-111,136,61,-67,-105,-152,-114,-346,-267,-397,-310,-240,-454,-334,-493,-338,-595,-418,-579,-472,-506,-514,-489,-592,-416,-592,-471,-574,-491,-531,-515,-529,-526,-514,-446,-512,-519,-497,-434,-454,-430,-467,-415,-434,-400,-422,-405,-387,-422,-391,-428,-343,-360,-326,-389,-310,-335,-241,-318,-246,-341,-202,-264,-207,-278,-221,-234,-239,-210,-224,-186,-240,-226,-220,-207,-184,-222,-220,-245,-206,-189,-231,-234,-238,-195,-237,-129,-227,-188,-216,-145,-224,-225,-241,-252,-274,-214,-216,-202,-212,-164,-190,-171,-187,-134,-175,-141,-168,-164,-167,-150,-174,-138,-128,-101,-135,-76,-112,-70,-116,-70,-102,-92,-120,-98,-123,-112,-146,-92,-145,-88,-135,-96,-115,-84,-87,-67,-61,-82,-42,-48,11,-13,5,25,35,52,70,95,98,101,116,108,116,94,137,109,102,83,89,88,89,64,59,40,43,14,53,3,16,-3,7,12,-13,16,-2,28,6,37,18}, + /* IRC_Composite_C_R0195_T285_P015.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,-1,1,-1,2,-3,6,-109,-159,-182,-252,-225,-252,-209,-220,-323,-249,-123,27,108,237,39,-227,-176,-141,26,983,1563,1989,2256,2042,1443,1643,1525,1088,1602,1474,1170,1150,824,708,924,1176,1036,269,443,781,769,425,308,405,367,433,461,448,234,194,163,218,246,100,-62,56,-52,109,-114,68,-271,33,-231,-10,-251,-107,-211,-189,-193,-271,-173,-258,-220,-344,-246,-222,-245,-241,-310,-276,-282,-207,-275,-244,-325,-317,-257,-224,-252,-269,-249,-255,-262,-227,-233,-233,-271,-256,-189,-188,-164,-249,-173,-198,-150,-210,-124,-172,-127,-154,-156,-239,-193,-248,-200,-220,-169,-212,-123,-176,-157,-249,-249,-297,-272,-295,-303,-316,-296,-263,-235,-217,-173,-234,-208,-250,-182,-255,-227,-234,-247,-244,-205,-187,-219,-199,-160,-156,-146,-178,-163,-199,-166,-183,-164,-214,-208,-196,-160,-168,-196,-193,-160,-204,-162,-188,-175,-206,-218,-198,-177,-203,-194,-169,-169,-172,-168,-160,-149,-149,-113,-128,-93,-92,-23,-69,-13,-56,-10,-44,8,-28,9,-21,0,13,41,16,24,3,43,28,54,11,12,-13,29,-17,4,-27,25,-15,29,-5,31,21,23}, + /* IRC_Composite_C_R0195_T300_P015.wav */ + 
{3,-3,3,-3,3,-3,3,-3,4,-4,4,-4,4,-4,4,-4,4,-4,5,-5,5,-5,5,-6,6,-6,6,-7,7,-8,9,-10,13,-17,28,-93,-234,-180,-207,-251,-293,-262,-250,-346,-376,-130,67,-6,108,-151,-710,-124,223,55,1941,2161,1520,2079,1404,552,750,680,497,384,1277,1177,733,896,1394,1657,1268,1283,1309,1208,892,805,954,655,670,296,482,320,570,596,507,445,118,348,106,469,16,25,-157,191,5,47,-157,-78,-270,-124,-129,-61,-219,-206,-188,-118,-157,-193,-216,-244,-222,-155,-190,-248,-263,-189,-260,-255,-305,-285,-272,-179,-250,-320,-353,-177,-207,-158,-311,-279,-294,-136,-181,-176,-231,-183,-184,-86,-72,-138,-172,-154,-185,-208,-132,-194,-245,-197,-155,-169,-170,-257,-250,-273,-291,-308,-267,-292,-322,-261,-262,-223,-282,-280,-302,-284,-249,-260,-293,-276,-203,-199,-209,-219,-251,-230,-225,-236,-239,-254,-234,-253,-179,-216,-170,-211,-150,-212,-180,-190,-146,-198,-192,-173,-159,-174,-166,-187,-188,-211,-146,-166,-163,-201,-184,-171,-129,-130,-154,-144,-145,-167,-132,-161,-149,-160,-132,-127,-113,-104,-89,-73,-95,-76,-83,-55,-53,-22,-5,-13,-3,5,22,0,15,5,-11,-15,15,-13,-5,-8,7,-15,26,37,33,12,52,50,45,34,80,35,79,31}, + /* IRC_Composite_C_R0195_T315_P015.wav */ + 
{-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,2,-2,2,-3,-145,-419,-273,-234,-320,-494,-368,-332,-137,23,435,141,-274,189,-707,-1152,1545,415,1656,4094,1604,1580,2053,473,-525,351,1046,431,4,613,555,843,494,1071,1048,1375,1139,994,1431,1283,1617,838,1011,491,1146,716,701,358,388,443,310,457,32,-5,-93,169,269,-27,91,-165,86,-189,135,-120,-103,-270,-74,-56,-217,-55,-238,-119,-288,-120,-263,-264,-223,-279,-179,-302,-173,-211,-262,-189,-255,-177,-382,-172,-263,-206,-302,-155,-222,-165,-156,-91,-226,-129,-208,-104,-116,-7,-182,-57,-43,-59,-165,-192,-254,-334,-329,-222,-307,-218,-299,-183,-230,-153,-307,-262,-341,-352,-360,-306,-320,-349,-329,-284,-313,-262,-287,-276,-348,-332,-282,-281,-235,-311,-213,-232,-205,-232,-209,-189,-252,-237,-224,-214,-199,-229,-157,-205,-196,-185,-173,-156,-212,-157,-188,-156,-172,-131,-151,-175,-157,-139,-172,-126,-151,-157,-177,-123,-115,-119,-116,-103,-151,-157,-157,-103,-111,-83,-143,-134,-106,-78,-57,-83,-76,-143,-96,-110,-30,-81,-37,-72,-10,-21,-9,18,1,-1,-42,21,2,14,26,52,25,26,34,53,32,44,40,74,42,66,49,90,62,69,50}, + /* IRC_Composite_C_R0195_T330_P015.wav */ + 
{1,-1,1,0,0,0,0,0,0,0,0,1,-1,1,-2,2,-2,3,-3,4,-5,6,-7,9,-11,14,-18,25,-38,72,-494,-377,-163,-458,-490,-5,-649,-505,-448,737,42,720,-5,-1466,655,-1957,-272,3638,1401,3790,5414,320,1005,1351,716,-1658,736,755,629,-241,367,775,655,1313,522,711,607,1017,814,1153,1014,727,1072,1193,1369,911,877,665,747,788,602,389,-2,301,79,299,235,78,-171,-171,-12,-204,-96,-124,-285,-390,-109,136,-127,-79,-244,-125,-236,-21,-257,-313,-264,-266,-248,-188,-112,-345,-304,-50,-265,-235,-235,-38,-345,-176,-119,-52,-156,-49,-98,-184,-269,-46,-139,-85,-206,-15,-64,-75,-25,28,-69,-306,-216,-145,-321,-127,-396,-359,-364,-161,-402,-370,-313,-329,-419,-337,-409,-317,-386,-319,-505,-396,-337,-244,-368,-365,-380,-286,-289,-230,-330,-297,-331,-175,-233,-184,-243,-195,-204,-144,-176,-187,-238,-158,-239,-170,-186,-124,-214,-122,-133,-166,-196,-122,-151,-204,-200,-147,-209,-159,-148,-140,-151,-108,-106,-148,-137,-107,-109,-157,-138,-152,-159,-152,-143,-132,-169,-84,-121,-99,-110,-53,-57,-49,-65,-56,-59,-72,-39,-80,-76,-50,-24,-78,-31,-4,10,-54,43,12,18,43,54,13,-6,86,38,22,50,62,50,38,99,31,62,60,58,29,11,38}, + /* IRC_Composite_C_R0195_T345_P015.wav */ + 
{-11,11,-12,12,-13,13,-14,15,-16,16,-17,18,-19,20,-22,23,-24,26,-27,29,-32,34,-38,43,-52,76,-328,-363,-214,-611,-509,-335,-643,-447,-869,31,57,1040,355,-374,1188,-2297,-4332,5821,-1811,4636,8422,1800,2573,2446,333,-2543,348,441,1038,-1221,-120,503,1705,557,994,621,1108,529,1387,553,713,268,987,1168,1373,742,1063,633,1079,800,733,478,430,-8,448,243,389,-34,240,-156,40,-68,208,-262,-163,-284,-85,-119,4,-292,-169,-173,-9,-177,-185,-284,-261,-239,-151,-333,-444,-278,-212,-269,-320,-207,-353,-167,-122,5,-101,-146,-236,26,158,-193,-6,31,-110,-13,194,-336,-291,166,-187,-339,71,83,-256,-36,64,-291,-253,-258,-423,-483,-404,-542,-450,-398,-404,-517,-415,-395,-300,-370,-455,-406,-370,-334,-365,-318,-452,-367,-267,-228,-288,-345,-283,-288,-195,-237,-211,-198,-242,-243,-148,-120,-215,-182,-175,-171,-178,-118,-143,-239,-153,-169,-160,-247,-180,-208,-203,-145,-188,-199,-188,-106,-162,-137,-157,-131,-109,-183,-154,-130,-109,-197,-150,-64,-168,-155,-207,-78,-176,15,-176,-113,-192,-34,-58,-13,-144,-90,-50,-81,-89,24,-35,-100,-56,-23,-9,29,37,-35,-51,58,55,55,-8,24,51,71,28,39,46,49,51,27,-11,48,41,22,-3,23,-14,20,-19,-13}}; + +const int16_t irc_composite_c_r0195_p030[][256] = + {/* IRC_Composite_C_R0195_T000_P030.wav */ + 
{-54,55,-55,56,-57,57,-58,58,-58,59,-58,58,-58,56,-55,52,-48,42,-34,21,1,-38,103,-854,-293,-223,-413,-662,-699,-525,-649,-153,-821,-339,218,2022,2054,-2330,-2792,1747,-9777,10764,1443,5425,12121,258,-1458,190,2417,73,-781,460,-747,-560,1297,150,875,435,1456,292,1300,-209,948,616,962,-100,649,571,794,577,979,597,904,611,978,804,976,113,890,241,591,176,615,-381,92,-14,208,-196,144,-25,-87,-290,-51,-201,-11,-376,13,-401,6,-326,16,-348,-73,-269,-196,-276,-77,-266,-298,-332,-42,-326,-208,-519,-147,-423,-93,-432,-257,-451,-109,-270,-192,-375,-151,-315,-188,-206,-37,-459,-168,-241,-10,-278,-73,-257,87,-155,-18,-234,-126,-305,-82,-294,-405,-294,-172,-409,-47,-89,-22,-89,128,-65,30,48,-38,-108,-174,-371,-254,-219,-255,-366,-375,-485,-192,-333,-204,-374,-171,-415,-254,-437,-213,-353,-257,-320,-265,-229,-230,-199,-344,-342,-208,-291,-328,-415,-224,-366,-172,-311,-156,-289,-178,-259,-84,-212,-136,-195,-136,-204,-23,-192,-135,-219,-38,-190,-41,-200,-34,-195,-41,-117,-47,-160,-96,-111,-97,-131,-73,-186,-78,-137,-70,-202,-93,-119,-55,-149,-67,-139,-48,-119,-34,-122,-39,-119,-31,-62,-1,-61,-28,-48,31,6,17,-8,75,29,40,45,65,21,45,14}, + /* IRC_Composite_C_R0195_T015_P030.wav */ + 
{45,-49,52,-57,61,-66,71,-77,83,-90,98,-107,116,-126,138,-150,164,-177,188,-179,-73,-966,-102,-989,560,-1173,-327,-1684,597,-1115,259,-1628,2183,1183,3440,-5302,3131,-10502,-529,12738,-6827,21598,5258,-7,-3187,1963,495,327,502,-665,-1769,1964,-371,1009,596,896,151,1819,-137,401,915,974,249,114,559,962,751,584,371,576,838,1123,906,351,-23,332,521,427,-138,-265,-100,55,-54,59,-205,-261,-77,2,-107,-229,-219,-403,-235,-134,-143,-470,-234,-177,-61,-236,-83,-258,-197,-192,-101,-300,-331,-254,-216,-242,-271,-289,-310,-261,-262,-333,-321,-293,-270,-333,-255,-243,-275,-316,-256,-252,-176,-243,-182,-212,-172,-289,-118,-228,-303,-307,-77,-225,-298,-230,-122,-403,-282,-152,-387,-451,8,13,-160,-49,124,-112,-104,87,-45,-358,-217,-135,-206,-167,-172,-229,-317,-197,-162,-307,-272,-363,-290,-325,-168,-360,-289,-359,-212,-286,-239,-278,-241,-270,-234,-333,-266,-308,-234,-344,-218,-357,-274,-331,-208,-244,-205,-284,-219,-226,-149,-177,-181,-201,-159,-198,-109,-172,-113,-218,-95,-145,-67,-151,-73,-148,-84,-129,-52,-118,-51,-142,-54,-113,-29,-150,-66,-118,-44,-150,-8,-105,-59,-133,-10,-103,-51,-102,-26,-114,2,-62,-14,-78,24,-62,35,-36,28,-33,74,-22,57,13,101,-23,94,36,50,32}, + /* IRC_Composite_C_R0195_T030_P030.wav */ + 
{22,-22,22,-22,22,-22,21,-21,20,-19,18,-16,13,-9,3,6,-23,62,-927,-68,-994,540,-922,-246,-1369,328,-1205,-194,-1442,1439,570,2758,-885,3393,-4925,-8851,7444,-11725,26992,7407,2376,4575,-342,-2312,-724,2866,-938,-1366,86,-585,1150,1637,627,500,364,713,726,1042,-231,616,243,1337,315,722,212,833,509,1128,741,763,-23,706,-315,392,-241,100,-771,-188,-212,298,-422,-413,-205,37,-281,-130,-523,-88,-503,-112,-573,-211,-454,-80,-504,-132,-294,-80,-239,-22,-204,-163,-320,-82,-296,-297,-476,-149,-313,-195,-414,-189,-328,-173,-333,-197,-351,-236,-353,-212,-335,-289,-358,-212,-232,-235,-269,-127,-247,-255,-385,-167,-387,-267,-392,-201,-373,-245,-334,-180,-331,-245,-322,-210,-238,-125,-198,-59,-138,-44,-137,-3,-58,9,-199,-115,-324,-112,-304,-122,-334,-195,-342,-88,-186,-146,-296,-157,-238,-181,-256,-164,-321,-198,-320,-193,-346,-189,-318,-169,-297,-240,-369,-178,-273,-261,-370,-222,-299,-226,-327,-186,-311,-192,-283,-157,-277,-144,-267,-140,-249,-109,-220,-95,-203,-109,-172,-51,-152,-88,-149,-49,-133,-83,-123,-30,-107,-62,-145,-55,-94,-23,-81,-49,-86,-1,-42,-24,-119,-38,-58,-5,-105,-50,-71,1,-68,-36,-53,-6,-19,20,-14,18,-29,55,-17,56,-9,73,3,81,35,97,32,73}, + /* IRC_Composite_C_R0195_T045_P030.wav */ + 
{9,-7,5,-3,0,3,-7,12,-18,25,-34,45,-60,79,-107,151,-363,-677,-240,-871,518,-941,-269,-1276,265,-1859,1187,-1350,2149,1,5698,-4523,4866,-12427,-2128,5167,3666,25146,2435,401,417,1825,-3164,2605,613,-1851,-1719,979,-329,2301,-426,1606,-126,1675,119,1149,-57,595,-80,1644,19,793,-129,1326,678,1045,-17,856,-22,267,-380,190,-844,-146,-664,199,-735,-167,-428,32,-402,-217,-411,-308,-427,-284,-543,-438,-454,-307,-561,-259,-430,-199,-381,-181,-382,-148,-228,-87,-405,-169,-303,-91,-361,-175,-378,-291,-318,-127,-301,-269,-260,-171,-316,-164,-268,-241,-361,-178,-341,-229,-322,-201,-335,-221,-314,-343,-328,-221,-321,-372,-308,-210,-400,-380,-322,-269,-386,-304,-356,-223,-292,-166,-317,-192,-91,-100,-158,-51,-73,-116,-23,-56,-150,-243,-92,-175,-169,-181,-177,-294,-169,-187,-210,-286,-130,-227,-213,-253,-159,-238,-150,-211,-249,-260,-144,-232,-247,-283,-227,-289,-178,-293,-252,-299,-148,-285,-211,-271,-197,-276,-200,-222,-204,-252,-176,-235,-205,-223,-107,-218,-119,-198,-109,-182,-35,-149,-101,-171,-62,-123,-44,-128,-75,-141,-37,-172,-54,-143,-11,-144,-27,-104,26,-103,23,-105,37,-73,18,-74,15,-63,4,-83,16,-68,-5,-50,47,-28,24,-35,41,-27,78,-21,77,-17,105,16,96,7,122,24}, + /* IRC_Composite_C_R0195_T060_P030.wav */ + 
{8,-7,7,-6,5,-5,4,-3,3,-3,3,-3,5,-9,20,86,-790,103,-872,55,-678,-196,-962,-387,-893,162,722,1609,608,2929,1713,-6837,-667,-12691,15713,13906,9242,5982,83,-1809,1076,1596,1326,-1529,-1392,-366,731,78,1368,-246,850,592,521,699,1115,310,930,973,592,664,159,708,710,489,-74,485,-697,374,-695,249,-1252,204,-915,137,-777,-91,-241,-403,-350,-466,-269,-521,-499,-540,-380,-320,-709,-233,-517,-228,-634,-244,-652,-290,-447,-147,-545,-241,-352,5,-488,-69,-437,53,-465,-6,-353,-78,-525,-24,-335,-157,-418,-27,-367,-103,-287,-241,-397,-158,-276,-304,-351,-181,-369,-225,-401,-209,-512,-184,-447,-153,-476,-152,-484,-76,-492,-243,-498,-127,-515,-186,-392,-135,-408,-53,-239,-82,-191,13,-138,-32,-118,-6,-177,-44,-187,-143,-246,-50,-262,-108,-264,-71,-256,-48,-325,-90,-349,-63,-352,-110,-280,-159,-311,-153,-350,-187,-295,-152,-318,-137,-192,-178,-252,-142,-284,-157,-277,-190,-286,-151,-282,-119,-257,-91,-310,-54,-224,-93,-207,-75,-245,-9,-203,-48,-237,12,-182,-40,-201,33,-146,-26,-200,-37,-156,-43,-159,-28,-130,-32,-114,20,-143,6,-113,12,-126,58,-103,21,-121,59,-91,44,-108,59,-43,47,-58,60,-38,52,-61,83,-36,62,3,66,27,84,84,65,66,103}, + /* IRC_Composite_C_R0195_T075_P030.wav */ + 
{100,-103,106,-109,112,-115,118,-120,122,-122,119,-110,88,-25,53,-874,378,-897,369,-1226,537,-1263,224,-1798,1274,98,2195,-462,4170,361,-3989,-3149,-11328,17722,9812,11683,3944,-511,-808,1931,2611,1751,-2704,-605,-430,728,-349,908,612,941,-687,931,759,-66,894,2123,310,1489,449,746,475,494,-493,-725,213,-1074,381,-1100,-205,-670,-115,-645,-170,-469,-431,31,-824,-248,-707,-458,-476,-267,-901,-228,-228,-601,-420,-335,-514,-545,-367,-632,-446,-566,-351,-391,-306,-444,-134,-388,-144,-333,-243,-194,-98,-434,-105,-173,-206,-249,-121,-377,-102,-345,-159,-256,-146,-470,-106,-327,-152,-330,-205,-368,-192,-409,-197,-444,-310,-359,-229,-461,-161,-393,-318,-351,-227,-387,-261,-314,-291,-270,-249,-242,-213,-299,-192,-269,-108,-253,-73,-278,80,-171,-11,-111,7,-198,-30,-155,-148,-117,-164,-168,-181,-155,-172,-221,-156,-207,-176,-229,-130,-292,-171,-280,-233,-288,-132,-237,-242,-209,-166,-245,-147,-263,-160,-240,-126,-261,-109,-276,-150,-274,-92,-247,-135,-225,-43,-207,-57,-174,-33,-157,-10,-172,-20,-116,-56,-146,22,-132,-61,-119,-53,-166,-22,-107,-81,-99,-40,-65,-89,-70,-32,-77,-31,-75,-25,-75,36,-146,3,-52,3,-100,13,-55,-5,-17,13,-41,5,-12,59,-13,32,3,61,44,107,53,129,85,118,95}, + /* IRC_Composite_C_R0195_T090_P030.wav */ + 
{-85,89,-94,98,-102,107,-112,116,-119,121,-119,106,-58,-455,60,99,-1054,-36,-88,242,-1635,433,-1255,1196,-1210,2540,-840,5344,-2798,4670,-10472,-874,1281,9198,19443,266,2404,-1705,3816,1662,3038,83,-1499,-2490,1368,-37,500,-334,1490,-626,909,-15,1075,678,1886,658,852,386,1418,146,400,-1073,-338,-633,-165,-1024,-383,-1251,-264,-407,-25,-1062,-140,-559,70,-716,-459,-642,-96,-725,-472,-412,-384,-647,-281,-605,-395,-572,-399,-706,-351,-583,-297,-495,-434,-423,-201,-406,-407,-323,-268,-341,-186,-360,-201,-241,-141,-313,-94,-257,-169,-208,-194,-224,-147,-275,-276,-255,-215,-224,-195,-256,-265,-271,-252,-352,-290,-430,-350,-309,-268,-387,-267,-289,-271,-262,-303,-332,-237,-287,-328,-292,-220,-307,-211,-297,-184,-216,-183,-248,-116,-180,-163,-187,-112,-160,-80,-117,-46,-81,-25,-91,-53,-114,-120,-176,-181,-173,-184,-205,-224,-154,-233,-229,-178,-204,-243,-197,-188,-194,-206,-154,-204,-205,-213,-116,-191,-127,-210,-145,-216,-173,-183,-177,-263,-209,-141,-116,-148,-107,-124,-68,-87,-45,-121,-68,-134,-77,-87,-29,-115,-67,-117,-46,-114,-27,-115,-45,-100,-16,-82,-32,-85,-17,-74,-28,-41,-27,-85,-23,-68,-40,-47,-15,-96,-13,-38,11,-39,-38,-19,1,-19,-3,-8,-15,-22,28,17,70,76,128,80,124,112,151}, + /* IRC_Composite_C_R0195_T105_P030.wav */ + 
{104,-111,117,-125,133,-143,153,-166,180,-197,218,-245,288,-401,132,-429,-287,-538,503,-701,-185,-1083,867,-1151,1236,-192,2589,704,2171,-548,-4634,-1245,-3979,18483,6281,7009,-196,501,1762,4063,3838,-1,-2732,-1085,-409,884,444,-393,-9,733,362,138,1179,1594,1300,947,317,734,700,55,-900,-943,-310,-712,-521,-933,-377,-1554,-143,-410,48,-924,-152,-440,-212,-554,-286,-601,-527,-371,-454,-653,-533,-426,-450,-579,-486,-483,-369,-609,-354,-543,-201,-457,-293,-586,-146,-430,-339,-522,-196,-439,-324,-340,-141,-423,-157,-344,-39,-390,-88,-363,-66,-244,-152,-284,-176,-153,-268,-264,-245,-148,-256,-290,-221,-325,-190,-340,-211,-490,-165,-333,-238,-471,-203,-235,-228,-397,-218,-264,-147,-365,-267,-367,-142,-319,-211,-356,-158,-290,-141,-243,-127,-217,-86,-135,-64,-128,-75,-153,-67,-155,-25,-190,-45,-193,-18,-259,1,-235,-77,-284,-88,-227,-151,-250,-186,-245,-111,-256,-126,-274,-46,-272,-88,-254,-32,-214,-71,-201,-144,-187,-98,-154,-182,-196,-78,-154,-85,-159,-36,-221,-26,-121,-4,-188,-47,-127,-22,-132,-24,-158,-49,-155,-2,-158,-21,-135,-30,-121,10,-87,-16,-118,10,-48,-22,-86,-12,-80,14,-80,13,-86,50,-103,29,-96,64,-85,45,-104,50,-45,64,-67,-11,-39,117,-14,158,27,234,13,225,68}, + /* IRC_Composite_C_R0195_T120_P030.wav */ + 
{28,-28,29,-29,29,-29,29,-27,25,-21,15,-5,-12,43,-140,88,-559,-189,139,-567,198,-314,-739,-383,1538,-450,794,642,4608,-2302,1277,-7331,2563,5148,5379,12280,2713,-323,-1693,7682,3305,1850,-1530,-2098,-1411,1431,-392,275,-715,741,-246,1255,618,1870,987,1146,368,730,-169,18,-891,-805,-1522,-439,-421,-616,-1006,-319,-441,-263,-511,-164,-630,-92,-636,-184,-746,-202,-725,-323,-658,-347,-529,-471,-673,-383,-415,-310,-627,-358,-521,-225,-567,-258,-424,-264,-432,-273,-312,-333,-413,-308,-330,-340,-367,-280,-273,-329,-276,-223,-220,-254,-266,-200,-177,-199,-276,-232,-217,-244,-204,-238,-182,-299,-214,-297,-195,-311,-235,-299,-177,-299,-241,-285,-222,-309,-277,-284,-244,-293,-235,-236,-241,-258,-206,-250,-223,-292,-255,-245,-224,-254,-179,-205,-152,-237,-116,-146,-78,-191,-75,-140,-27,-176,-63,-134,-82,-199,-79,-164,-84,-202,-123,-164,-105,-201,-110,-207,-105,-211,-126,-202,-134,-203,-129,-204,-131,-154,-100,-194,-94,-163,-74,-166,-82,-216,-107,-149,-27,-161,-56,-129,-40,-130,-55,-114,-53,-102,-63,-77,-31,-89,-106,-127,-83,-123,-74,-94,-24,-173,-31,-94,11,-133,-8,-83,-23,-88,12,-65,-46,-111,25,-94,13,-117,28,-88,37,-79,24,-35,51,-27,22,7,50,-3,53,7,103,86,163,91,162,75}, + /* IRC_Composite_C_R0195_T135_P030.wav */ + 
{6,-6,7,-7,8,-8,9,-9,10,-10,11,-12,13,-14,16,-18,-114,-234,-98,-110,-115,-173,-170,-232,-274,363,317,1353,133,2229,1323,703,-7423,4287,1521,5913,8831,4880,804,-872,5713,3941,3130,-2817,-2290,-315,304,-50,-26,-209,494,295,1100,751,1288,533,716,964,54,-449,-399,-528,-808,-934,-947,-951,-457,-538,-157,-625,-308,-385,-95,-571,-310,-374,-278,-494,-514,-429,-461,-556,-433,-529,-432,-535,-327,-580,-236,-492,-287,-555,-216,-430,-280,-451,-246,-351,-270,-341,-290,-325,-254,-404,-265,-383,-232,-420,-232,-398,-198,-399,-199,-300,-210,-291,-213,-230,-263,-273,-261,-215,-220,-262,-203,-257,-189,-275,-199,-298,-243,-292,-198,-258,-246,-263,-220,-238,-243,-271,-259,-242,-227,-263,-220,-247,-166,-281,-180,-274,-165,-251,-184,-245,-176,-216,-217,-207,-158,-156,-155,-166,-106,-127,-108,-139,-81,-152,-98,-167,-88,-170,-109,-150,-125,-150,-119,-119,-124,-135,-111,-142,-113,-144,-112,-168,-76,-193,-154,-199,-103,-124,-149,-137,-125,-90,-105,-96,-120,-90,-84,-94,-99,-96,-69,-90,-69,-82,-94,-80,-67,-60,-94,-70,-110,-76,-102,-51,-103,-55,-74,-38,-105,-57,-91,-76,-96,-51,-36,-59,-52,-57,-56,-61,-46,-70,-31,-32,-25,-47,6,-6,17,-33,-1,20,49,32,43,24,63,112,138,114,43,112}, + /* IRC_Composite_C_R0195_T150_P030.wav */ + 
{40,-42,44,-47,49,-53,56,-60,64,-69,75,-82,90,-99,110,-125,148,-207,85,-316,199,-301,148,-341,137,-377,122,76,967,463,1542,593,2935,-2335,-3746,3892,1126,7018,5853,6116,-1287,2364,4148,3574,181,-2393,-1125,-178,512,-398,-17,-462,310,1083,929,1228,538,479,431,450,-124,-446,-926,-681,-484,-854,-858,-886,-425,-380,-163,-187,-475,-333,-267,-193,-398,-286,-470,-331,-472,-511,-390,-535,-398,-443,-447,-458,-316,-355,-323,-348,-312,-366,-331,-282,-304,-410,-276,-324,-270,-334,-288,-347,-263,-345,-283,-348,-294,-324,-322,-384,-264,-346,-287,-352,-258,-326,-246,-345,-247,-332,-220,-276,-243,-276,-171,-257,-208,-277,-184,-237,-184,-265,-186,-245,-165,-259,-201,-253,-180,-238,-198,-272,-176,-246,-187,-277,-161,-252,-150,-275,-137,-267,-126,-254,-141,-241,-149,-227,-151,-206,-151,-203,-126,-181,-101,-170,-85,-185,-87,-146,-87,-170,-106,-170,-90,-160,-111,-150,-107,-140,-90,-144,-63,-132,-90,-238,-87,-160,-61,-203,-110,-165,-57,-102,-36,-116,-53,-98,-41,-96,-62,-105,-61,-102,-70,-120,-86,-109,-103,-103,-70,-67,-72,-85,-74,-124,-64,-94,-56,-139,-102,-99,-75,-97,-58,-61,-30,-94,-25,-74,-43,-104,-48,-76,-56,-75,-27,-39,-24,-3,21,-10,-1,6,97,22,44,1,101,93,134,63,82,45}, + /* IRC_Composite_C_R0195_T165_P030.wav */ + 
{-5,5,-6,6,-6,6,-6,7,-7,7,-8,8,-9,9,-10,11,-12,14,-17,25,-163,17,-80,12,2,-73,14,-200,92,163,755,471,1371,723,2348,-678,-3403,2150,1314,5172,5430,6608,508,1066,3048,2691,1177,-1954,-1312,-864,1114,-99,-395,-170,755,800,840,925,689,-7,-2,-92,-86,-565,-703,-538,-415,-580,-306,-607,-337,-594,-235,-197,-71,-393,-276,-461,-131,-367,-319,-372,-331,-590,-309,-448,-316,-391,-370,-330,-202,-345,-331,-285,-252,-333,-332,-343,-285,-341,-299,-355,-341,-319,-335,-324,-306,-321,-304,-329,-296,-343,-321,-372,-317,-381,-294,-371,-299,-341,-289,-334,-273,-311,-239,-276,-234,-285,-217,-256,-202,-250,-169,-221,-150,-205,-167,-217,-204,-247,-225,-236,-214,-247,-214,-207,-155,-197,-183,-192,-155,-160,-191,-236,-192,-219,-188,-240,-195,-229,-141,-216,-140,-210,-108,-198,-110,-186,-131,-164,-118,-161,-128,-132,-117,-113,-132,-130,-137,-165,-120,-157,-110,-125,-107,-149,-159,-131,-114,-123,-178,-132,-136,-75,-82,-32,-23,-71,-33,-49,-17,-94,-64,-87,-53,-94,-80,-79,-100,-87,-113,-66,-94,-109,-102,-113,-90,-94,-92,-105,-128,-124,-129,-89,-94,-67,-93,-83,-95,-80,-94,-87,-89,-91,-53,-81,-31,-57,18,-49,-13,-37,25,-7,9,-13,-1,53,58,59,79,109,133,124,84,60}, + /* IRC_Composite_C_R0195_T180_P030.wav */ + 
{11,-12,12,-12,13,-13,14,-14,15,-15,16,-17,18,-19,20,-21,23,-25,28,-33,39,-51,85,16,-87,129,-81,94,33,-96,-40,382,84,940,857,674,1733,496,-329,-2997,2450,4178,3824,5230,3832,1334,27,2852,1755,841,-2811,-750,709,798,-492,-489,295,1105,873,238,383,361,52,-483,-293,-277,-430,-468,-128,-283,-308,-272,-363,-267,-297,-189,-289,-143,-258,-242,-373,-291,-355,-331,-315,-246,-385,-325,-276,-197,-314,-270,-313,-267,-313,-307,-364,-269,-366,-341,-361,-328,-371,-363,-339,-357,-363,-329,-335,-334,-353,-302,-358,-291,-419,-330,-381,-332,-384,-319,-331,-309,-321,-276,-296,-256,-286,-256,-306,-268,-277,-214,-200,-211,-199,-179,-152,-213,-196,-243,-196,-273,-196,-241,-183,-226,-151,-195,-173,-198,-169,-171,-172,-166,-179,-179,-198,-192,-224,-204,-227,-203,-207,-198,-181,-177,-142,-148,-122,-136,-110,-124,-113,-120,-142,-140,-137,-157,-161,-213,-116,-179,-81,-164,-112,-182,-121,-113,-114,-132,-189,-130,-138,-73,-121,-47,-93,-7,-49,37,-29,-7,-77,-47,-80,-70,-85,-99,-107,-91,-49,-61,-81,-124,-69,-122,-37,-160,-57,-192,-64,-193,-77,-181,-87,-158,-111,-159,-105,-113,-89,-109,-88,-83,-83,-88,-75,-44,-73,-39,-53,-1,-6,52,20,54,20,68,72,99,107,107,128,102,141,66}, + /* IRC_Composite_C_R0195_T195_P030.wav */ + 
{-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,1,-1,1,-1,0,0,-1,2,-3,4,-1,20,-103,88,-97,77,-98,0,-92,123,108,628,541,868,946,950,-323,-2426,1960,1868,3655,4432,4145,1520,425,2257,2243,324,-1505,-101,360,566,-340,-97,521,852,598,427,177,417,31,149,-102,-24,-395,-11,-98,-149,-157,-72,-31,-41,-10,-27,-82,-183,-287,-120,-245,-255,-309,-131,-235,-200,-246,-222,-253,-210,-246,-249,-280,-281,-300,-278,-328,-316,-325,-352,-332,-302,-334,-406,-331,-345,-314,-353,-298,-355,-316,-331,-341,-377,-390,-356,-375,-325,-347,-301,-303,-263,-297,-274,-288,-262,-288,-280,-284,-231,-214,-205,-233,-199,-221,-205,-231,-218,-253,-209,-218,-203,-211,-199,-179,-191,-186,-199,-201,-193,-181,-187,-195,-182,-193,-188,-207,-197,-194,-197,-186,-192,-163,-178,-160,-165,-142,-165,-124,-155,-125,-159,-112,-160,-116,-190,-111,-165,-107,-172,-122,-190,-155,-169,-127,-164,-144,-157,-122,-135,-85,-113,-68,-116,-46,-67,-29,-54,-47,-65,-70,-92,-108,-92,-118,-99,-92,-87,-77,-99,-78,-106,-86,-118,-92,-123,-105,-136,-119,-126,-128,-143,-162,-171,-163,-154,-125,-147,-120,-134,-91,-125,-91,-103,-88,-80,-77,-45,-46,-3,16,24,24,27,58,42,84,85,129,127,141,127,125}, + /* IRC_Composite_C_R0195_T210_P030.wav */ + 
{-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,4,-4,4,-4,4,-4,5,-5,6,-6,7,-9,12,-23,-83,-41,-51,-44,-54,-36,-72,-62,13,62,362,515,745,618,964,123,-1474,242,1605,2935,3779,3927,2051,644,1570,1770,938,-679,-243,210,595,104,204,284,657,698,453,338,507,437,72,227,224,63,-55,88,141,215,131,225,97,158,60,38,-69,-107,-265,-85,-124,-202,-245,-131,-257,-191,-192,-178,-241,-145,-269,-233,-270,-235,-338,-254,-338,-292,-359,-271,-388,-290,-377,-344,-357,-293,-419,-370,-380,-349,-397,-346,-388,-357,-344,-302,-335,-306,-279,-276,-246,-273,-284,-282,-237,-264,-250,-226,-225,-248,-218,-230,-247,-240,-246,-259,-217,-216,-201,-233,-193,-207,-183,-205,-202,-228,-190,-207,-213,-222,-215,-237,-186,-220,-176,-235,-136,-217,-116,-201,-133,-200,-133,-159,-165,-150,-164,-152,-134,-165,-144,-175,-119,-164,-116,-139,-114,-152,-126,-132,-128,-147,-152,-173,-154,-145,-152,-115,-135,-100,-99,-65,-75,-79,-71,-101,-104,-126,-96,-135,-132,-139,-125,-80,-131,-108,-168,-90,-142,-81,-142,-146,-133,-121,-109,-142,-113,-121,-117,-148,-144,-150,-144,-150,-171,-140,-133,-106,-126,-118,-107,-114,-61,-79,-59,-59,-5,-31,13,-2,37,21,52,67,76,116,109,131,125}, + /* IRC_Composite_C_R0195_T225_P030.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,2,-2,3,-3,3,-4,5,-7,14,-133,-89,-52,-68,-90,-53,-98,-77,-65,-141,-33,-38,144,467,407,586,730,265,-702,-561,1147,2053,3022,3625,2128,1090,1205,1750,1123,130,-65,370,545,406,83,446,636,787,529,661,549,526,605,368,384,101,455,156,304,149,290,238,156,207,48,113,-86,-57,-36,-176,-93,-129,-107,-209,-107,-181,-195,-158,-169,-157,-239,-205,-240,-206,-253,-259,-282,-324,-296,-377,-347,-379,-340,-400,-365,-377,-398,-363,-379,-335,-398,-330,-337,-283,-328,-250,-335,-246,-279,-228,-301,-284,-299,-253,-272,-251,-277,-231,-269,-213,-274,-207,-256,-219,-248,-212,-205,-234,-191,-244,-197,-238,-194,-226,-213,-226,-214,-216,-219,-201,-233,-193,-216,-184,-183,-195,-171,-198,-136,-183,-121,-177,-126,-151,-131,-156,-138,-160,-140,-152,-130,-143,-109,-146,-113,-168,-114,-165,-123,-191,-139,-186,-143,-163,-101,-126,-91,-147,-80,-140,-85,-158,-107,-181,-100,-149,-100,-152,-116,-152,-120,-144,-111,-159,-127,-155,-129,-148,-146,-173,-157,-145,-134,-136,-128,-146,-120,-128,-115,-125,-133,-149,-144,-153,-151,-147,-139,-128,-113,-110,-89,-78,-59,-67,-27,-39,-8,-19,9,9,34,44,63,65,98,113}, + /* IRC_Composite_C_R0195_T240_P030.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,1,-97,-166,-68,-82,-98,-145,6,-178,-148,-79,-61,-4,226,289,466,695,150,-363,-564,127,1778,2371,2730,2418,1314,1333,1958,1573,362,208,618,691,672,358,520,767,841,691,635,738,675,472,476,447,387,278,358,303,322,169,236,146,167,153,93,161,-50,82,-137,22,-215,-48,-220,-102,-183,-156,-150,-189,-134,-246,-183,-293,-242,-320,-276,-333,-344,-324,-341,-336,-359,-336,-345,-340,-353,-365,-331,-325,-327,-353,-302,-293,-288,-330,-303,-271,-296,-272,-313,-253,-304,-231,-298,-228,-279,-211,-267,-230,-276,-219,-263,-229,-267,-208,-254,-187,-265,-150,-261,-152,-276,-148,-255,-165,-256,-184,-237,-198,-227,-197,-220,-180,-224,-157,-210,-127,-200,-118,-181,-83,-171,-100,-162,-99,-139,-124,-137,-122,-137,-107,-131,-113,-152,-146,-173,-133,-190,-141,-182,-105,-142,-132,-151,-164,-146,-160,-134,-193,-153,-203,-128,-188,-144,-178,-164,-173,-166,-129,-130,-161,-160,-179,-136,-174,-125,-201,-162,-176,-100,-152,-132,-174,-152,-170,-137,-167,-127,-157,-111,-151,-85,-120,-73,-114,-88,-97,-72,-75,-82,-80,-55,-47,-53,-51,-32,-31,18,-8,35,22,73,65}, + /* IRC_Composite_C_R0195_T255_P030.wav */ + 
{1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,2,-2,2,-2,3,-3,4,-4,5,-6,8,-12,21,-67,-186,-86,-78,-102,-151,-102,-59,-120,-188,-201,-77,-93,-133,190,315,388,642,206,-429,-627,-41,1512,2532,2996,2313,1394,1764,1723,1247,1241,976,437,321,813,834,517,405,589,1007,744,553,490,641,609,429,381,224,460,347,308,191,277,332,175,143,27,143,-12,2,-104,-86,-53,-114,-79,-254,-137,-211,-109,-349,-252,-297,-195,-348,-341,-330,-262,-295,-325,-316,-289,-220,-305,-292,-333,-256,-294,-316,-394,-312,-314,-286,-387,-317,-323,-233,-326,-302,-311,-250,-238,-292,-290,-296,-199,-259,-241,-292,-230,-245,-240,-270,-264,-223,-199,-230,-230,-203,-176,-179,-217,-226,-180,-203,-190,-239,-188,-230,-173,-226,-190,-237,-149,-184,-187,-190,-158,-126,-139,-130,-127,-103,-111,-83,-113,-124,-148,-136,-140,-136,-138,-118,-132,-127,-165,-140,-179,-161,-224,-175,-209,-159,-182,-149,-200,-170,-196,-150,-218,-152,-222,-177,-254,-168,-210,-186,-215,-137,-166,-128,-166,-128,-184,-106,-195,-174,-233,-137,-162,-153,-164,-155,-113,-146,-99,-141,-92,-113,-93,-86,-120,-80,-115,-69,-118,-75,-88,-73,-78,-77,-48,-78,-41,-54,-26,-50,-11,-21,13,14,31}, + /* IRC_Composite_C_R0195_T270_P030.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,2,-2,4,-6,13,-118,-205,-234,-125,-147,-202,-210,-154,-149,-302,-174,-138,74,139,366,494,501,-30,-788,-660,656,2336,3548,2933,1466,1698,2087,1596,1331,660,745,348,609,649,510,592,471,837,634,881,692,658,657,499,658,390,588,291,406,283,323,211,84,205,87,193,12,79,-62,28,-174,-101,-266,-102,-288,-156,-317,-186,-313,-245,-324,-271,-272,-274,-260,-294,-238,-276,-248,-278,-258,-319,-271,-317,-281,-337,-312,-324,-337,-313,-331,-296,-324,-287,-323,-272,-287,-250,-300,-247,-285,-236,-292,-228,-244,-210,-272,-214,-253,-227,-239,-213,-239,-229,-229,-214,-226,-214,-215,-199,-227,-177,-203,-180,-193,-178,-164,-152,-146,-149,-125,-151,-141,-143,-150,-155,-139,-130,-168,-120,-114,-108,-126,-125,-154,-199,-182,-165,-181,-236,-178,-180,-150,-191,-167,-205,-200,-201,-200,-203,-212,-213,-204,-217,-180,-215,-180,-171,-178,-161,-169,-169,-194,-215,-198,-197,-181,-198,-150,-180,-147,-175,-139,-160,-138,-133,-143,-113,-144,-115,-133,-126,-110,-112,-106,-129,-97,-110,-96,-107,-98,-88,-83,-69,-93,-79,-84,-51,-65,-56,-47,-41,-18,-38,4,-20,19,6}, + /* IRC_Composite_C_R0195_T285_P030.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-1,1,-1,1,-1,1,-1,2,-2,3,-3,4,-7,13,-190,-183,-235,-156,-218,-208,-304,-188,-211,-199,-139,-161,153,336,233,-123,-164,-557,-685,1147,1908,2035,3160,1492,785,1242,1667,1506,818,1143,817,1178,780,1057,657,807,713,683,790,563,729,595,855,599,704,615,614,540,520,362,304,245,337,219,250,133,197,59,126,-65,-70,-160,-89,-159,-231,-254,-291,-203,-274,-185,-304,-208,-285,-189,-264,-256,-331,-243,-311,-262,-308,-280,-304,-246,-335,-250,-318,-269,-309,-282,-300,-260,-340,-268,-307,-236,-284,-269,-325,-251,-300,-247,-310,-244,-277,-183,-239,-198,-269,-229,-228,-237,-229,-245,-249,-264,-220,-228,-201,-208,-179,-162,-96,-70,-76,-134,-147,-121,-145,-142,-175,-192,-136,-108,-69,-153,-156,-210,-206,-236,-198,-320,-233,-263,-173,-277,-125,-224,-144,-166,-131,-224,-164,-197,-216,-260,-240,-244,-177,-196,-187,-157,-155,-171,-202,-195,-206,-203,-187,-195,-236,-200,-188,-186,-176,-232,-177,-163,-133,-189,-164,-179,-140,-153,-146,-152,-146,-135,-108,-114,-110,-119,-88,-135,-96,-118,-89,-127,-122,-110,-95,-72,-108,-98,-91,-59,-48,-71,-44,-52,-26,-30,-20,-36,-34,-3,-13,9,16}, + /* IRC_Composite_C_R0195_T300_P030.wav */ + 
{1,-1,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-3,3,-3,3,-3,3,-4,4,-4,5,-5,5,-6,7,-8,9,-10,12,-16,27,-117,-227,-314,-248,-219,-228,-232,-272,-389,-204,-111,300,56,366,86,-1011,-298,44,-215,2477,2714,2032,2711,645,499,536,1654,449,374,611,764,837,821,1350,980,1002,808,1149,967,761,841,707,633,745,777,758,579,642,512,570,444,429,371,457,296,276,181,191,36,17,10,-72,-108,-107,-232,-151,-205,-165,-300,-163,-265,-253,-269,-203,-258,-301,-275,-269,-292,-272,-260,-302,-297,-289,-229,-300,-264,-305,-243,-283,-264,-269,-279,-280,-287,-259,-269,-276,-247,-263,-262,-247,-231,-256,-218,-280,-241,-295,-239,-252,-218,-256,-230,-258,-241,-251,-148,-189,-159,-149,-45,-91,-37,-119,-83,-194,-47,-124,-58,-134,-130,-231,-259,-239,-276,-269,-285,-258,-248,-198,-188,-240,-206,-250,-202,-249,-205,-247,-163,-226,-240,-217,-219,-189,-209,-212,-237,-257,-142,-184,-180,-176,-167,-213,-202,-187,-188,-191,-235,-229,-241,-179,-197,-185,-210,-199,-189,-160,-163,-174,-186,-151,-189,-138,-133,-147,-130,-137,-107,-118,-101,-102,-120,-113,-110,-78,-102,-64,-110,-74,-76,-59,-92,-76,-85,-71,-79,-43,-88,-39,-85,-19,-81,13,-60,8,-26,31,8,16,23}, + /* IRC_Composite_C_R0195_T315_P030.wav */ + 
{-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-3,3,-4,4,-4,4,-4,5,-5,6,-7,10,-15,27,-80,-353,-110,-370,-400,-195,-345,-309,-394,-97,-188,193,754,56,-290,-194,-1922,214,1653,1159,4327,2754,1527,1212,743,317,539,1293,-56,-21,321,1262,295,442,708,1213,856,813,872,1000,917,1073,730,897,944,834,864,775,610,537,692,383,605,370,536,245,348,155,280,101,83,31,90,-127,-63,-179,-119,-272,-116,-304,-232,-233,-212,-345,-176,-295,-247,-334,-204,-342,-233,-195,-284,-324,-229,-236,-255,-304,-258,-323,-258,-297,-224,-323,-252,-263,-224,-317,-252,-224,-255,-281,-221,-218,-205,-288,-209,-253,-193,-244,-249,-201,-218,-210,-200,-172,-180,-181,-84,-203,-75,-54,19,-108,-2,-46,-171,-209,-203,-294,-294,-286,-264,-295,-154,-187,-222,-182,-252,-197,-249,-211,-305,-232,-289,-224,-284,-267,-256,-184,-221,-260,-295,-281,-237,-233,-228,-269,-251,-203,-162,-193,-192,-189,-166,-237,-188,-201,-208,-209,-181,-224,-228,-151,-192,-200,-209,-131,-196,-159,-162,-159,-166,-116,-160,-151,-131,-47,-142,-119,-122,-73,-93,-108,-96,-133,-90,-84,-86,-83,-90,-56,-96,-65,-65,-48,-80,-67,-79,-50,-60,-46,-63,-39,-44,-12,-27,9,-35,23,-3,24,6,41}, + /* IRC_Composite_C_R0195_T330_P030.wav */ + 
{-5,5,-5,5,-5,5,-5,5,-5,6,-6,6,-6,6,-6,6,-6,6,-7,7,-7,7,-8,8,-9,9,-11,14,-26,-447,-195,-107,-505,-457,-480,-118,-514,-260,-345,82,416,511,1319,-1763,-994,103,-1768,3014,4385,2231,5234,1274,290,-49,1679,111,-604,148,524,573,-122,973,360,1000,586,1035,552,578,788,885,854,618,749,894,854,1021,930,968,502,1006,765,768,438,460,339,388,236,299,159,177,-65,162,-6,-74,-134,-51,-230,-132,-193,-105,-327,-152,-257,-191,-291,-227,-315,-280,-252,-192,-306,-253,-281,-182,-326,-252,-283,-225,-353,-255,-282,-258,-322,-236,-271,-288,-246,-248,-299,-265,-254,-312,-330,-129,-295,-141,-204,-129,-190,-22,-154,-203,-125,-172,-156,-188,-111,-223,-112,-106,-241,-86,-37,-262,-238,-193,-103,-105,-7,-250,-241,-39,-63,-184,-238,-261,-364,-243,-263,-310,-361,-286,-407,-227,-301,-289,-311,-319,-263,-215,-201,-304,-287,-339,-226,-246,-239,-335,-276,-276,-181,-234,-181,-257,-180,-234,-151,-165,-203,-213,-194,-170,-205,-157,-164,-191,-168,-154,-86,-133,-128,-162,-96,-147,-81,-165,-68,-173,-90,-164,-64,-127,-68,-162,-97,-99,-50,-115,-84,-111,-102,-87,-74,-80,-123,-76,-57,-95,-59,-42,-45,-74,-38,-27,-9,-30,0,-55,36,-23,58,-21,61,-7,59,29}, + /* IRC_Composite_C_R0195_T345_P030.wav */ + 
{0,0,0,0,0,0,0,1,-1,1,-1,2,-2,2,-3,3,-4,5,-5,6,-7,8,-9,10,-10,-1,-563,-328,-175,-595,-424,-853,130,-981,36,-843,608,167,1104,1422,-2010,-2282,1073,-3818,6278,4848,2400,7303,1229,-443,-1130,2494,95,-616,-743,335,108,968,417,599,684,1073,630,810,221,839,469,1136,218,739,427,945,692,942,955,757,852,1008,762,614,598,745,200,403,51,357,29,333,-147,-65,9,35,-114,-161,-231,-52,-197,-139,-202,-61,-310,-211,-214,-165,-307,-207,-293,-261,-259,-204,-201,-310,-323,-244,-250,-260,-335,-226,-421,-206,-333,-191,-401,-188,-244,-264,-283,-225,-208,-338,-233,-69,-304,-181,-213,-18,-265,39,-68,-82,-113,-107,16,-329,-80,-324,-205,-399,-184,-328,-391,-87,-9,36,1,59,-85,124,-60,-143,-307,-231,-406,-296,-456,-311,-297,-269,-433,-350,-189,-283,-277,-303,-343,-325,-287,-213,-328,-275,-279,-293,-296,-268,-217,-284,-286,-294,-277,-236,-247,-201,-305,-195,-224,-135,-244,-185,-128,-225,-126,-186,-74,-235,-93,-209,-95,-133,-90,-131,-165,-80,-89,-69,-148,-104,-72,-157,-77,-134,-88,-171,-67,-113,-124,-110,-76,-110,-130,-59,-96,-88,-119,-61,-109,-81,-81,-67,-82,-54,-22,-49,-19,-18,2,-15,25,13,24,8,45,57,4,20,23,60}}; + +const int16_t irc_composite_c_r0195_p045[][256] = + {/* IRC_Composite_C_R0195_T000_P045.wav */ + 
{-30,30,-31,32,-33,34,-35,36,-37,38,-39,40,-41,42,-43,43,-44,44,-42,38,-28,-9,-568,-169,-659,-127,-629,-131,-879,-529,-702,-207,-686,-146,757,1500,46,1169,-4599,-1790,163,-4363,15423,4884,3940,951,722,-172,1158,1933,-227,-750,303,-613,1137,856,493,193,1077,347,1002,300,420,882,521,444,512,395,442,484,453,404,610,620,839,826,758,649,736,767,608,484,493,147,169,427,10,204,-311,19,-245,98,-241,0,-167,16,-127,-26,-105,-158,-191,-79,-246,-206,-268,-177,-349,-216,-328,-216,-366,-178,-332,-214,-308,-168,-352,-204,-312,-132,-301,-215,-309,-152,-304,-202,-336,-200,-308,-229,-340,-205,-331,-196,-326,-240,-274,-198,-310,-200,-321,-159,-282,-190,-264,-176,-207,-74,-246,-157,-167,-77,-245,18,-155,-133,-274,-40,-289,-273,-267,-139,-520,-214,-265,-252,-317,-46,-133,-17,38,121,-153,151,27,-13,-114,13,-297,-74,-188,-147,-332,-197,-268,-297,-199,-221,-310,-309,-263,-206,-258,-237,-414,-155,-291,-200,-367,-189,-287,-139,-241,-261,-247,-142,-190,-233,-239,-158,-249,-110,-242,-173,-207,-82,-217,-136,-180,-136,-164,-112,-177,-97,-134,-40,-164,-32,-115,-16,-91,-12,-112,4,-81,-19,-100,-20,-70,-21,-87,-6,-59,22,-68,-7,-66,36,-20,13,-47,-2,-27,28,-4,13}, + /* IRC_Composite_C_R0195_T015_P045.wav */ + 
{16,-14,12,-9,7,-4,1,3,-7,12,-17,22,-29,36,-45,56,-69,85,-107,143,-247,-403,-616,-230,-517,-270,-663,-670,-583,-556,-570,-300,850,1495,298,1490,-1579,-5393,1137,-9580,17573,8715,3526,4288,-490,-1083,280,3534,-555,-1021,-341,-114,804,1521,139,355,844,673,325,735,-35,1245,91,909,-18,912,280,465,82,792,554,703,257,849,594,952,381,794,169,438,96,219,18,183,-220,-11,-303,284,-345,-44,-429,-44,-355,-58,-224,-155,-342,-30,-264,-140,-360,-95,-355,-133,-396,-161,-354,-174,-312,-65,-314,-194,-357,-142,-289,-170,-390,-213,-337,-152,-369,-215,-364,-191,-332,-194,-330,-206,-298,-234,-318,-198,-344,-225,-368,-198,-333,-240,-308,-215,-230,-223,-291,-224,-258,-133,-220,-212,-260,-84,-207,-164,-294,-157,-248,-263,-285,-134,-218,-267,-295,-223,-237,-192,-163,-155,-68,86,-74,-67,-51,-46,-49,-45,-144,-181,-126,-51,-106,-136,-213,-189,-163,-189,-238,-230,-238,-276,-288,-300,-274,-264,-268,-228,-288,-202,-201,-180,-232,-217,-238,-174,-222,-220,-242,-187,-203,-190,-211,-143,-223,-148,-187,-110,-202,-179,-175,-124,-140,-207,-165,-148,-117,-138,-129,-91,-111,-52,-96,-31,-72,-57,-38,-41,-39,-63,-12,-65,-2,-67,12,-64,37,-48,34,-49,20,-4,26,-16,4,-9,34,-12,34,-23}, + /* IRC_Composite_C_R0195_T030_P045.wav */ + 
{53,-54,56,-58,59,-62,64,-67,70,-73,77,-81,87,-93,100,-110,122,-140,176,-592,-266,-620,-93,-807,111,-1033,-244,-1451,661,-1378,1038,309,3279,-1940,4376,-7187,-1452,-4658,38,22380,4913,3629,1106,-370,-1337,2958,848,-390,-2691,1295,-1155,2599,-36,956,635,787,53,821,200,581,67,983,274,565,247,646,211,751,653,273,546,759,707,776,230,704,98,258,-440,41,-44,-99,-570,-276,125,-89,-316,-287,-67,-256,-409,-175,-243,-300,-369,-371,-102,-291,-350,-382,-109,-199,-343,-360,-210,-168,-316,-283,-194,-249,-97,-252,-166,-374,-161,-245,-317,-297,-364,-202,-337,-267,-324,-305,-165,-347,-151,-302,-171,-371,-213,-207,-334,-383,-313,-191,-297,-398,-235,-273,-140,-391,-214,-292,-119,-316,-254,-221,-194,-227,-232,-269,-229,-362,-187,-364,-206,-335,-144,-247,-146,-237,-158,-174,-97,-117,50,-80,66,-91,178,-80,-68,-202,-96,-198,-132,-223,-177,-186,-153,-217,-140,-249,-182,-298,-91,-285,-230,-334,-153,-250,-218,-295,-154,-260,-140,-294,-141,-233,-87,-273,-133,-222,-68,-214,-169,-225,-107,-209,-114,-264,-92,-242,-49,-295,-99,-226,-66,-252,-85,-193,-65,-189,-57,-171,-14,-172,-4,-179,85,-157,3,-134,92,-113,-4,-109,69,-51,13,-79,68,-27,61,-58,66,-43,79,-23,54,-24,30,-27,66,-41}, + /* IRC_Composite_C_R0195_T045_P045.wav */ + 
{-71,74,-76,79,-82,84,-87,90,-93,96,-99,101,-102,102,-97,85,-52,-97,-607,-227,-812,438,-1024,-204,-760,95,-1636,140,-766,2279,-763,3056,150,2120,-5760,-6480,1711,-14,24602,5276,1036,-813,1031,1166,848,674,-478,-1487,-945,687,1855,-292,799,1002,786,207,559,505,363,364,343,530,502,11,500,785,837,217,463,907,599,564,357,380,-197,84,-367,169,-530,-810,-282,-180,-171,-519,-179,-442,-72,-160,-239,-494,-294,-88,-553,-268,-512,-278,-503,-198,-473,-229,-317,-213,-370,-187,-331,-249,-344,-154,-381,-135,-317,-147,-279,-176,-355,-133,-274,-305,-330,-235,-314,-312,-303,-290,-257,-159,-269,-234,-243,-173,-350,-204,-383,-239,-309,-233,-404,-183,-289,-321,-344,-212,-319,-251,-253,-168,-303,-204,-277,-189,-284,-216,-333,-204,-348,-208,-298,-166,-339,-189,-186,-273,-152,-87,-184,-166,10,48,-40,2,0,-18,-97,-3,-134,-124,-166,-216,-161,-149,-234,-162,-175,-239,-198,-150,-294,-241,-224,-190,-303,-189,-150,-236,-232,-162,-159,-199,-136,-231,-166,-169,-136,-155,-152,-125,-146,-133,-100,-123,-156,-121,-135,-145,-164,-174,-157,-151,-189,-118,-198,-112,-121,-108,-138,-101,-93,-63,-79,-82,-83,-26,-44,-58,-96,20,-91,-15,-43,22,-69,29,5,6,-19,24,3,1,25,26,26,17,31,13,22,22}, + /* IRC_Composite_C_R0195_T060_P045.wav */ + 
{59,-58,56,-55,52,-49,45,-40,34,-25,14,1,-23,55,-108,214,-619,-300,-123,-537,-172,-393,-415,-586,-104,-1306,69,-335,1375,535,2761,-686,3824,-8072,-1237,-7735,11382,19138,4865,2658,-2047,1916,571,2806,-624,-694,-1625,70,232,950,-64,1404,-414,1196,34,1403,-261,1089,343,834,-188,587,-251,1086,518,827,135,641,494,842,-31,161,-360,-291,-317,171,-783,-241,-1031,130,-617,-201,-704,-152,-418,-199,-454,-247,-285,-342,-410,-378,-429,-306,-467,-327,-419,-184,-471,-188,-474,-157,-446,-174,-469,-146,-371,-239,-326,-267,-268,-265,-279,-218,-230,-216,-236,-148,-289,-212,-358,-216,-330,-124,-259,-173,-298,-166,-287,-241,-319,-291,-323,-261,-320,-293,-286,-298,-318,-332,-305,-269,-323,-212,-266,-206,-301,-143,-304,-236,-268,-212,-344,-239,-265,-225,-364,-157,-292,-187,-206,-129,-241,-186,-152,-141,-106,-103,-15,-115,56,-68,26,-47,-92,-125,-141,-154,-176,-196,-181,-250,-197,-136,-112,-240,-89,-180,-145,-235,-146,-223,-248,-178,-229,-152,-219,-112,-250,-98,-141,-152,-155,-116,-137,-150,-110,-137,-128,-94,-113,-72,-189,-29,-176,-93,-125,-129,-123,-190,-65,-169,-92,-169,-68,-159,-48,-109,-46,-109,-25,-81,-31,-72,-27,-71,7,-65,-16,-30,-30,-44,30,-39,8,-56,53,-36,81,-51,97,-64,95,-35,60,-30}, + /* IRC_Composite_C_R0195_T075_P045.wav */ + 
{7,-7,6,-6,5,-5,4,-3,2,0,-2,5,-9,14,-23,45,-729,-86,-463,-18,-765,398,-1336,518,-1262,-130,-544,1878,-781,3861,-694,2694,-2479,-6040,-3168,1816,21907,5475,5383,-2274,1082,1645,3196,-180,-862,-1506,-288,-182,870,-91,1493,-388,779,238,870,64,614,1007,850,122,95,413,1397,214,480,297,844,-36,272,27,-433,-791,-467,60,-441,-446,-697,-441,-417,-402,-239,-587,-268,-547,-343,-464,-296,-435,-388,-347,-416,-430,-420,-403,-296,-406,-376,-408,-228,-422,-330,-353,-273,-416,-325,-234,-301,-372,-327,-219,-336,-296,-307,-215,-240,-202,-250,-161,-199,-207,-315,-154,-247,-142,-267,-158,-266,-221,-313,-201,-303,-302,-339,-264,-278,-318,-314,-301,-319,-312,-315,-204,-334,-223,-340,-127,-300,-165,-343,-198,-307,-246,-285,-207,-293,-254,-293,-183,-246,-202,-252,-100,-213,-78,-138,-60,-197,-164,-160,-109,-141,-50,-142,2,-155,50,-176,0,-217,-67,-265,-39,-241,-180,-198,-196,-186,-182,-111,-135,-134,-125,-176,-98,-184,-100,-212,-114,-228,-113,-95,-140,-143,-141,-78,-133,-100,-67,-178,-75,-161,-2,-319,35,-236,-7,-269,53,-223,-47,-143,-20,-108,-80,-80,-54,-96,-67,-96,-24,-164,-15,-108,-24,-125,-8,-47,-26,-57,4,-8,-39,-35,32,-69,30,-49,58,-82,89,-80,107,-80,104,-56,112}, + /* IRC_Composite_C_R0195_T090_P045.wav */ + 
{82,-84,86,-88,90,-91,93,-94,95,-95,94,-92,87,-78,61,-33,-240,-426,56,-683,113,-744,373,-1306,284,-1194,1644,-667,2652,-291,4245,-2423,-2014,-4070,-4168,13696,10251,10475,-3031,1940,668,3850,1809,-625,-1492,-994,-456,184,563,568,-96,1027,330,539,32,648,1068,571,102,392,437,1555,535,636,312,223,-470,382,-426,-676,-1277,-176,-342,-251,-706,-452,-477,-459,-100,-511,-346,-481,-454,-509,-470,-397,-448,-337,-587,-233,-611,-143,-563,-280,-521,-364,-455,-244,-466,-275,-326,-321,-388,-219,-441,-210,-415,-297,-415,-244,-390,-213,-373,-184,-325,-193,-152,-257,-243,-230,-170,-254,-69,-214,-166,-228,-242,-227,-305,-281,-292,-232,-343,-218,-305,-280,-254,-331,-243,-363,-249,-287,-245,-320,-255,-262,-272,-215,-275,-191,-269,-200,-292,-200,-287,-217,-267,-214,-217,-218,-189,-189,-148,-203,-120,-154,-118,-165,-102,-132,-161,-134,-144,-132,-168,-94,-134,-87,-100,-96,-67,-104,-58,-178,-114,-154,-134,-226,-145,-224,-215,-130,-174,-117,-174,-71,-88,-63,-109,-58,-78,-120,-50,-84,-79,-101,-63,-97,-110,-65,-129,-123,-160,-83,-162,-109,-152,-92,-143,-76,-108,-73,-110,-42,-74,-64,-99,-35,-69,-11,-79,-57,-78,-51,-41,-69,-60,-68,-23,-64,48,-114,27,-96,75,-109,79,-84,99,-121,109,-64,69,-52,115,-98}, + /* IRC_Composite_C_R0195_T105_P045.wav */ + 
{28,-30,31,-33,35,-38,40,-43,47,-51,55,-60,67,-75,85,-99,111,-72,-376,-88,-540,307,-524,-278,-931,669,115,481,924,2601,65,2397,-5614,-1324,-2647,10799,8614,10099,118,-2294,4895,2386,2569,-875,-924,-2723,797,264,147,-195,968,521,465,-376,1032,858,1253,-135,495,389,792,755,975,0,305,-239,-39,-629,-469,-1320,-587,-582,-54,-751,-376,-721,-115,-425,-260,-335,-443,-566,-430,-612,-353,-563,-240,-666,-197,-569,-261,-523,-199,-719,-213,-574,-181,-663,-112,-511,-96,-492,-191,-414,-208,-419,-314,-403,-269,-455,-207,-442,-146,-453,-78,-424,-85,-318,-146,-327,-204,-254,-94,-214,-119,-280,-105,-318,-172,-355,-253,-350,-232,-337,-213,-346,-174,-337,-196,-365,-187,-307,-214,-347,-231,-304,-233,-293,-202,-273,-150,-270,-125,-278,-116,-278,-165,-297,-205,-241,-198,-221,-169,-202,-83,-240,-100,-225,-103,-190,-146,-171,-133,-169,-147,-152,-62,-157,-83,-150,-63,-116,-82,-115,-86,-88,-91,-126,-129,-161,-111,-196,-127,-178,-79,-197,-117,-103,-73,-136,-26,-65,-11,-114,16,-69,41,-165,27,-83,-32,-83,-83,-89,-127,-138,-85,-115,-105,-151,-76,-90,-77,-130,-45,-81,-85,-105,-60,-35,-80,-62,-86,-27,-81,-18,-56,-46,-45,-2,5,-61,-31,-63,-14,-64,-36,8,-66,36,-43,73,-62,61,-61,80}, + /* IRC_Composite_C_R0195_T120_P045.wav */ + 
{-12,12,-13,13,-14,14,-15,16,-17,18,-20,22,-25,29,-34,41,-54,124,-315,231,-915,409,-381,76,-1011,336,-106,1161,-312,2462,834,1649,-538,-4939,-381,2968,10089,6155,8410,-2705,920,4167,3787,745,-2352,-1802,-28,292,-651,645,868,-434,709,424,388,1070,1285,-64,322,849,520,234,673,260,-252,-169,-219,-404,-898,-522,-1000,-760,-659,-37,-489,-625,-480,-117,-279,-318,-554,-444,-479,-476,-447,-397,-506,-426,-374,-367,-517,-341,-428,-392,-509,-321,-429,-316,-424,-291,-372,-230,-336,-266,-337,-314,-357,-245,-417,-326,-393,-223,-400,-276,-346,-207,-347,-196,-289,-214,-324,-195,-253,-149,-240,-179,-253,-165,-279,-187,-327,-224,-367,-235,-338,-247,-316,-185,-311,-218,-272,-162,-301,-187,-304,-196,-322,-148,-322,-157,-312,-104,-274,-142,-240,-141,-246,-193,-235,-171,-192,-186,-214,-189,-212,-181,-191,-123,-229,-156,-208,-159,-193,-117,-178,-146,-179,-127,-164,-97,-166,-64,-139,-51,-149,-26,-150,-13,-111,-43,-117,-85,-121,-102,-128,-113,-107,-143,-80,-75,-82,-86,-55,-52,-89,-61,-20,-13,-63,41,-66,-14,-65,-25,-99,-66,-90,-83,-119,-73,-60,-96,-94,-16,-118,-43,-110,-43,-94,-27,-153,-76,-126,-21,-94,-45,-92,-51,-33,3,-63,-11,-52,-9,-73,9,-41,-20,-40,3,-44,-34,0,0,-32,16}, + /* IRC_Composite_C_R0195_T135_P045.wav */ + 
{15,-16,17,-18,19,-20,21,-22,23,-24,25,-27,28,-29,29,-28,24,-11,-117,-145,-90,-121,-109,-173,-39,-435,83,-128,1107,344,1609,548,3339,-3005,-2346,-79,2825,6596,9304,4876,-1449,2789,1884,4671,84,-2193,-1608,-211,-70,580,333,233,35,298,-56,1401,612,725,299,829,803,425,-150,290,-261,-139,-438,-491,-740,-322,-692,-659,-1013,-400,-489,-130,-431,-461,-332,-161,-302,-454,-443,-489,-403,-443,-472,-442,-454,-357,-427,-305,-456,-382,-411,-333,-418,-365,-303,-376,-334,-316,-292,-311,-265,-339,-352,-281,-323,-329,-379,-310,-346,-316,-320,-334,-291,-274,-280,-297,-290,-272,-270,-230,-233,-229,-239,-263,-224,-254,-270,-293,-279,-264,-289,-263,-287,-260,-261,-232,-231,-207,-225,-198,-204,-183,-223,-189,-221,-183,-247,-188,-250,-189,-248,-190,-209,-183,-176,-163,-144,-170,-178,-173,-185,-158,-195,-164,-234,-222,-222,-206,-165,-198,-160,-175,-170,-167,-137,-130,-154,-126,-141,-104,-142,-91,-116,-80,-71,-71,-88,-42,-93,-59,-74,-48,-70,-66,-85,-79,-76,-90,-60,-94,-51,-65,-11,-46,-25,-16,-13,-19,-24,-51,-74,-67,-91,-21,-96,-77,-66,2,-73,-36,-56,-44,-134,-42,-35,-70,-118,-47,-68,-116,-72,-82,-75,-136,-24,-85,-36,-90,1,-62,-4,-20,-33,-14,1,-17,-33,-28,-35,-37,-9,-16}, + /* IRC_Composite_C_R0195_T150_P045.wav */ + 
{13,-14,16,-17,19,-20,22,-25,27,-30,34,-38,42,-48,55,-63,75,-91,119,-208,14,-179,43,-143,14,-237,127,-445,347,318,882,1092,671,2337,-2,-2406,-2033,3403,2686,9080,6065,1654,-385,2378,3537,1656,-964,-2514,-65,-61,604,313,-548,8,506,751,592,750,799,256,788,738,691,-486,-503,-169,-52,-560,-771,-513,-275,-418,-623,-527,-620,-550,-389,-73,-175,-357,-452,-335,-134,-523,-381,-574,-280,-554,-346,-501,-295,-395,-265,-376,-302,-406,-287,-380,-314,-400,-273,-369,-283,-407,-233,-368,-270,-408,-280,-353,-282,-379,-324,-362,-292,-349,-288,-317,-293,-299,-273,-329,-303,-342,-223,-328,-247,-359,-217,-332,-232,-362,-243,-293,-224,-266,-223,-255,-188,-248,-204,-263,-170,-234,-148,-228,-150,-221,-123,-200,-152,-223,-169,-222,-177,-244,-174,-210,-124,-206,-129,-206,-127,-177,-96,-151,-166,-237,-192,-200,-169,-204,-186,-233,-213,-217,-169,-178,-153,-156,-130,-156,-129,-136,-88,-131,-105,-128,-101,-129,-90,-83,-84,-106,-52,-43,-49,-65,-44,-66,-38,-70,26,-76,6,-88,-13,-82,-34,-41,-4,-34,-1,-14,9,-50,-23,-69,-16,-101,-10,-110,-17,-119,-37,-74,-19,-102,-24,-41,-56,-111,-39,-111,-124,-147,-53,-65,-109,-46,-41,-68,-52,-7,-29,-64,-43,-18,-36,-49,-47,1,-26,-14,-36,-17}, + /* IRC_Composite_C_R0195_T165_P045.wav */ + 
{-5,6,-6,7,-7,8,-9,9,-10,11,-13,14,-16,18,-20,23,-27,31,-37,45,-57,83,-131,98,-140,102,-154,88,-172,82,24,745,704,1159,942,1377,685,-3350,-75,2310,5071,6483,6504,-342,276,2310,3436,1307,-1968,-1879,-504,1149,580,-184,-486,250,728,683,585,651,265,901,267,694,-240,-105,-607,-269,-676,-500,-479,-378,-278,-303,-345,-367,-327,-471,-422,-217,-111,-233,-480,-413,-399,-332,-479,-354,-471,-387,-344,-260,-374,-267,-230,-298,-347,-353,-301,-344,-328,-357,-343,-372,-361,-346,-368,-366,-338,-358,-344,-365,-318,-353,-318,-336,-351,-319,-335,-289,-334,-309,-321,-326,-336,-348,-335,-354,-321,-327,-304,-329,-269,-270,-235,-245,-249,-208,-236,-185,-237,-214,-241,-196,-206,-169,-200,-166,-161,-150,-185,-187,-205,-193,-210,-166,-170,-138,-171,-155,-201,-170,-152,-109,-129,-189,-178,-185,-130,-167,-150,-206,-233,-222,-196,-166,-206,-190,-201,-159,-173,-138,-175,-147,-160,-114,-124,-105,-119,-77,-100,-117,-136,-90,-111,-91,-103,-87,-79,-69,-28,-22,-44,-41,-25,-6,-27,-13,-21,-10,-27,9,1,-13,14,-2,20,6,-17,-7,-23,-45,-82,-88,-89,-86,-91,-67,-101,-82,-96,-74,-71,-81,-64,-98,-60,-94,-66,-105,-103,-119,-65,-33,-28,-21,-24,-24,-66,-22,-25,-29,-63,-45,-61,-77}, + /* IRC_Composite_C_R0195_T180_P045.wav */ + 
{1,-1,1,-1,2,-2,2,-2,2,-2,2,-2,3,-3,3,-3,3,-4,4,-4,4,-4,3,-2,9,-9,70,-59,121,-126,127,-45,222,619,653,1289,587,1432,300,-1888,-1240,3381,3947,6141,5035,479,-148,2113,2998,773,-1353,-1664,690,1041,-58,-664,-159,711,746,445,284,522,787,289,291,216,96,-622,-296,-616,-684,-562,-153,-193,-156,-160,-230,-111,-119,-291,-584,-298,-144,-240,-398,-579,-379,-509,-208,-402,-311,-292,-227,-267,-259,-266,-213,-263,-327,-375,-294,-371,-362,-434,-364,-436,-381,-431,-375,-404,-373,-398,-329,-365,-326,-409,-345,-385,-333,-367,-288,-357,-297,-388,-309,-398,-314,-430,-330,-408,-271,-349,-243,-310,-230,-234,-197,-214,-201,-187,-203,-238,-232,-238,-212,-225,-205,-211,-176,-179,-201,-211,-228,-202,-224,-150,-202,-129,-214,-123,-184,-94,-133,-120,-193,-166,-163,-113,-144,-135,-197,-168,-225,-183,-213,-177,-199,-157,-207,-155,-195,-92,-173,-121,-200,-130,-155,-103,-148,-111,-166,-85,-143,-56,-147,-56,-154,-59,-141,-36,-120,-42,-95,-13,-67,16,-28,44,-44,24,-33,43,-27,82,-21,103,-17,122,-2,81,-69,27,-35,-19,-94,-112,-111,-109,-91,-94,-119,-134,-85,-80,-83,-129,-79,-97,-58,-112,-41,-119,-24,-104,-33,-99,-41,-79,-41,-68,-44,-36,-44,-71,-84,-70}, + /* IRC_Composite_C_R0195_T195_P045.wav */ + 
{-10,11,-11,11,-12,12,-12,13,-13,14,-14,15,-15,16,-17,17,-18,19,-20,21,-22,23,-24,26,-28,29,-70,1,21,-50,-12,7,-98,118,-221,432,295,945,530,962,1220,-497,-1827,293,2669,3717,6133,2421,667,647,2517,2286,876,-1019,-811,715,567,146,-552,68,761,748,406,192,794,584,428,293,114,-104,-113,-153,-376,-408,-234,-64,-66,-15,75,-43,-32,-84,-58,-289,-255,-381,-227,-301,-267,-362,-351,-257,-250,-265,-197,-240,-267,-205,-195,-288,-234,-313,-331,-391,-314,-376,-403,-414,-376,-388,-386,-415,-381,-386,-367,-392,-359,-382,-358,-395,-338,-337,-339,-329,-314,-348,-381,-383,-383,-385,-396,-353,-328,-277,-291,-244,-249,-220,-237,-207,-212,-209,-222,-227,-219,-186,-196,-201,-196,-190,-194,-210,-219,-243,-209,-201,-163,-195,-186,-186,-145,-123,-130,-154,-175,-167,-170,-122,-126,-124,-185,-179,-196,-160,-179,-161,-199,-183,-176,-134,-131,-147,-153,-166,-151,-161,-167,-166,-166,-160,-162,-139,-166,-135,-153,-143,-142,-125,-105,-102,-94,-90,-78,-72,-57,-50,-51,-21,-44,-9,-31,-16,-46,-3,-13,24,10,27,32,25,66,-13,-17,-41,-15,-51,-100,-128,-126,-123,-107,-116,-127,-132,-96,-99,-88,-127,-87,-121,-70,-130,-87,-96,-70,-95,-94,-64,-80,-73,-64,-54,-55,-52,-44,-57}, + /* IRC_Composite_C_R0195_T210_P045.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-2,2,-2,2,-2,2,-2,3,-3,3,-3,4,-4,4,-5,5,-6,6,-7,9,-41,-44,-65,-30,-65,-8,-83,-60,-92,30,110,507,445,762,568,1174,-1167,-1019,628,2221,3797,4340,2039,369,1655,2128,2575,-74,-532,-44,828,327,-261,-60,394,940,522,463,465,701,615,239,186,177,226,24,55,-107,-121,-144,122,80,101,60,168,75,69,-42,-103,-208,-202,-275,-257,-213,-175,-274,-214,-201,-239,-203,-186,-264,-269,-249,-233,-294,-292,-329,-314,-341,-364,-402,-341,-398,-395,-448,-334,-429,-332,-431,-329,-423,-331,-395,-337,-340,-332,-347,-337,-368,-357,-389,-353,-376,-312,-331,-265,-307,-238,-251,-202,-250,-204,-228,-195,-243,-216,-235,-186,-234,-184,-216,-198,-218,-214,-243,-226,-238,-184,-210,-192,-215,-154,-146,-144,-168,-181,-177,-171,-140,-114,-169,-137,-193,-142,-178,-151,-186,-152,-183,-144,-144,-116,-143,-141,-149,-130,-174,-129,-185,-122,-193,-143,-180,-134,-169,-148,-166,-136,-141,-122,-130,-103,-114,-106,-102,-72,-91,-60,-67,-53,-70,-51,-40,-61,-20,-57,16,-56,22,-58,21,-49,-10,-48,3,-48,-36,-55,-46,-67,-83,-96,-101,-127,-82,-139,-122,-145,-101,-121,-109,-139,-130,-124,-118,-116,-120,-95,-97,-71,-73,-66,-58,-48,-24,-31,-11}, + /* IRC_Composite_C_R0195_T225_P045.wav */ + 
{-3,3,-3,3,-3,3,-3,4,-4,4,-4,4,-4,4,-4,5,-5,5,-5,5,-6,6,-6,7,-7,8,-9,10,-13,20,-75,-97,-66,-47,-85,-74,-101,-34,-123,-132,-22,222,292,490,466,903,275,-1049,-389,584,2358,3669,3152,1258,1261,1738,2331,1624,37,-28,186,674,40,219,313,646,713,656,528,603,598,473,348,444,144,373,151,223,11,95,112,186,158,153,282,115,221,30,18,-91,-32,-200,-106,-202,-164,-212,-141,-234,-274,-248,-205,-234,-252,-279,-220,-289,-259,-337,-286,-358,-337,-398,-361,-383,-377,-397,-403,-387,-416,-383,-391,-375,-383,-384,-351,-350,-337,-371,-325,-339,-314,-362,-307,-327,-273,-302,-260,-293,-265,-270,-253,-272,-249,-242,-222,-243,-203,-221,-193,-215,-188,-249,-219,-244,-222,-251,-227,-225,-219,-214,-206,-186,-149,-158,-170,-204,-178,-182,-146,-152,-156,-169,-175,-163,-157,-142,-159,-167,-156,-155,-165,-170,-148,-148,-131,-118,-114,-117,-133,-119,-145,-148,-151,-147,-145,-167,-157,-166,-139,-171,-158,-145,-135,-121,-133,-97,-119,-63,-84,-67,-101,-69,-67,-57,-75,-59,-62,-49,-63,-41,-77,-33,-68,-14,-83,-22,-64,-11,-77,-48,-83,-73,-129,-98,-117,-128,-152,-116,-144,-123,-154,-107,-170,-138,-138,-97,-115,-107,-82,-89,-65,-72,-83,-58,-72,-30}, + /* IRC_Composite_C_R0195_T240_P045.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,-1,1,-1,2,-5,-83,-84,-72,-135,-73,-91,-109,-87,-133,-84,-173,-91,-36,199,307,354,685,469,-284,-594,-516,1131,3046,2851,2215,1571,1578,1884,1712,836,241,420,708,320,266,499,693,588,631,732,645,553,384,505,486,415,335,428,309,250,327,166,248,139,364,147,249,184,201,90,89,-4,-84,-57,-140,-149,-268,-170,-229,-181,-241,-235,-260,-225,-272,-259,-355,-270,-344,-221,-429,-254,-394,-305,-443,-348,-406,-390,-405,-383,-420,-382,-385,-345,-366,-308,-315,-295,-317,-301,-307,-307,-319,-316,-337,-273,-310,-260,-332,-260,-287,-238,-267,-235,-236,-259,-223,-218,-231,-226,-221,-205,-245,-216,-236,-203,-244,-208,-224,-221,-211,-210,-180,-186,-192,-188,-204,-183,-150,-161,-144,-182,-151,-199,-152,-176,-159,-170,-168,-142,-147,-118,-110,-117,-97,-125,-84,-116,-88,-125,-124,-122,-160,-117,-177,-157,-199,-179,-171,-156,-148,-174,-137,-139,-117,-130,-135,-112,-124,-104,-131,-80,-123,-82,-99,-80,-101,-46,-83,-75,-90,-80,-63,-59,-51,-122,-65,-82,-66,-92,-66,-83,-92,-88,-117,-122,-158,-126,-150,-152,-142,-139,-83,-139,-101,-146,-84,-121,-95,-110,-91,-88,-79,-72,-47}, + /* IRC_Composite_C_R0195_T255_P045.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-2,2,-2,2,-2,2,-2,2,-3,3,-3,3,-4,4,-4,5,-5,6,-6,7,-8,10,-12,15,-21,35,-138,-127,-91,-119,-131,-100,-136,-154,-123,-162,-225,-17,208,157,422,539,315,-186,-874,-513,1317,2834,3212,2035,1165,1380,1985,1825,760,470,698,708,299,840,390,551,566,771,555,435,745,529,606,365,682,399,459,406,443,291,358,302,320,305,286,249,233,146,69,90,0,-26,-85,-106,-54,-197,-179,-210,-226,-306,-252,-328,-293,-273,-273,-342,-265,-302,-315,-353,-309,-387,-325,-457,-297,-442,-355,-408,-318,-407,-303,-346,-296,-308,-269,-329,-219,-337,-263,-338,-282,-364,-295,-326,-281,-306,-274,-302,-265,-294,-241,-286,-262,-250,-230,-264,-201,-225,-215,-219,-198,-213,-212,-205,-209,-201,-251,-202,-242,-212,-196,-186,-200,-172,-211,-195,-206,-149,-217,-164,-212,-147,-189,-117,-143,-88,-129,-55,-121,-125,-117,-90,-114,-92,-138,-75,-136,-123,-181,-119,-197,-132,-134,-131,-172,-162,-136,-187,-159,-156,-181,-172,-159,-163,-167,-116,-199,-172,-135,-102,-126,-131,-78,-101,-129,-76,-94,-120,-143,-86,-127,-74,-98,-46,-113,-79,-97,-44,-125,-131,-113,-86,-115,-124,-120,-129,-130,-91,-134,-118,-138,-78,-131,-110,-118,-103,-103,-97,-94,-71,-62,-45}, + /* IRC_Composite_C_R0195_T270_P045.wav */ + 
{-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,2,-2,3,-3,4,-4,6,-9,16,-64,-161,-143,-126,-145,-185,-145,-161,-193,-254,-120,-230,-52,146,316,329,329,253,-502,-967,-275,2022,3347,2710,947,1504,1884,1375,1901,809,618,609,671,737,376,610,640,652,766,573,669,526,783,441,547,504,550,598,531,667,314,551,275,554,115,507,96,221,103,170,93,44,90,-71,5,-109,-137,-246,-250,-241,-295,-289,-315,-295,-325,-258,-316,-295,-327,-300,-348,-332,-372,-354,-355,-374,-324,-355,-324,-341,-316,-283,-312,-268,-304,-257,-336,-239,-313,-303,-322,-321,-295,-351,-234,-324,-242,-342,-262,-303,-265,-288,-272,-254,-289,-193,-250,-195,-234,-188,-219,-212,-194,-217,-186,-231,-179,-233,-211,-209,-215,-227,-234,-228,-223,-193,-202,-196,-175,-156,-144,-145,-108,-127,-113,-111,-50,-105,-58,-119,-95,-117,-140,-166,-165,-130,-150,-114,-120,-117,-146,-129,-172,-175,-166,-189,-168,-160,-197,-193,-208,-220,-186,-186,-158,-220,-160,-145,-123,-177,-155,-188,-132,-141,-109,-129,-71,-127,-88,-107,-92,-141,-124,-132,-136,-104,-99,-93,-109,-90,-81,-97,-101,-102,-111,-126,-107,-115,-121,-123,-88,-123,-117,-104,-75,-79,-90,-92,-70,-77,-68,-59}, + /* IRC_Composite_C_R0195_T285_P045.wav */ + 
{5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,6,-6,6,-7,8,-12,29,101,50,50,117,188,58,104,258,239,166,306,686,455,833,834,700,494,424,49,743,2627,3265,3156,1306,-112,1114,559,356,246,113,-51,-113,695,8,344,126,384,-46,304,134,58,125,-42,60,-117,150,-115,60,-66,-74,-290,-174,-222,-336,-410,-393,-454,-500,-436,-457,-579,-566,-638,-618,-691,-652,-777,-695,-736,-701,-743,-633,-655,-650,-600,-573,-602,-585,-524,-544,-563,-496,-527,-483,-513,-414,-513,-387,-442,-351,-441,-335,-395,-360,-389,-346,-380,-350,-337,-339,-350,-286,-303,-303,-289,-278,-317,-289,-279,-256,-274,-224,-223,-201,-206,-165,-204,-151,-205,-147,-195,-148,-200,-177,-178,-170,-179,-162,-156,-149,-146,-135,-109,-105,-142,-94,-76,-93,-61,-56,-26,-52,-21,-71,-105,-113,-130,-140,-154,-74,-110,-107,-129,-114,-156,-108,-147,-143,-197,-121,-103,-164,-177,-160,-133,-135,-137,-138,-104,-105,-75,-105,-83,-126,-87,-67,-44,-97,-18,-59,-13,-45,-23,-53,-56,-43,-12,-14,-6,-40,1,-18,27,-19,35,-53,6,-9,7,11,2,-8,-3,22,16,-2,32,44,18,-5,28,17,14,19,9,27,26,56,44,54,64,65,88}, + /* IRC_Composite_C_R0195_T300_P045.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-2,6,-170,-102,-259,-234,-199,-268,-51,-528,-137,-354,-180,92,-180,340,604,11,-371,-583,-984,10,2699,3142,3080,1412,327,1987,956,830,764,347,196,382,1091,294,859,889,945,523,1199,597,582,854,625,694,469,918,446,675,763,646,482,552,534,540,563,394,300,434,396,321,144,359,-94,142,-43,165,-138,6,-183,-179,-221,-161,-328,-285,-267,-402,-333,-327,-294,-327,-363,-296,-325,-259,-291,-320,-304,-304,-298,-291,-297,-314,-280,-329,-297,-314,-281,-234,-352,-241,-283,-217,-315,-280,-263,-279,-310,-268,-299,-296,-261,-277,-280,-277,-225,-216,-261,-203,-214,-192,-239,-227,-233,-213,-266,-208,-195,-200,-197,-191,-188,-186,-205,-150,-205,-153,-112,-151,-87,-35,-120,-124,-179,-137,-157,-174,-117,-144,-171,-153,-152,-138,-131,-174,-135,-128,-150,-114,-214,-174,-206,-228,-215,-210,-194,-196,-208,-219,-189,-187,-156,-164,-222,-168,-262,-148,-197,-176,-237,-214,-212,-100,-129,-146,-213,-165,-123,-124,-133,-162,-159,-151,-116,-118,-122,-155,-138,-137,-123,-99,-108,-104,-101,-84,-78,-75,-64,-83,-84,-92,-41,-94,-60,-107,-53,-86,-49,-66,-60,-68,-31,-65,-31,-29}, + /* IRC_Composite_C_R0195_T315_P045.wav */ + 
{-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,1,-1,2,-2,4,-7,17,-100,-204,-340,-371,-160,-344,-275,-415,-419,-249,41,-182,77,891,131,2,-1052,-1535,924,162,4003,4715,1544,1179,621,1424,674,1203,-197,55,273,545,402,710,753,673,897,715,647,832,758,686,732,814,620,688,779,690,604,649,586,620,586,617,478,512,430,467,385,389,275,162,250,131,159,97,100,-42,-92,-107,-156,-193,-270,-280,-274,-295,-312,-336,-301,-367,-310,-306,-332,-327,-275,-299,-308,-302,-264,-320,-240,-345,-250,-325,-245,-353,-231,-309,-234,-308,-240,-335,-236,-284,-266,-330,-242,-316,-259,-281,-275,-309,-222,-254,-279,-277,-236,-254,-242,-235,-247,-199,-188,-240,-203,-141,-139,-214,-131,-200,-170,-111,-70,-193,-145,-148,-101,-173,-117,-218,-163,-214,-197,-208,-111,-206,-113,-191,-104,-100,-70,-142,-89,-169,-106,-184,-156,-187,-185,-255,-195,-151,-192,-223,-180,-149,-215,-245,-216,-248,-232,-230,-274,-292,-203,-223,-171,-218,-208,-215,-141,-139,-194,-204,-163,-184,-155,-159,-154,-164,-134,-151,-130,-134,-130,-152,-122,-129,-133,-120,-106,-118,-102,-72,-84,-69,-84,-65,-68,-55,-55,-56,-72,-45,-57,-43,-50,-64,-34,-44,-40,-36,-22,-14,-32,2}, + /* IRC_Composite_C_R0195_T330_P045.wav */ + 
{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,-1,1,-1,1,-1,1,-2,2,-2,3,-4,5,-8,-246,-348,-392,-294,-392,-112,-819,-204,-558,205,-381,275,768,476,319,-1186,-2693,1397,173,2904,7126,2191,1840,-141,1440,643,1148,280,-721,280,227,474,413,704,690,753,889,309,920,476,802,503,682,628,623,869,602,783,626,695,707,691,850,635,757,447,558,418,394,227,133,146,217,152,239,143,20,23,13,-113,-130,-110,-235,-231,-231,-223,-290,-311,-279,-339,-297,-335,-258,-358,-326,-332,-275,-283,-293,-287,-240,-279,-296,-244,-248,-261,-257,-280,-251,-271,-271,-291,-272,-276,-292,-293,-287,-273,-297,-261,-259,-291,-282,-238,-254,-285,-269,-242,-239,-234,-237,-211,-232,-160,-167,-153,-54,-121,-133,-68,-127,-55,-93,-170,-253,-307,-209,-255,-315,-307,-329,-320,-102,-172,-106,-41,-45,45,-125,12,-25,-98,-177,-185,-220,-166,-179,-213,-164,-210,-152,-240,-217,-183,-300,-182,-230,-224,-316,-238,-276,-217,-287,-209,-290,-207,-191,-176,-237,-198,-251,-191,-184,-201,-245,-147,-177,-164,-190,-112,-183,-108,-171,-100,-163,-85,-153,-101,-137,-74,-146,-57,-120,-58,-110,-43,-90,-50,-94,-38,-79,-64,-64,-54,-43,-59,-47,-30,-23,-30,-38,-10,-18,12,-13,10,-24,5}, + /* IRC_Composite_C_R0195_T345_P045.wav */ + 
{16,-16,16,-16,16,-16,16,-16,16,-15,15,-14,14,-13,11,-9,7,-4,0,5,-13,25,-45,84,-233,-338,-363,-485,-200,-447,-145,-898,-330,-612,95,-690,294,625,1287,-151,69,-4722,1521,-1345,1556,10396,3052,2742,68,1282,287,1583,813,-614,-309,335,28,768,552,558,584,902,480,734,438,738,500,576,491,546,493,681,435,611,561,752,683,864,812,738,819,537,703,581,367,218,0,254,20,344,-127,37,-166,211,-28,-62,-54,-160,-27,-117,-166,-287,-249,-103,-237,-262,-426,-232,-222,-305,-355,-320,-241,-298,-264,-311,-247,-264,-248,-253,-288,-211,-240,-212,-321,-248,-261,-248,-306,-335,-270,-295,-235,-360,-279,-274,-198,-287,-320,-253,-257,-194,-270,-236,-243,-201,-255,-169,-230,-203,-38,-204,-189,64,-47,-32,-96,-24,-149,-249,-211,-363,-445,-322,-199,-451,-374,-212,-206,-56,-101,52,-41,90,144,4,-84,38,-111,-234,-118,-217,-227,-246,-157,-274,-235,-253,-192,-341,-200,-287,-253,-258,-299,-242,-317,-247,-273,-208,-264,-246,-273,-176,-209,-205,-204,-238,-186,-184,-158,-218,-216,-193,-143,-174,-141,-179,-144,-152,-78,-161,-124,-122,-54,-126,-102,-95,-62,-64,-102,-54,-75,-51,-32,-78,-64,-53,-39,-81,-33,-75,-14,-32,-27,-68,2,-35,2,-22,5,-24,24,-12,-6,0}}; + +const int16_t irc_composite_c_r0195_p060[][256] = + {/* IRC_Composite_C_R0195_T000_P060.wav */ + 
{6,-6,6,-6,6,-6,6,-6,6,-6,6,-6,6,-6,5,-5,5,-6,7,-9,18,-62,-477,-304,-336,-458,-335,-491,-402,-829,-409,-707,-293,-66,983,571,504,539,-3665,-1218,-4247,2405,12687,3792,3387,222,562,745,2239,1323,-741,-286,-83,813,783,665,-6,1300,432,669,373,824,513,741,332,740,326,679,126,625,66,777,213,737,590,515,621,819,662,597,612,571,221,378,180,456,-17,193,-19,201,-103,188,-114,-50,-223,41,-272,-7,-204,-73,-274,-27,-179,-118,-266,-52,-236,-150,-298,-185,-328,-211,-330,-359,-321,-288,-288,-390,-233,-301,-238,-287,-186,-257,-179,-231,-185,-249,-207,-244,-242,-257,-261,-239,-326,-213,-312,-252,-311,-214,-308,-230,-270,-210,-269,-227,-231,-256,-281,-231,-249,-231,-210,-238,-174,-214,-204,-218,-156,-225,-179,-158,-114,-151,-171,-128,-150,-184,-87,-97,-145,-171,-201,-199,-179,-259,-239,-309,-310,-314,-227,-196,-170,-192,-26,-115,43,-61,74,-180,-52,-139,-83,-192,-18,-187,-104,-111,-67,-175,-118,-149,-164,-160,-118,-201,-219,-161,-187,-241,-189,-199,-195,-205,-163,-179,-154,-122,-150,-171,-167,-152,-190,-183,-192,-193,-159,-180,-150,-160,-87,-151,-131,-124,-50,-117,-92,-81,-58,-70,-16,-38,-34,-12,39,-10,47,38,87,17,83,31,82,15,52,1}, + /* IRC_Composite_C_R0195_T030_P060.wav */ + 
{20,-21,22,-23,24,-25,26,-28,29,-31,33,-35,37,-40,44,-49,57,-69,99,-268,-386,-296,-341,-306,-439,-461,-219,-926,-450,-515,-185,-151,2064,-248,2208,-557,-2551,-2664,-6482,6173,13499,9000,-215,137,1397,897,1847,1331,-989,-1152,-31,458,894,968,314,922,758,306,312,862,265,458,604,466,114,555,424,349,321,475,497,583,497,442,464,481,505,592,441,239,76,20,163,45,-273,-262,-336,-166,-64,-152,-189,-166,-140,-223,-230,-243,-348,-277,-328,-243,-252,-223,-247,-223,-225,-225,-211,-310,-229,-259,-253,-320,-259,-356,-330,-309,-289,-302,-271,-256,-257,-245,-269,-281,-283,-267,-256,-268,-267,-280,-263,-261,-261,-284,-289,-275,-272,-247,-274,-279,-271,-276,-305,-292,-268,-282,-266,-248,-248,-229,-247,-221,-202,-217,-233,-213,-204,-239,-209,-216,-194,-227,-155,-201,-185,-243,-159,-264,-210,-189,-201,-282,-175,-151,-178,-186,-73,-151,-201,-182,-71,-254,-132,-94,-52,-235,-22,-27,-101,-159,-17,-130,-166,-55,-23,-206,-132,-126,-159,-211,-63,-187,-189,-148,-84,-174,-142,-124,-126,-185,-149,-155,-156,-187,-149,-205,-136,-153,-105,-164,-138,-137,-75,-158,-128,-135,-115,-148,-94,-125,-133,-123,-51,-120,-102,-85,-18,-84,-52,-42,8,-51,10,0,9,-20,40,16,14,24,69,21,25,23,33,2}, + /* IRC_Composite_C_R0195_T060_P060.wav */ + 
{49,-49,50,-50,49,-49,48,-47,45,-43,39,-34,27,-17,1,24,-67,158,-573,27,-273,-533,-199,-358,-57,-1237,277,-801,-585,81,1678,-52,2584,-635,2281,-6550,-1907,-4733,12287,16138,2134,560,-1141,3390,1047,2071,-850,-288,-1730,541,499,654,282,1081,13,1053,-59,982,296,639,439,387,97,735,-102,457,298,541,229,951,144,642,-123,725,239,371,-407,173,-217,5,-418,64,-709,-238,-708,-195,-402,-221,-414,-199,-238,-175,-279,-243,-447,-227,-531,-270,-429,-178,-455,-173,-350,-170,-312,-238,-284,-268,-355,-248,-402,-300,-358,-251,-472,-244,-350,-274,-353,-195,-309,-279,-276,-243,-232,-248,-185,-198,-344,-177,-263,-274,-355,-198,-341,-222,-358,-178,-344,-238,-289,-271,-338,-286,-218,-316,-206,-256,-230,-264,-214,-239,-277,-261,-259,-225,-268,-221,-134,-246,-182,-192,-154,-260,-116,-218,-234,-234,-227,-254,-254,-224,-164,-220,-284,-56,-257,-45,-289,-17,-299,22,-225,103,-264,-36,-33,-158,-37,-147,39,-238,-62,-117,-69,-262,42,-153,-162,-86,-22,-214,-31,-193,-19,-225,-87,-128,-143,-84,-102,-114,-95,-106,-146,-143,-5,-308,-86,-140,-102,-254,41,-68,-208,37,-26,-47,-124,31,-99,-131,-35,-104,-59,-141,32,-115,3,-48,1,-54,32,-67,-4,-31,-40,-50,45,-30,-17,43,3,8,10,37}, + /* IRC_Composite_C_R0195_T090_P060.wav */ + 
{20,-22,23,-25,26,-28,30,-33,35,-38,41,-45,49,-54,60,-66,75,-87,-10,-413,-67,-233,-277,-289,-247,-282,-617,-150,-183,967,613,1964,132,3132,-4890,-30,-7492,6867,13211,9263,1898,-2424,3417,1010,3400,-250,-877,-1856,320,-121,560,520,754,353,491,-65,931,362,494,135,792,604,460,-39,664,258,476,82,671,151,141,64,562,-256,-248,-550,-170,-357,-396,-602,-158,-390,-415,-610,-298,-495,-302,-712,-205,-411,-237,-411,-197,-452,-250,-501,-239,-421,-284,-481,-170,-438,-181,-420,-184,-419,-262,-417,-218,-376,-336,-394,-333,-356,-335,-390,-304,-355,-247,-352,-237,-395,-250,-384,-156,-227,-147,-266,-148,-197,-207,-288,-280,-286,-312,-317,-266,-316,-238,-306,-223,-320,-187,-342,-177,-339,-198,-318,-212,-319,-219,-304,-241,-273,-207,-279,-190,-248,-107,-255,-126,-251,-112,-272,-95,-249,-163,-250,-133,-247,-177,-254,-198,-235,-194,-229,-212,-194,-191,-142,-191,-92,-153,-45,-145,-65,-138,-64,-93,-82,-65,-113,-49,-140,-78,-118,-32,-137,-37,-107,-30,-59,-142,-106,-132,-11,-240,49,-159,20,-179,55,-117,-17,-81,-27,-68,-101,-72,-98,-48,-107,-136,-19,-93,-74,-119,94,-211,28,-129,49,-157,78,-96,21,-143,105,-58,-15,-28,13,-42,14,-40,-16,-41,-38,-39,21,-40,-15,-19,-23,-62,3}, + /* IRC_Composite_C_R0195_T120_P060.wav */ + 
{-11,11,-11,11,-11,10,-10,10,-9,9,-8,7,-6,4,-1,-4,11,-23,54,-588,228,-347,10,-393,78,-279,-133,-619,373,107,880,1033,1066,2024,-241,-1833,-4036,1812,3742,13338,5090,1325,-1146,2733,2814,1516,-118,-2246,-488,96,262,192,815,13,476,529,495,181,475,265,780,210,341,81,498,784,541,47,-30,-8,36,-29,-17,-592,-609,-210,-251,-604,-616,-485,-190,-407,-234,-662,-290,-550,-301,-639,-361,-458,-250,-462,-279,-431,-205,-390,-287,-423,-271,-396,-265,-443,-263,-432,-176,-477,-278,-447,-188,-475,-218,-453,-216,-462,-223,-420,-282,-431,-248,-385,-293,-403,-276,-425,-204,-367,-74,-400,-116,-375,-65,-372,-120,-338,-171,-350,-192,-321,-226,-351,-196,-350,-234,-344,-164,-341,-158,-306,-138,-338,-156,-268,-116,-265,-176,-243,-204,-212,-198,-229,-226,-192,-144,-137,-153,-181,-152,-194,-135,-178,-195,-187,-227,-175,-208,-170,-244,-159,-254,-137,-245,-163,-226,-152,-162,-107,-156,-107,-123,-75,-95,-2,-107,-23,-62,-12,-74,-60,-54,-88,-41,-86,-11,-107,-13,-74,-29,-98,-23,-52,-89,-64,-61,-71,-89,-73,19,-96,13,-86,41,-106,92,-5,1,-25,-7,-37,-56,34,-91,-24,-86,10,-49,-33,-31,-56,10,-83,56,-62,45,-72,47,-53,7,-38,12,-40,-11,-4,-41,-34,-33,-33}, + /* IRC_Composite_C_R0195_T150_P060.wav */ + 
{-16,16,-17,17,-18,18,-19,20,-20,21,-21,22,-22,23,-23,23,-23,22,-20,15,-1,-129,61,-83,-118,-17,-51,-30,-188,-95,109,510,730,1005,940,2137,-1039,-410,-2971,1165,5406,9365,3937,2137,-649,991,4216,933,-931,-1885,270,39,471,-2,92,175,614,97,357,338,829,636,315,-32,510,435,434,61,257,-175,-293,-260,-101,-474,-492,-360,-265,-315,-367,-291,-300,-567,-400,-366,-215,-495,-451,-522,-369,-523,-275,-402,-310,-403,-263,-346,-267,-319,-256,-370,-300,-367,-298,-401,-295,-443,-304,-417,-322,-444,-298,-381,-312,-439,-329,-390,-317,-384,-301,-362,-302,-383,-295,-368,-322,-392,-272,-324,-246,-326,-211,-303,-225,-323,-240,-326,-214,-295,-235,-325,-259,-307,-231,-265,-204,-266,-221,-283,-193,-226,-150,-215,-156,-235,-140,-226,-173,-226,-138,-181,-139,-211,-149,-228,-146,-230,-122,-202,-104,-157,-89,-169,-122,-180,-141,-172,-119,-242,-182,-247,-146,-221,-160,-209,-160,-222,-134,-194,-158,-172,-105,-115,-90,-104,-68,-78,-70,-76,-8,-32,19,-27,16,-5,-14,-32,0,-60,-11,-71,10,-79,-60,-81,-34,-99,-28,-10,-16,0,23,9,-66,-19,-7,-4,-45,41,7,-5,-25,-4,7,-31,-3,22,-19,-42,6,-62,-7,-36,56,-32,47,-21,39,-84,-6,-47,-14,-45,33,-19,-3,-57}, + /* IRC_Composite_C_R0195_T180_P060.wav */ + 
{10,-10,10,-11,11,-10,10,-10,10,-10,10,-10,9,-9,8,-7,6,-5,3,-1,-2,6,-12,19,-22,153,-20,-54,110,44,10,-80,172,36,605,639,998,719,1758,-623,-249,-1810,1268,4590,6942,3540,1131,148,816,4013,533,-755,-1438,271,-13,605,142,55,202,291,-131,465,665,844,217,120,213,663,92,-57,-100,67,-191,-270,-528,-381,-572,-177,-319,-6,-278,-198,-211,58,-313,-463,-592,-248,-465,-368,-496,-344,-514,-270,-361,-242,-341,-263,-349,-193,-335,-278,-416,-272,-416,-335,-439,-313,-451,-348,-453,-349,-443,-392,-472,-363,-430,-386,-408,-312,-392,-320,-379,-310,-370,-331,-403,-335,-385,-348,-366,-269,-311,-276,-302,-234,-290,-316,-341,-275,-241,-217,-208,-227,-228,-238,-222,-184,-224,-208,-241,-186,-232,-205,-245,-203,-214,-163,-151,-128,-135,-145,-180,-187,-204,-171,-188,-145,-158,-112,-157,-75,-116,-69,-149,-129,-160,-136,-115,-133,-152,-184,-177,-167,-165,-197,-218,-169,-194,-138,-205,-133,-162,-112,-132,-87,-117,-57,-79,-38,-55,-39,-50,-29,-58,-18,2,13,-3,31,17,9,8,29,-11,-10,-3,19,-49,-35,-14,-39,-21,-64,-32,-53,-24,-15,-14,-23,27,17,51,-9,-15,-18,18,-37,5,-74,-13,-18,63,0,46,7,63,-15,19,-10,7,-30,-32,-44,-79,-107}, + /* IRC_Composite_C_R0195_T210_P060.wav */ + 
{-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-5,5,-4,4,-4,4,-4,-36,-70,2,-43,-45,-73,17,-83,-76,-100,274,192,709,499,1049,474,192,-1587,-214,1449,4357,5043,2092,784,976,2790,1402,1131,-633,70,310,385,217,300,87,393,309,640,644,573,357,469,403,414,393,291,95,9,177,41,-38,-147,-145,-178,-91,51,-23,82,6,81,-66,-34,-205,-207,-298,-223,-348,-275,-305,-257,-377,-308,-322,-230,-324,-269,-300,-305,-334,-286,-346,-334,-384,-351,-411,-405,-389,-413,-415,-387,-382,-401,-384,-393,-405,-388,-417,-364,-399,-357,-398,-374,-392,-376,-343,-359,-316,-314,-243,-324,-280,-326,-283,-293,-262,-262,-275,-249,-231,-206,-195,-215,-224,-241,-210,-220,-242,-276,-255,-264,-198,-192,-190,-189,-210,-164,-172,-169,-172,-189,-153,-193,-127,-166,-117,-158,-112,-128,-133,-107,-136,-117,-161,-108,-110,-142,-127,-184,-138,-172,-147,-192,-188,-193,-158,-153,-183,-168,-189,-142,-164,-152,-151,-135,-105,-91,-78,-46,-70,-9,-22,-10,6,-3,11,9,0,-18,-34,-49,-47,-78,-56,-80,-88,-84,-41,-76,-89,-89,-34,-62,21,-11,29,-63,38,13,32,-26,-6,-18,-15,-14,-11,-43,-37,-18,40,-10,3,-52,3,-65,-18,-66,-33,-83,-53}, + /* IRC_Composite_C_R0195_T240_P060.wav */ + 
{-5,5,-5,5,-5,6,-6,6,-6,6,-6,6,-6,6,-6,6,-6,6,-6,6,-6,7,-7,7,-6,6,-6,4,1,-68,-141,-43,-120,-98,-71,-122,-111,-111,-115,-151,-61,253,299,522,567,502,242,-1198,-647,857,3187,4265,2111,1152,1068,2588,1288,1124,171,189,245,613,495,306,631,410,664,513,726,614,455,361,493,538,415,305,397,398,328,162,355,127,167,95,91,92,127,159,137,126,136,112,42,-54,-59,-214,-99,-334,-210,-340,-229,-357,-264,-345,-232,-312,-253,-294,-304,-352,-288,-347,-349,-364,-352,-373,-394,-376,-368,-434,-376,-402,-393,-434,-406,-412,-357,-430,-351,-354,-374,-337,-289,-323,-276,-287,-233,-304,-270,-311,-283,-321,-267,-321,-256,-305,-231,-272,-202,-256,-217,-272,-229,-238,-234,-247,-261,-261,-253,-218,-198,-218,-190,-226,-148,-209,-150,-201,-174,-181,-174,-172,-158,-155,-141,-131,-122,-138,-117,-118,-119,-130,-144,-143,-130,-146,-135,-155,-149,-172,-147,-158,-167,-175,-169,-144,-176,-150,-108,-129,-129,-136,-93,-143,-97,-99,-87,-71,-74,-15,-38,-48,-75,-66,-95,-101,-91,-93,-103,-154,-122,-128,-116,-84,-70,-79,-65,-51,-28,-23,-20,-21,-53,-15,-9,15,-49,5,-40,-18,-42,-33,-30,-86,-81,-68,-62,-85,-44,-26,-89,-49,-55,-24,-61,-16}, + /* IRC_Composite_C_R0195_T270_P060.wav */ + 
{5,-5,6,-6,6,-6,6,-6,7,-7,7,-7,8,-8,8,-9,9,-10,10,-11,12,-12,13,-15,16,-19,22,-27,38,-79,-217,-116,-99,-179,-211,-36,-231,-204,-130,-219,-231,-133,80,277,396,430,429,-88,-768,-1080,327,3345,4354,1751,849,1821,1217,1828,885,517,269,311,1017,365,820,357,716,549,590,621,516,736,326,780,263,770,281,705,378,636,371,475,311,429,255,409,238,273,140,314,213,208,138,105,115,-23,74,-205,-25,-248,-155,-287,-280,-252,-297,-307,-288,-293,-267,-333,-245,-352,-304,-348,-348,-351,-433,-337,-451,-334,-472,-324,-423,-375,-381,-372,-314,-375,-276,-267,-321,-301,-279,-273,-291,-287,-302,-304,-304,-290,-236,-296,-289,-263,-294,-284,-324,-265,-281,-292,-245,-245,-266,-260,-214,-224,-254,-200,-260,-209,-252,-220,-200,-254,-196,-233,-158,-228,-136,-185,-148,-205,-143,-175,-148,-149,-166,-120,-148,-104,-128,-140,-125,-152,-101,-130,-77,-199,-88,-178,-108,-197,-126,-198,-117,-194,-113,-172,-133,-128,-87,-131,-57,-112,-60,-136,-71,-150,-125,-162,-106,-205,-139,-165,-138,-149,-118,-134,-84,-134,-83,-120,-64,-109,-79,-116,-53,-90,-64,-49,-97,-69,-54,-86,-38,-63,-120,-64,-43,-89,-78,-45,-32,-110,-9,-49,-30,-91,-36,-87,-34,-90,-12,-62,-20,-67}, + /* IRC_Composite_C_R0195_T300_P060.wav */ + 
{5,-5,5,-5,6,-6,6,-6,6,-6,6,-6,6,-7,7,-7,7,-7,7,-8,8,-8,8,-9,9,-9,10,-11,16,-103,-316,-221,-172,-255,-229,-364,-48,-533,-236,-67,-300,259,642,185,100,427,-1196,-1745,476,2467,5309,2589,680,1682,925,1569,1041,949,-237,234,798,421,371,569,750,604,969,544,826,529,786,297,628,582,490,514,782,585,536,518,533,446,484,585,264,478,477,367,328,417,65,219,65,246,-91,156,-64,72,-138,50,-297,-48,-254,-210,-356,-206,-256,-287,-317,-214,-367,-254,-373,-339,-401,-317,-430,-376,-368,-350,-418,-316,-298,-353,-273,-305,-281,-287,-227,-371,-293,-252,-310,-323,-260,-272,-324,-243,-257,-310,-290,-276,-297,-255,-201,-324,-280,-270,-230,-282,-221,-248,-298,-222,-268,-241,-260,-255,-277,-235,-187,-236,-210,-172,-224,-173,-145,-191,-157,-155,-157,-126,-186,-84,-150,-177,-153,-72,-128,-133,-127,-100,-229,-197,-160,-241,-236,-171,-176,-120,-75,-47,-120,-105,-128,-76,-160,-120,-127,-109,-139,-135,-144,-192,-179,-152,-203,-173,-143,-161,-61,-179,-143,-104,-126,-147,-78,-176,-138,-177,-87,-160,-110,-137,-186,-155,-86,-135,-127,-105,-97,-124,-34,-111,-50,-117,-72,-123,-75,-65,-94,-51,-83,-75,-56,-47,-45,-13,-39,-55,20,-56,36,-41,12,-24}, + /* IRC_Composite_C_R0195_T330_P060.wav */ + 
{3,-3,4,-4,4,-4,5,-5,5,-6,6,-6,7,-7,8,-9,10,-11,13,-15,18,-22,28,-41,71,-233,-373,-96,-345,-318,-371,-265,-252,-594,-439,-312,-197,197,323,301,1318,-757,-1616,-868,-2081,2422,6337,5378,1528,381,1195,1007,1545,810,158,-165,224,315,652,555,370,728,945,600,540,730,606,560,567,567,493,662,473,574,547,639,468,638,527,625,475,584,476,643,403,465,284,372,243,235,43,108,49,102,20,-28,-86,-48,-49,-120,-149,-133,-123,-173,-198,-229,-246,-291,-242,-300,-328,-373,-296,-357,-299,-377,-351,-356,-276,-311,-324,-289,-282,-268,-271,-255,-256,-273,-249,-285,-230,-270,-219,-299,-282,-269,-275,-287,-307,-267,-290,-268,-297,-284,-263,-248,-257,-244,-243,-257,-290,-273,-259,-265,-237,-261,-238,-273,-209,-234,-171,-171,-157,-118,-120,-137,-80,-115,-96,-166,-45,-140,-79,-126,-114,-259,-243,-265,-228,-274,-267,-264,-220,-203,-117,-137,-118,-112,-33,-82,-41,-93,-102,-121,-120,-145,-175,-177,-147,-143,-65,-127,-138,-136,-94,-127,-130,-217,-183,-159,-168,-202,-184,-203,-240,-202,-146,-195,-160,-147,-169,-143,-72,-128,-191,-139,-128,-170,-166,-129,-160,-152,-97,-112,-123,-68,-72,-87,-76,-27,-63,-48,-50,-16,-43,16,-15,-10,-1,57,47,30,49,50,35,14}}; + +const int16_t irc_composite_c_r0195_p075[][256] = + {/* IRC_Composite_C_R0195_T000_P075.wav */ + 
{12,-12,13,-13,13,-13,13,-13,14,-14,14,-14,14,-14,15,-15,14,-14,13,-12,7,8,-224,-367,-189,-595,-344,-165,-801,-581,-302,-836,-618,407,530,134,1455,-966,-265,-4329,-4113,4525,6992,8542,304,-199,2762,1323,1544,1359,195,-1272,815,548,454,489,1188,303,1217,433,606,486,940,200,865,394,415,452,439,167,687,262,388,668,618,250,776,505,518,525,594,287,552,461,358,276,304,160,239,30,35,-21,-62,-125,-17,-31,-371,39,-211,-122,-197,40,-391,72,-166,-93,-262,-84,-337,-76,-325,-174,-301,-196,-378,-120,-367,-324,-293,-268,-402,-208,-271,-377,-322,-212,-314,-262,-200,-245,-294,-186,-222,-275,-226,-280,-246,-300,-262,-182,-254,-272,-206,-217,-307,-182,-214,-248,-250,-184,-258,-303,-196,-229,-312,-240,-213,-256,-268,-167,-202,-256,-145,-174,-141,-306,-39,-235,-192,-132,-161,-142,-205,-78,-192,-93,-268,-65,-239,-166,-229,-88,-215,-227,-134,-262,-99,-230,-94,-297,-147,-247,-15,-245,-197,-51,-171,-94,-46,-60,-97,-14,-100,14,-99,-80,-92,-24,-178,55,-112,-23,-104,47,-121,-73,-80,-27,-196,-98,-74,-188,-162,-54,-110,-182,-133,-30,-121,-83,-72,-116,-145,-93,-89,-136,-159,-163,-75,-145,-145,-85,-61,-103,-107,5,-74,-43,-3,-22,-72,-26,-24,6,-85,5}, + /* IRC_Composite_C_R0195_T060_P075.wav */ + 
{-14,15,-15,16,-17,18,-19,20,-21,22,-23,24,-25,27,-28,30,-32,35,-42,65,-281,-389,123,-583,-268,-119,-384,-436,-660,282,-1424,931,-191,1870,-113,1526,243,-2974,-2922,-3038,6766,14120,3000,-177,427,2482,2472,607,1128,-2092,683,-982,2038,-839,1456,-30,1050,188,720,669,391,864,257,483,214,493,238,321,378,114,592,45,886,-9,646,103,701,13,345,57,116,121,-88,171,-386,97,-335,-262,-369,-373,-308,-409,33,-533,5,-484,24,-435,-237,-319,-208,-233,-416,-146,-317,-166,-309,-232,-304,-340,-158,-439,-169,-482,-113,-521,-200,-450,-258,-409,-296,-298,-425,-296,-402,-280,-434,-185,-317,-227,-279,-194,-241,-311,-232,-320,-314,-306,-233,-329,-276,-236,-245,-297,-256,-187,-267,-262,-210,-246,-287,-248,-205,-331,-278,-229,-232,-260,-191,-174,-217,-208,-141,-202,-243,-151,-110,-284,-132,-132,-134,-277,-115,-207,-229,-254,-153,-213,-329,-111,-236,-248,-245,-82,-281,-102,-144,-93,-133,-52,-82,-139,-23,-121,-40,-135,7,-127,-34,-127,-9,-67,-119,-37,-34,-134,4,-70,-77,-81,-62,-64,-127,-18,-109,11,-156,60,-224,27,-77,-130,6,-101,-38,-151,93,-175,-123,0,-32,-129,-111,115,-229,-48,12,-46,-196,-37,-14,-163,-83,-1,-137,-48,-101,91,-141,15,-60,28,-35,-33,-10,-28}, + /* IRC_Composite_C_R0195_T120_P075.wav */ + 
{-13,13,-14,14,-15,15,-16,16,-17,17,-18,18,-19,19,-20,20,-19,19,-16,12,-1,-57,-48,-189,-132,-75,-227,-56,-343,28,-501,199,201,917,964,1130,545,1226,-3782,-981,-1166,8252,10379,3341,-521,-669,4572,428,2096,-1386,-405,-1098,1383,-2,236,525,510,124,361,340,604,403,459,20,703,550,361,4,507,76,379,200,87,66,-36,226,133,30,-418,-183,-144,-119,-262,-333,-294,-550,-263,-353,-297,-518,-525,-481,-405,-172,-382,-282,-383,-195,-329,-205,-358,-340,-304,-296,-417,-305,-321,-319,-400,-254,-454,-283,-418,-294,-482,-262,-427,-329,-482,-271,-419,-319,-403,-345,-351,-373,-409,-381,-310,-297,-225,-271,-290,-302,-223,-252,-250,-249,-220,-293,-267,-259,-270,-267,-281,-238,-321,-208,-300,-203,-357,-197,-295,-169,-277,-177,-273,-210,-234,-196,-252,-188,-188,-144,-221,-121,-160,-150,-180,-111,-124,-171,-110,-188,-133,-213,-124,-220,-168,-203,-176,-182,-229,-156,-255,-117,-221,-99,-240,-61,-158,-122,-129,-39,-92,-133,4,-74,-53,-77,35,-129,8,-50,5,-61,-24,-33,-46,31,-56,58,-35,39,-37,57,-74,2,-108,-10,-136,18,-141,8,-114,4,-132,-22,-37,-31,-20,-71,79,-149,14,-32,50,-85,-47,94,-59,21,-39,82,-145,47,-47,-14,-82,-16,-3,-58,-48,-44,-22,-40}, + /* IRC_Composite_C_R0195_T180_P075.wav */ + 
{-17,17,-17,17,-16,16,-16,16,-15,15,-15,14,-14,14,-13,12,-12,11,-10,9,-7,5,-2,-2,11,164,-118,156,-74,171,-166,281,-259,331,-163,647,483,1342,96,2125,-652,369,-2506,1012,3788,6928,5436,-854,927,1011,2973,419,642,-1377,-224,371,309,-128,78,268,228,233,434,427,397,-91,692,541,379,-60,222,-23,131,178,60,-172,-56,-82,-145,-311,-297,-431,-248,-412,-107,-371,-72,-353,-277,-448,-332,-408,-362,-461,-379,-530,-388,-407,-290,-364,-334,-409,-288,-419,-285,-384,-338,-447,-343,-386,-344,-453,-378,-390,-377,-502,-386,-429,-361,-457,-343,-463,-441,-462,-348,-398,-369,-416,-375,-435,-312,-336,-244,-309,-307,-321,-263,-272,-272,-262,-236,-196,-217,-232,-215,-261,-261,-304,-256,-296,-220,-241,-218,-248,-199,-190,-206,-213,-223,-195,-209,-182,-150,-161,-155,-165,-145,-155,-129,-127,-102,-102,-126,-107,-145,-133,-143,-141,-131,-140,-117,-119,-86,-121,-100,-131,-130,-130,-139,-142,-162,-155,-148,-128,-171,-136,-140,-117,-101,-76,-112,-44,-78,-20,-37,-38,-18,15,-6,2,39,44,55,-20,46,41,30,8,37,20,-14,0,-10,0,-57,-38,-55,-62,-113,-27,-88,-33,-69,-21,-80,1,-86,62,-11,16,23,128,6,102,-12,152,3,46,29,36,-97,2,-15,22,-109,-54}, + /* IRC_Composite_C_R0195_T240_P075.wav */ + 
{3,-3,3,-3,3,-3,3,-2,2,-2,2,-2,1,-1,1,0,0,1,-2,3,-4,5,-7,10,-13,20,-42,-111,-129,-71,-115,-137,-61,-159,-123,-192,-19,-219,406,390,680,549,886,-353,-736,-1471,543,4694,6064,1476,785,1081,2341,1848,567,793,-1054,878,-19,694,-27,624,453,574,713,561,524,289,447,341,517,436,227,504,311,411,259,396,204,319,114,96,38,-78,39,-23,9,123,-104,210,-61,44,-245,-60,-225,-163,-284,-292,-366,-364,-318,-284,-326,-284,-257,-269,-279,-348,-246,-376,-367,-329,-388,-355,-428,-316,-418,-375,-426,-318,-396,-436,-393,-407,-383,-405,-383,-442,-422,-393,-344,-375,-309,-340,-270,-311,-270,-269,-256,-268,-233,-283,-233,-230,-236,-283,-256,-265,-301,-274,-317,-261,-298,-265,-239,-262,-221,-248,-188,-287,-192,-249,-152,-246,-140,-231,-148,-218,-130,-170,-165,-154,-171,-127,-181,-99,-138,-109,-120,-99,-88,-129,-61,-150,-63,-199,-44,-206,-106,-201,-113,-189,-182,-146,-180,-174,-177,-146,-150,-167,-103,-131,-76,-138,-14,-119,-32,-66,-17,-34,-48,8,-80,-30,-22,-34,-12,-52,1,-29,-7,12,-34,-30,-36,-43,-95,-29,-80,-102,-17,-119,-24,-119,10,-78,-16,-105,-38,-66,-45,-15,9,-36,-32,33,-12,-9,-66,-12,-59,8,-23,-10,-6,-22}, + /* IRC_Composite_C_R0195_T300_P075.wav */ + 
{5,-5,5,-6,6,-6,6,-7,7,-7,8,-8,9,-9,10,-10,11,-12,13,-14,16,-19,23,-30,49,-174,-216,-220,-266,-146,-276,-310,-167,-398,-257,-446,-119,2,150,904,282,399,-249,-994,-2605,51,5569,5353,3304,-294,1237,2394,748,1912,84,-494,511,465,372,641,761,120,1277,373,764,283,1087,-27,731,558,213,625,490,578,331,755,261,576,308,533,270,531,241,337,345,292,249,254,130,252,95,124,0,-94,22,-124,-166,-120,-222,-195,-188,-102,-288,-138,-247,-177,-285,-135,-368,-230,-298,-370,-301,-355,-365,-394,-255,-460,-300,-338,-376,-356,-289,-388,-412,-303,-449,-224,-403,-214,-327,-319,-233,-280,-268,-283,-281,-308,-227,-300,-251,-210,-330,-195,-309,-196,-294,-197,-342,-211,-310,-249,-268,-272,-295,-243,-275,-237,-249,-206,-274,-172,-248,-191,-211,-154,-258,-145,-194,-183,-134,-143,-154,-131,-111,-163,-86,-129,-136,-111,-159,-135,-153,-128,-194,-126,-204,-111,-239,-106,-205,-215,-163,-175,-185,-155,-144,-130,-163,-92,-105,-142,-4,-111,-109,-19,-118,13,-189,9,-76,-118,-8,-6,-126,-54,46,-138,-64,43,-92,-25,-132,48,-164,-57,-43,-117,-199,-11,-164,-58,-105,-75,-100,-150,-59,-62,-137,-80,-148,-65,-126,-49,-62,-33,-77,-10,-18,-11,-21,-15,-36,-38,-38,9}}; + +const int16_t irc_composite_c_r0195_p090[][256] = + {/* IRC_Composite_C_R0195_T000_P090.wav */ + 
{-1,1,-1,1,-1,1,-1,2,-2,2,-3,3,-3,4,-5,5,-7,8,-10,14,-25,-385,-133,-356,-344,-390,-312,-438,-400,-852,-107,-995,-245,-186,1358,-771,1473,28,-1710,-3762,-3025,4119,8200,5927,-785,1558,2250,1837,2098,631,-318,150,-77,1215,-23,792,865,600,981,659,366,950,511,766,502,516,589,299,331,571,105,609,462,485,538,594,470,640,393,432,325,604,259,355,336,454,33,263,182,-24,-124,121,-231,-50,-93,49,-230,-101,25,-196,-123,-21,-160,-161,-132,-136,-155,-268,-121,-146,-311,-112,-328,-201,-220,-280,-214,-304,-233,-227,-250,-242,-334,-256,-283,-393,-267,-285,-332,-331,-224,-295,-279,-171,-151,-361,-199,-239,-227,-317,-168,-211,-370,-182,-150,-343,-204,-192,-224,-319,-203,-137,-357,-146,-223,-237,-276,-162,-201,-262,-186,-209,-147,-267,-46,-252,-162,-146,-182,-199,-143,-218,-204,-108,-250,-163,-90,-241,-137,-125,-122,-272,-111,-188,-211,-160,-160,-164,-201,-231,-114,-175,-180,-212,-172,-181,-129,-130,-77,-150,-114,-39,12,-122,-9,-11,-46,-21,-4,7,-73,-20,-50,-21,-80,-102,2,-103,-66,-67,-8,-41,-119,-26,29,-208,3,-108,-34,-228,114,-196,-84,-28,1,-109,-92,77,-169,-27,48,-12,-79,3,-100,-44,-53,13,-103,-90,-14,-91,-23,-64,-21,-116}}; + +struct Elevation +{ + /** + * An array of |count| impulse responses of 256 samples for the left ear. + * The impulse responses in each elevation are at equally spaced azimuths + * for a full 360 degree revolution, ordered clockwise from in front the + * listener. + */ + const int16_t (*azimuths)[256]; + int count; +}; + +/** + * irc_composite_c_r0195 is an array with each element containing data for one + * elevation. 
+ */ +const Elevation irc_composite_c_r0195[] = + {{irc_composite_c_r0195_p315, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p315)}, + {irc_composite_c_r0195_p330, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p330)}, + {irc_composite_c_r0195_p345, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p345)}, + {irc_composite_c_r0195_p000, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p000)}, + {irc_composite_c_r0195_p015, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p015)}, + {irc_composite_c_r0195_p030, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p030)}, + {irc_composite_c_r0195_p045, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p045)}, + {irc_composite_c_r0195_p060, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p060)}, + {irc_composite_c_r0195_p075, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p075)}, + {irc_composite_c_r0195_p090, MOZ_ARRAY_LENGTH(irc_composite_c_r0195_p090)}}; + +const int irc_composite_c_r0195_first_elevation = -45; /* degrees */ +const int irc_composite_c_r0195_elevation_interval = 15; /* degrees */ +const int irc_composite_c_r0195_sample_rate = 44100; /* Hz */ diff --git a/dom/media/webaudio/blink/PeriodicWave.cpp b/dom/media/webaudio/blink/PeriodicWave.cpp new file mode 100644 index 000000000..3f949207a --- /dev/null +++ b/dom/media/webaudio/blink/PeriodicWave.cpp @@ -0,0 +1,358 @@ +/* + * Copyright (C) 2012 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. 
("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "PeriodicWave.h" +#include <algorithm> +#include <cmath> +#include <limits> +#include "mozilla/FFTBlock.h" + +const unsigned MinPeriodicWaveSize = 4096; // This must be a power of two. +const unsigned MaxPeriodicWaveSize = 8192; // This must be a power of two. +const float CentsPerRange = 1200 / 3; // 1/3 Octave. + +using namespace mozilla; +using mozilla::dom::OscillatorType; + +namespace WebCore { + +already_AddRefed<PeriodicWave> +PeriodicWave::create(float sampleRate, + const float* real, + const float* imag, + size_t numberOfComponents, + bool disableNormalization) +{ + bool isGood = real && imag && numberOfComponents > 0; + MOZ_ASSERT(isGood); + if (isGood) { + RefPtr<PeriodicWave> periodicWave = + new PeriodicWave(sampleRate, numberOfComponents, + disableNormalization); + + // Limit the number of components used to those for frequencies below the + // Nyquist of the fixed length inverse FFT. 
+ size_t halfSize = periodicWave->m_periodicWaveSize / 2; + numberOfComponents = std::min(numberOfComponents, halfSize); + periodicWave->m_numberOfComponents = numberOfComponents; + periodicWave->m_realComponents = new AudioFloatArray(numberOfComponents); + periodicWave->m_imagComponents = new AudioFloatArray(numberOfComponents); + memcpy(periodicWave->m_realComponents->Elements(), real, + numberOfComponents * sizeof(float)); + memcpy(periodicWave->m_imagComponents->Elements(), imag, + numberOfComponents * sizeof(float)); + + return periodicWave.forget(); + } + return nullptr; +} + +already_AddRefed<PeriodicWave> +PeriodicWave::createSine(float sampleRate) +{ + RefPtr<PeriodicWave> periodicWave = + new PeriodicWave(sampleRate, MinPeriodicWaveSize, false); + periodicWave->generateBasicWaveform(OscillatorType::Sine); + return periodicWave.forget(); +} + +already_AddRefed<PeriodicWave> +PeriodicWave::createSquare(float sampleRate) +{ + RefPtr<PeriodicWave> periodicWave = + new PeriodicWave(sampleRate, MinPeriodicWaveSize, false); + periodicWave->generateBasicWaveform(OscillatorType::Square); + return periodicWave.forget(); +} + +already_AddRefed<PeriodicWave> +PeriodicWave::createSawtooth(float sampleRate) +{ + RefPtr<PeriodicWave> periodicWave = + new PeriodicWave(sampleRate, MinPeriodicWaveSize, false); + periodicWave->generateBasicWaveform(OscillatorType::Sawtooth); + return periodicWave.forget(); +} + +already_AddRefed<PeriodicWave> +PeriodicWave::createTriangle(float sampleRate) +{ + RefPtr<PeriodicWave> periodicWave = + new PeriodicWave(sampleRate, MinPeriodicWaveSize, false); + periodicWave->generateBasicWaveform(OscillatorType::Triangle); + return periodicWave.forget(); +} + +PeriodicWave::PeriodicWave(float sampleRate, size_t numberOfComponents, bool disableNormalization) + : m_sampleRate(sampleRate) + , m_centsPerRange(CentsPerRange) + , m_maxPartialsInBandLimitedTable(0) + , m_normalizationScale(1.0f) + , m_disableNormalization(disableNormalization) +{ + 
float nyquist = 0.5 * m_sampleRate; + + if (numberOfComponents <= MinPeriodicWaveSize) { + m_periodicWaveSize = MinPeriodicWaveSize; + } else { + unsigned npow2 = powf(2.0f, floorf(logf(numberOfComponents - 1.0)/logf(2.0f) + 1.0f)); + m_periodicWaveSize = std::min(MaxPeriodicWaveSize, npow2); + } + + m_numberOfRanges = (unsigned)(3.0f*logf(m_periodicWaveSize)/logf(2.0f)); + m_bandLimitedTables.SetLength(m_numberOfRanges); + m_lowestFundamentalFrequency = nyquist / maxNumberOfPartials(); + m_rateScale = m_periodicWaveSize / m_sampleRate; +} + +size_t PeriodicWave::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + + amount += m_bandLimitedTables.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_bandLimitedTables.Length(); i++) { + if (m_bandLimitedTables[i]) { + amount += m_bandLimitedTables[i]->ShallowSizeOfIncludingThis(aMallocSizeOf); + } + } + + return amount; +} + +void PeriodicWave::waveDataForFundamentalFrequency(float fundamentalFrequency, float* &lowerWaveData, float* &higherWaveData, float& tableInterpolationFactor) +{ + + // Negative frequencies are allowed, in which case we alias + // to the positive frequency. + fundamentalFrequency = fabsf(fundamentalFrequency); + + // We only need to rebuild to the tables if the new fundamental + // frequency is low enough to allow for more partials below the + // Nyquist frequency. + unsigned numberOfPartials = numberOfPartialsForRange(0); + float nyquist = 0.5 * m_sampleRate; + if (fundamentalFrequency != 0.0) { + numberOfPartials = std::min(numberOfPartials, (unsigned)(nyquist / fundamentalFrequency)); + } + if (numberOfPartials > m_maxPartialsInBandLimitedTable) { + for (unsigned rangeIndex = 0; rangeIndex < m_numberOfRanges; ++rangeIndex) { + m_bandLimitedTables[rangeIndex] = 0; + } + + // We need to create the first table to determine the normalization + // constant. 
+ createBandLimitedTables(fundamentalFrequency, 0); + m_maxPartialsInBandLimitedTable = numberOfPartials; + } + + // Calculate the pitch range. + float ratio = fundamentalFrequency > 0 ? fundamentalFrequency / m_lowestFundamentalFrequency : 0.5; + float centsAboveLowestFrequency = logf(ratio)/logf(2.0f) * 1200; + + // Add one to round-up to the next range just in time to truncate + // partials before aliasing occurs. + float pitchRange = 1 + centsAboveLowestFrequency / m_centsPerRange; + + pitchRange = std::max(pitchRange, 0.0f); + pitchRange = std::min(pitchRange, static_cast<float>(m_numberOfRanges - 1)); + + // The words "lower" and "higher" refer to the table data having + // the lower and higher numbers of partials. It's a little confusing + // since the range index gets larger the more partials we cull out. + // So the lower table data will have a larger range index. + unsigned rangeIndex1 = static_cast<unsigned>(pitchRange); + unsigned rangeIndex2 = rangeIndex1 < m_numberOfRanges - 1 ? rangeIndex1 + 1 : rangeIndex1; + + if (!m_bandLimitedTables[rangeIndex1].get()) + createBandLimitedTables(fundamentalFrequency, rangeIndex1); + + if (!m_bandLimitedTables[rangeIndex2].get()) + createBandLimitedTables(fundamentalFrequency, rangeIndex2); + + lowerWaveData = m_bandLimitedTables[rangeIndex2]->Elements(); + higherWaveData = m_bandLimitedTables[rangeIndex1]->Elements(); + + // Ranges from 0 -> 1 to interpolate between lower -> higher. + tableInterpolationFactor = rangeIndex2 - pitchRange; +} + +unsigned PeriodicWave::maxNumberOfPartials() const +{ + return m_periodicWaveSize / 2; +} + +unsigned PeriodicWave::numberOfPartialsForRange(unsigned rangeIndex) const +{ + // Number of cents below nyquist where we cull partials. + float centsToCull = rangeIndex * m_centsPerRange; + + // A value from 0 -> 1 representing what fraction of the partials to keep. + float cullingScale = pow(2, -centsToCull / 1200); + + // The very top range will have all the partials culled. 
+ unsigned numberOfPartials = cullingScale * maxNumberOfPartials(); + + return numberOfPartials; +} + +// Convert into time-domain wave buffers. +// One table is created for each range for non-aliasing playback +// at different playback rates. Thus, higher ranges have more +// high-frequency partials culled out. +void PeriodicWave::createBandLimitedTables(float fundamentalFrequency, + unsigned rangeIndex) +{ + unsigned fftSize = m_periodicWaveSize; + unsigned i; + + const float *realData = m_realComponents->Elements(); + const float *imagData = m_imagComponents->Elements(); + + // This FFTBlock is used to cull partials (represented by frequency bins). + FFTBlock frame(fftSize); + + // Find the starting bin where we should start culling the aliasing + // partials for this pitch range. We need to clear out the highest + // frequencies to band-limit the waveform. + unsigned numberOfPartials = numberOfPartialsForRange(rangeIndex); + // Also limit to the number of components that are provided. + numberOfPartials = std::min(numberOfPartials, m_numberOfComponents - 1); + + // Limit number of partials to those below Nyquist frequency + float nyquist = 0.5 * m_sampleRate; + if (fundamentalFrequency != 0.0) { + numberOfPartials = std::min(numberOfPartials, + (unsigned)(nyquist / fundamentalFrequency)); + } + + // Copy from loaded frequency data and generate complex conjugate + // because of the way the inverse FFT is defined. + // The coefficients of higher partials remain zero, as initialized in + // the FFTBlock constructor. + for (i = 0; i < numberOfPartials + 1; ++i) { + frame.RealData(i) = realData[i]; + frame.ImagData(i) = -imagData[i]; + } + + // Clear any DC-offset. + frame.RealData(0) = 0; + // Clear value which has no effect. + frame.ImagData(0) = 0; + + // Create the band-limited table. 
+ AlignedAudioFloatArray* table = new AlignedAudioFloatArray(m_periodicWaveSize); + m_bandLimitedTables[rangeIndex] = table; + + // Apply an inverse FFT to generate the time-domain table data. + float* data = m_bandLimitedTables[rangeIndex]->Elements(); + frame.GetInverseWithoutScaling(data); + + // For the first range (which has the highest power), calculate + // its peak value then compute normalization scale. + if (!m_disableNormalization && !rangeIndex) { + float maxValue; + maxValue = AudioBufferPeakValue(data, m_periodicWaveSize); + + if (maxValue) + m_normalizationScale = 1.0f / maxValue; + } + + // Apply normalization scale. + if (!m_disableNormalization) { + AudioBufferInPlaceScale(data, m_normalizationScale, m_periodicWaveSize); + } +} + +void PeriodicWave::generateBasicWaveform(OscillatorType shape) +{ + const float piFloat = float(M_PI); + unsigned fftSize = periodicWaveSize(); + unsigned halfSize = fftSize / 2; + + m_numberOfComponents = halfSize; + m_realComponents = new AudioFloatArray(halfSize); + m_imagComponents = new AudioFloatArray(halfSize); + float* realP = m_realComponents->Elements(); + float* imagP = m_imagComponents->Elements(); + + // Clear DC and imag value which is ignored. + realP[0] = 0; + imagP[0] = 0; + + for (unsigned n = 1; n < halfSize; ++n) { + float omega = 2 * piFloat * n; + float invOmega = 1 / omega; + + // Fourier coefficients according to standard definition. + float a; // Coefficient for cos(). + float b; // Coefficient for sin(). + + // Calculate Fourier coefficients depending on the shape. + // Note that the overall scaling (magnitude) of the waveforms + // is normalized in createBandLimitedTables(). + switch (shape) { + case OscillatorType::Sine: + // Standard sine wave function. + a = 0; + b = (n == 1) ? 1 : 0; + break; + case OscillatorType::Square: + // Square-shaped waveform with the first half its maximum value + // and the second half its minimum value. + a = 0; + b = invOmega * ((n & 1) ? 
2 : 0); + break; + case OscillatorType::Sawtooth: + // Sawtooth-shaped waveform with the first half ramping from + // zero to maximum and the second half from minimum to zero. + a = 0; + b = -invOmega * cos(0.5 * omega); + break; + case OscillatorType::Triangle: + // Triangle-shaped waveform going from its maximum value to + // its minimum value then back to the maximum value. + a = 0; + if (n & 1) { + b = 2 * (2 / (n * piFloat) * 2 / (n * piFloat)) * ((((n - 1) >> 1) & 1) ? -1 : 1); + } else { + b = 0; + } + break; + default: + NS_NOTREACHED("invalid oscillator type"); + a = 0; + b = 0; + break; + } + + realP[n] = a; + imagP[n] = b; + } +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/PeriodicWave.h b/dom/media/webaudio/blink/PeriodicWave.h new file mode 100644 index 000000000..47381d450 --- /dev/null +++ b/dom/media/webaudio/blink/PeriodicWave.h @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2012 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef PeriodicWave_h +#define PeriodicWave_h + +#include "mozilla/dom/OscillatorNodeBinding.h" +#include <nsAutoPtr.h> +#include <nsTArray.h> +#include "AlignedTArray.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +typedef AlignedTArray<float> AlignedAudioFloatArray; +typedef nsTArray<float> AudioFloatArray; + +class PeriodicWave { +public: + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(WebCore::PeriodicWave); + + static already_AddRefed<PeriodicWave> createSine(float sampleRate); + static already_AddRefed<PeriodicWave> createSquare(float sampleRate); + static already_AddRefed<PeriodicWave> createSawtooth(float sampleRate); + static already_AddRefed<PeriodicWave> createTriangle(float sampleRate); + + // Creates an arbitrary periodic wave given the frequency components + // (Fourier coefficients). + static already_AddRefed<PeriodicWave> create(float sampleRate, + const float* real, + const float* imag, + size_t numberOfComponents, + bool disableNormalization); + + // Returns pointers to the lower and higher wave data for the pitch range + // containing the given fundamental frequency. These two tables are in + // adjacent "pitch" ranges where the higher table will have the maximum + // number of partials which won't alias when played back at this + // fundamental frequency. The lower wave is the next range containing fewer + // partials than the higher wave. 
Interpolation between these two tables + // can be made according to tableInterpolationFactor. Where values + // from 0 -> 1 interpolate between lower -> higher. + void waveDataForFundamentalFrequency(float, float* &lowerWaveData, float* &higherWaveData, float& tableInterpolationFactor); + + // Returns the scalar multiplier to the oscillator frequency to calculate + // wave buffer phase increment. + float rateScale() const { return m_rateScale; } + + unsigned periodicWaveSize() const { return m_periodicWaveSize; } + float sampleRate() const { return m_sampleRate; } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + explicit PeriodicWave(float sampleRate, size_t numberOfComponents, bool disableNormalization); + ~PeriodicWave() {} + + void generateBasicWaveform(mozilla::dom::OscillatorType); + + float m_sampleRate; + unsigned m_periodicWaveSize; + unsigned m_numberOfRanges; + float m_centsPerRange; + unsigned m_numberOfComponents; + nsAutoPtr<AudioFloatArray> m_realComponents; + nsAutoPtr<AudioFloatArray> m_imagComponents; + + // The lowest frequency (in Hertz) where playback will include all of the + // partials. Playing back lower than this frequency will gradually lose + // more high-frequency information. + // This frequency is quite low (~10Hz @ 44.1KHz) + float m_lowestFundamentalFrequency; + + float m_rateScale; + + unsigned numberOfRanges() const { return m_numberOfRanges; } + + // Maximum possible number of partials (before culling). + unsigned maxNumberOfPartials() const; + + unsigned numberOfPartialsForRange(unsigned rangeIndex) const; + + // Creates table for specified index based on fundamental frequency. 
+ void createBandLimitedTables(float fundamentalFrequency, unsigned rangeIndex); + unsigned m_maxPartialsInBandLimitedTable; + float m_normalizationScale; + bool m_disableNormalization; + nsTArray<nsAutoPtr<AlignedAudioFloatArray> > m_bandLimitedTables; +}; + +} // namespace WebCore + +#endif // PeriodicWave_h diff --git a/dom/media/webaudio/blink/README b/dom/media/webaudio/blink/README new file mode 100644 index 000000000..96d209dfc --- /dev/null +++ b/dom/media/webaudio/blink/README @@ -0,0 +1,24 @@ +This directory contains the code originally borrowed from the Blink Web Audio +implementation. We are forking the code here because in many cases the burden +of adopting Blink specific utilities is too large compared to the prospect of +importing upstream fixes by just copying newer versions of the code in the +future. + +The process of borrowing code from Blink is as follows: + +* Try to borrow utility classes only, and avoid borrowing code which depends + too much on the Blink specific utilities. +* First, import the pristine files from the Blink repository before adding + them to the build system, noting the SVN revision of Blink from which the + original files were copied in the commit message. +* In a separate commit, add the imported source files to the build system, + and apply the necessary changes to make it build successfully. +* Use the code in a separate commit. +* Never add headers as exported headers. All headers should be included + using the following convention: #include "blink/Header.h". +* Leave the imported code in the WebCore namespace, and import the needed + names into the Mozilla code via `using'. +* Cherry-pick upstream fixes manually when needed. In case you fix a problem + that is not Mozilla specific locally, try to upstream your changes into + Blink. +* Ping ehsan for any questions. 
diff --git a/dom/media/webaudio/blink/Reverb.cpp b/dom/media/webaudio/blink/Reverb.cpp new file mode 100644 index 000000000..4fca0822b --- /dev/null +++ b/dom/media/webaudio/blink/Reverb.cpp @@ -0,0 +1,243 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "Reverb.h" +#include "ReverbConvolverStage.h" + +#include <math.h> +#include "ReverbConvolver.h" +#include "mozilla/FloatingPoint.h" + +using namespace mozilla; + +namespace WebCore { + +// Empirical gain calibration tested across many impulse responses to ensure perceived volume is same as dry (unprocessed) signal +const float GainCalibration = -58; +const float GainCalibrationSampleRate = 44100; + +// A minimum power value to when normalizing a silent (or very quiet) impulse response +const float MinPower = 0.000125f; + +static float calculateNormalizationScale(ThreadSharedFloatArrayBufferList* response, size_t aLength, float sampleRate) +{ + // Normalize by RMS power + size_t numberOfChannels = response->GetChannels(); + + float power = 0; + + for (size_t i = 0; i < numberOfChannels; ++i) { + float channelPower = AudioBufferSumOfSquares(static_cast<const float*>(response->GetData(i)), aLength); + power += channelPower; + } + + power = sqrt(power / (numberOfChannels * aLength)); + + // Protect against accidental overload + if (!IsFinite(power) || IsNaN(power) || power < MinPower) + power = MinPower; + + float scale = 1 / power; + + scale *= powf(10, GainCalibration * 0.05f); // calibrate to make perceived volume same as unprocessed + + // Scale depends on sample-rate. 
+ if (sampleRate) + scale *= GainCalibrationSampleRate / sampleRate; + + // True-stereo compensation + if (response->GetChannels() == 4) + scale *= 0.5f; + + return scale; +} + +Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads, bool normalize, float sampleRate) +{ + float scale = 1; + + AutoTArray<const float*,4> irChannels; + for (size_t i = 0; i < impulseResponse->GetChannels(); ++i) { + irChannels.AppendElement(impulseResponse->GetData(i)); + } + AutoTArray<float,1024> tempBuf; + + if (normalize) { + scale = calculateNormalizationScale(impulseResponse, impulseResponseBufferLength, sampleRate); + + if (scale) { + tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength); + for (uint32_t i = 0; i < irChannels.Length(); ++i) { + float* buf = &tempBuf[i*impulseResponseBufferLength]; + AudioBufferCopyWithScale(irChannels[i], scale, buf, + impulseResponseBufferLength); + irChannels[i] = buf; + } + } + } + + initialize(irChannels, impulseResponseBufferLength, + maxFFTSize, numberOfChannels, useBackgroundThreads); +} + +size_t Reverb::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + amount += m_convolvers.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_convolvers.Length(); i++) { + if (m_convolvers[i]) { + amount += m_convolvers[i]->sizeOfIncludingThis(aMallocSizeOf); + } + } + + amount += m_tempBuffer.SizeOfExcludingThis(aMallocSizeOf, false); + return amount; +} + + +void Reverb::initialize(const nsTArray<const float*>& impulseResponseBuffer, + size_t impulseResponseBufferLength, + size_t maxFFTSize, size_t numberOfChannels, bool useBackgroundThreads) +{ + m_impulseResponseLength = impulseResponseBufferLength; + + // The reverb can handle a mono impulse response and still do stereo processing + size_t numResponseChannels = impulseResponseBuffer.Length(); + 
m_convolvers.SetCapacity(numberOfChannels); + + int convolverRenderPhase = 0; + for (size_t i = 0; i < numResponseChannels; ++i) { + const float* channel = impulseResponseBuffer[i]; + size_t length = impulseResponseBufferLength; + + nsAutoPtr<ReverbConvolver> convolver(new ReverbConvolver(channel, length, maxFFTSize, convolverRenderPhase, useBackgroundThreads)); + m_convolvers.AppendElement(convolver.forget()); + + convolverRenderPhase += WEBAUDIO_BLOCK_SIZE; + } + + // For "True" stereo processing we allocate a temporary buffer to avoid repeatedly allocating it in the process() method. + // It can be bad to allocate memory in a real-time thread. + if (numResponseChannels == 4) { + m_tempBuffer.AllocateChannels(2); + WriteZeroesToAudioBlock(&m_tempBuffer, 0, WEBAUDIO_BLOCK_SIZE); + } +} + +void Reverb::process(const AudioBlock* sourceBus, AudioBlock* destinationBus) +{ + // Do a fairly comprehensive sanity check. + // If these conditions are satisfied, all of the source and destination pointers will be valid for the various matrixing cases. + bool isSafeToProcess = sourceBus && destinationBus && sourceBus->ChannelCount() > 0 && destinationBus->mChannelData.Length() > 0 + && WEBAUDIO_BLOCK_SIZE <= MaxFrameSize && WEBAUDIO_BLOCK_SIZE <= size_t(sourceBus->GetDuration()) && WEBAUDIO_BLOCK_SIZE <= size_t(destinationBus->GetDuration()); + + MOZ_ASSERT(isSafeToProcess); + if (!isSafeToProcess) + return; + + // For now only handle mono or stereo output + MOZ_ASSERT(destinationBus->ChannelCount() <= 2); + + float* destinationChannelL = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[0])); + const float* sourceBusL = static_cast<const float*>(sourceBus->mChannelData[0]); + + // Handle input -> output matrixing... 
+ size_t numInputChannels = sourceBus->ChannelCount(); + size_t numOutputChannels = destinationBus->ChannelCount(); + size_t numReverbChannels = m_convolvers.Length(); + + if (numInputChannels == 2 && numReverbChannels == 2 && numOutputChannels == 2) { + // 2 -> 2 -> 2 + const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]); + float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1])); + m_convolvers[0]->process(sourceBusL, destinationChannelL); + m_convolvers[1]->process(sourceBusR, destinationChannelR); + } else if (numInputChannels == 1 && numOutputChannels == 2 && numReverbChannels == 2) { + // 1 -> 2 -> 2 + for (int i = 0; i < 2; ++i) { + float* destinationChannel = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[i])); + m_convolvers[i]->process(sourceBusL, destinationChannel); + } + } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 2) { + // 1 -> 1 -> 2 + m_convolvers[0]->process(sourceBusL, destinationChannelL); + + // simply copy L -> R + float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1])); + bool isCopySafe = destinationChannelL && destinationChannelR && size_t(destinationBus->GetDuration()) >= WEBAUDIO_BLOCK_SIZE; + MOZ_ASSERT(isCopySafe); + if (!isCopySafe) + return; + PodCopy(destinationChannelR, destinationChannelL, WEBAUDIO_BLOCK_SIZE); + } else if (numInputChannels == 1 && numReverbChannels == 1 && numOutputChannels == 1) { + // 1 -> 1 -> 1 + m_convolvers[0]->process(sourceBusL, destinationChannelL); + } else if (numInputChannels == 2 && numReverbChannels == 4 && numOutputChannels == 2) { + // 2 -> 4 -> 2 ("True" stereo) + const float* sourceBusR = static_cast<const float*>(sourceBus->mChannelData[1]); + float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1])); + + float* tempChannelL = 
static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[0])); + float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1])); + + // Process left virtual source + m_convolvers[0]->process(sourceBusL, destinationChannelL); + m_convolvers[1]->process(sourceBusL, destinationChannelR); + + // Process right virtual source + m_convolvers[2]->process(sourceBusR, tempChannelL); + m_convolvers[3]->process(sourceBusR, tempChannelR); + + AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->GetDuration()); + AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->GetDuration()); + } else if (numInputChannels == 1 && numReverbChannels == 4 && numOutputChannels == 2) { + // 1 -> 4 -> 2 (Processing mono with "True" stereo impulse response) + // This is an inefficient use of a four-channel impulse response, but we should handle the case. + float* destinationChannelR = static_cast<float*>(const_cast<void*>(destinationBus->mChannelData[1])); + + float* tempChannelL = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[0])); + float* tempChannelR = static_cast<float*>(const_cast<void*>(m_tempBuffer.mChannelData[1])); + + // Process left virtual source + m_convolvers[0]->process(sourceBusL, destinationChannelL); + m_convolvers[1]->process(sourceBusL, destinationChannelR); + + // Process right virtual source + m_convolvers[2]->process(sourceBusL, tempChannelL); + m_convolvers[3]->process(sourceBusL, tempChannelR); + + AudioBufferAddWithScale(tempChannelL, 1.0f, destinationChannelL, sourceBus->GetDuration()); + AudioBufferAddWithScale(tempChannelR, 1.0f, destinationChannelR, sourceBus->GetDuration()); + } else { + // Handle gracefully any unexpected / unsupported matrixing + // FIXME: add code for 5.1 support... 
+ destinationBus->SetNull(destinationBus->GetDuration()); + } +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/Reverb.h b/dom/media/webaudio/blink/Reverb.h new file mode 100644 index 000000000..35e72283d --- /dev/null +++ b/dom/media/webaudio/blink/Reverb.h @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef Reverb_h +#define Reverb_h + +#include "ReverbConvolver.h" +#include "nsAutoPtr.h" +#include "nsTArray.h" +#include "AudioBlock.h" +#include "mozilla/MemoryReporting.h" + +namespace mozilla { +class ThreadSharedFloatArrayBufferList; +} // namespace mozilla + +namespace WebCore { + +// Multi-channel convolution reverb with channel matrixing - one or more ReverbConvolver objects are used internally. + +class Reverb { +public: + enum { MaxFrameSize = 256 }; + + // renderSliceSize is a rendering hint, so the FFTs can be optimized to not all occur at the same time (very bad when rendering on a real-time thread). + Reverb(mozilla::ThreadSharedFloatArrayBufferList* impulseResponseBuffer, + size_t impulseResponseBufferLength, size_t maxFFTSize, + size_t numberOfChannels, bool useBackgroundThreads, bool normalize, + float sampleRate); + + void process(const mozilla::AudioBlock* sourceBus, + mozilla::AudioBlock* destinationBus); + + size_t impulseResponseLength() const { return m_impulseResponseLength; } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + void initialize(const nsTArray<const float*>& impulseResponseBuffer, + size_t impulseResponseBufferLength, size_t maxFFTSize, + size_t numberOfChannels, bool useBackgroundThreads); + + size_t m_impulseResponseLength; + + nsTArray<nsAutoPtr<ReverbConvolver> > m_convolvers; + + // For "True" stereo processing + mozilla::AudioBlock m_tempBuffer; +}; + +} // namespace WebCore + +#endif // Reverb_h diff --git a/dom/media/webaudio/blink/ReverbAccumulationBuffer.cpp b/dom/media/webaudio/blink/ReverbAccumulationBuffer.cpp new file mode 100644 index 000000000..4405164b2 --- /dev/null +++ b/dom/media/webaudio/blink/ReverbAccumulationBuffer.cpp @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ReverbAccumulationBuffer.h" +#include "AudioNodeEngine.h" +#include "mozilla/PodOperations.h" +#include <algorithm> + +using namespace mozilla; + +namespace WebCore { + +ReverbAccumulationBuffer::ReverbAccumulationBuffer(size_t length) + : m_readIndex(0) + , m_readTimeFrame(0) +{ + m_buffer.SetLength(length); + PodZero(m_buffer.Elements(), length); +} + +void ReverbAccumulationBuffer::readAndClear(float* destination, size_t numberOfFrames) +{ + size_t bufferLength = m_buffer.Length(); + bool isCopySafe = m_readIndex <= bufferLength && numberOfFrames <= bufferLength; + + MOZ_ASSERT(isCopySafe); + if (!isCopySafe) + return; + + size_t framesAvailable = bufferLength - m_readIndex; + size_t numberOfFrames1 = std::min(numberOfFrames, framesAvailable); + size_t numberOfFrames2 = numberOfFrames - numberOfFrames1; + + float* source = m_buffer.Elements(); + memcpy(destination, source + m_readIndex, sizeof(float) * numberOfFrames1); + memset(source + m_readIndex, 0, sizeof(float) * numberOfFrames1); + + // Handle wrap-around if necessary + if (numberOfFrames2 > 0) { + memcpy(destination + numberOfFrames1, source, sizeof(float) * numberOfFrames2); + memset(source, 0, sizeof(float) * numberOfFrames2); + } + + m_readIndex = (m_readIndex + numberOfFrames) % bufferLength; + m_readTimeFrame += numberOfFrames; +} + +void ReverbAccumulationBuffer::updateReadIndex(int* readIndex, size_t numberOfFrames) const +{ + // Update caller's readIndex + *readIndex = (*readIndex + numberOfFrames) % m_buffer.Length(); +} + +int ReverbAccumulationBuffer::accumulate(const float* source, size_t numberOfFrames, int* readIndex, size_t delayFrames) +{ + size_t bufferLength = m_buffer.Length(); + + size_t writeIndex = (*readIndex + delayFrames) % bufferLength; + + // Update caller's readIndex + *readIndex = (*readIndex + numberOfFrames) % bufferLength; + + size_t framesAvailable = bufferLength - writeIndex; + size_t numberOfFrames1 = std::min(numberOfFrames, framesAvailable); + size_t 
numberOfFrames2 = numberOfFrames - numberOfFrames1; + + float* destination = m_buffer.Elements(); + + bool isSafe = writeIndex <= bufferLength && numberOfFrames1 + writeIndex <= bufferLength && numberOfFrames2 <= bufferLength; + MOZ_ASSERT(isSafe); + if (!isSafe) + return 0; + + AudioBufferAddWithScale(source, 1.0f, destination + writeIndex, numberOfFrames1); + if (numberOfFrames2 > 0) { + AudioBufferAddWithScale(source + numberOfFrames1, 1.0f, destination, numberOfFrames2); + } + + return writeIndex; +} + +void ReverbAccumulationBuffer::reset() +{ + PodZero(m_buffer.Elements(), m_buffer.Length()); + m_readIndex = 0; + m_readTimeFrame = 0; +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/ReverbAccumulationBuffer.h b/dom/media/webaudio/blink/ReverbAccumulationBuffer.h new file mode 100644 index 000000000..a37741a2e --- /dev/null +++ b/dom/media/webaudio/blink/ReverbAccumulationBuffer.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ReverbAccumulationBuffer_h +#define ReverbAccumulationBuffer_h + +#include "AlignedTArray.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +// ReverbAccumulationBuffer is a circular delay buffer with one client reading from it and multiple clients +// writing/accumulating to it at different delay offsets from the read position. The read operation will zero the memory +// just read from the buffer, so it will be ready for accumulation the next time around. +class ReverbAccumulationBuffer { +public: + explicit ReverbAccumulationBuffer(size_t length); + + // This will read from, then clear-out numberOfFrames + void readAndClear(float* destination, size_t numberOfFrames); + + // Each ReverbConvolverStage will accumulate its output at the appropriate delay from the read position. 
+ // We need to pass in and update readIndex here, since each ReverbConvolverStage may be running in + // a different thread than the realtime thread calling ReadAndClear() and maintaining m_readIndex + // Returns the writeIndex where the accumulation took place + int accumulate(const float* source, size_t numberOfFrames, int* readIndex, size_t delayFrames); + + size_t readIndex() const { return m_readIndex; } + void updateReadIndex(int* readIndex, size_t numberOfFrames) const; + + size_t readTimeFrame() const { return m_readTimeFrame; } + + void reset(); + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const + { + return m_buffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + } + +private: + AlignedTArray<float, 16> m_buffer; + size_t m_readIndex; + size_t m_readTimeFrame; // for debugging (frame on continuous timeline) +}; + +} // namespace WebCore + +#endif // ReverbAccumulationBuffer_h diff --git a/dom/media/webaudio/blink/ReverbConvolver.cpp b/dom/media/webaudio/blink/ReverbConvolver.cpp new file mode 100644 index 000000000..e739400ae --- /dev/null +++ b/dom/media/webaudio/blink/ReverbConvolver.cpp @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ReverbConvolver.h" +#include "ReverbConvolverStage.h" + +using namespace mozilla; + +namespace WebCore { + +const int InputBufferSize = 8 * 16384; + +// We only process the leading portion of the impulse response in the real-time thread. We don't exceed this length. +// It turns out then, that the background thread has about 278msec of scheduling slop. +// Empirically, this has been found to be a good compromise between giving enough time for scheduling slop, +// while still minimizing the amount of processing done in the primary (high-priority) thread. +// This was found to be a good value on Mac OS X, and may work well on other platforms as well, assuming +// the very rough scheduling latencies are similar on these time-scales. Of course, this code may need to be +// tuned for individual platforms if this assumption is found to be incorrect. +const size_t RealtimeFrameLimit = 8192 + 4096 // ~278msec @ 44.1KHz + - WEBAUDIO_BLOCK_SIZE; +// First stage will have size MinFFTSize - successive stages will double in +// size each time until we hit the maximum size. 
+const size_t MinFFTSize = 256; +// If we are using background threads then don't exceed this FFT size for the +// stages which run in the real-time thread. This avoids having only one or +// two large stages (size 16384 or so) at the end which take a lot of time +// every several processing slices. This way we amortize the cost over more +// processing slices. +const size_t MaxRealtimeFFTSize = 4096; + +ReverbConvolver::ReverbConvolver(const float* impulseResponseData, + size_t impulseResponseLength, + size_t maxFFTSize, + size_t convolverRenderPhase, + bool useBackgroundThreads) + : m_impulseResponseLength(impulseResponseLength) + , m_accumulationBuffer(impulseResponseLength + WEBAUDIO_BLOCK_SIZE) + , m_inputBuffer(InputBufferSize) + , m_backgroundThread("ConvolverWorker") + , m_backgroundThreadCondition(&m_backgroundThreadLock) + , m_useBackgroundThreads(useBackgroundThreads) + , m_wantsToExit(false) + , m_moreInputBuffered(false) +{ + // For the moment, a good way to know if we have real-time constraint is to check if we're using background threads. + // Otherwise, assume we're being run from a command-line tool. + bool hasRealtimeConstraint = useBackgroundThreads; + + const float* response = impulseResponseData; + size_t totalResponseLength = impulseResponseLength; + + // The total latency is zero because the first FFT stage is small enough + // to return output in the first block. + size_t reverbTotalLatency = 0; + + size_t stageOffset = 0; + size_t stagePhase = 0; + size_t fftSize = MinFFTSize; + while (stageOffset < totalResponseLength) { + size_t stageSize = fftSize / 2; + + // For the last stage, it's possible that stageOffset is such that we're straddling the end + // of the impulse response buffer (if we use stageSize), so reduce the last stage's length... + if (stageSize + stageOffset > totalResponseLength) { + stageSize = totalResponseLength - stageOffset; + // Use smallest FFT that is large enough to cover the last stage. 
+ fftSize = MinFFTSize; + while (stageSize * 2 > fftSize) { + fftSize *= 2; + } + } + + // This "staggers" the time when each FFT happens so they don't all happen at the same time + int renderPhase = convolverRenderPhase + stagePhase; + + nsAutoPtr<ReverbConvolverStage> stage + (new ReverbConvolverStage(response, totalResponseLength, + reverbTotalLatency, stageOffset, stageSize, + fftSize, renderPhase, + &m_accumulationBuffer)); + + bool isBackgroundStage = false; + + if (this->useBackgroundThreads() && stageOffset > RealtimeFrameLimit) { + m_backgroundStages.AppendElement(stage.forget()); + isBackgroundStage = true; + } else + m_stages.AppendElement(stage.forget()); + + // Figure out next FFT size + fftSize *= 2; + + stageOffset += stageSize; + + if (hasRealtimeConstraint && !isBackgroundStage + && fftSize > MaxRealtimeFFTSize) { + fftSize = MaxRealtimeFFTSize; + // Custom phase positions for all but the first of the realtime + // stages of largest size. These spread out the work of the + // larger realtime stages. None of the FFTs of size 1024, 2048 or + // 4096 are performed when processing the same block. The first + // MaxRealtimeFFTSize = 4096 stage, at the end of the doubling, + // performs its FFT at block 7. The FFTs of size 2048 are + // performed in blocks 3 + 8 * n and size 1024 at 1 + 4 * n. + const uint32_t phaseLookup[] = { 14, 0, 10, 4 }; + stagePhase = WEBAUDIO_BLOCK_SIZE * + phaseLookup[m_stages.Length() % ArrayLength(phaseLookup)]; + } else if (fftSize > maxFFTSize) { + fftSize = maxFFTSize; + // A prime offset spreads out FFTs in a way that all + // available phase positions will be used if there are sufficient + // stages. + stagePhase += 5 * WEBAUDIO_BLOCK_SIZE; + } else if (stageSize > WEBAUDIO_BLOCK_SIZE) { + // As the stages are doubling in size, the next FFT will occur + // mid-way between FFTs for this stage. 
+ stagePhase = stageSize - WEBAUDIO_BLOCK_SIZE; + } + } + + // Start up background thread + // FIXME: would be better to up the thread priority here. It doesn't need to be real-time, but higher than the default... + if (this->useBackgroundThreads() && m_backgroundStages.Length() > 0) { + if (!m_backgroundThread.Start()) { + NS_WARNING("Cannot start convolver thread."); + return; + } + m_backgroundThread.message_loop()->PostTask(NewNonOwningRunnableMethod(this, + &ReverbConvolver::backgroundThreadEntry)); + } +} + +ReverbConvolver::~ReverbConvolver() +{ + // Wait for background thread to stop + if (useBackgroundThreads() && m_backgroundThread.IsRunning()) { + m_wantsToExit = true; + + // Wake up thread so it can return + { + AutoLock locker(m_backgroundThreadLock); + m_moreInputBuffered = true; + m_backgroundThreadCondition.Signal(); + } + + m_backgroundThread.Stop(); + } +} + +size_t ReverbConvolver::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + amount += m_stages.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_stages.Length(); i++) { + if (m_stages[i]) { + amount += m_stages[i]->sizeOfIncludingThis(aMallocSizeOf); + } + } + + amount += m_backgroundStages.ShallowSizeOfExcludingThis(aMallocSizeOf); + for (size_t i = 0; i < m_backgroundStages.Length(); i++) { + if (m_backgroundStages[i]) { + amount += m_backgroundStages[i]->sizeOfIncludingThis(aMallocSizeOf); + } + } + + // NB: The buffer sizes are static, so even though they might be accessed + // in another thread it's safe to measure them. 
+ amount += m_accumulationBuffer.sizeOfExcludingThis(aMallocSizeOf); + amount += m_inputBuffer.sizeOfExcludingThis(aMallocSizeOf); + + // Possible future measurements: + // - m_backgroundThread + // - m_backgroundThreadLock + // - m_backgroundThreadCondition + return amount; +} + +void ReverbConvolver::backgroundThreadEntry() +{ + while (!m_wantsToExit) { + // Wait for realtime thread to give us more input + m_moreInputBuffered = false; + { + AutoLock locker(m_backgroundThreadLock); + while (!m_moreInputBuffered && !m_wantsToExit) + m_backgroundThreadCondition.Wait(); + } + + // Process all of the stages until their read indices reach the input buffer's write index + int writeIndex = m_inputBuffer.writeIndex(); + + // Even though it doesn't seem like every stage needs to maintain its own version of readIndex + // we do this in case we want to run in more than one background thread. + int readIndex; + + while ((readIndex = m_backgroundStages[0]->inputReadIndex()) != writeIndex) { // FIXME: do better to detect buffer overrun... + // Accumulate contributions from each stage + for (size_t i = 0; i < m_backgroundStages.Length(); ++i) + m_backgroundStages[i]->processInBackground(this); + } + } +} + +void ReverbConvolver::process(const float* sourceChannelData, + float* destinationChannelData) +{ + const float* source = sourceChannelData; + float* destination = destinationChannelData; + bool isDataSafe = source && destination; + MOZ_ASSERT(isDataSafe); + if (!isDataSafe) + return; + + // Feed input buffer (read by all threads) + m_inputBuffer.write(source, WEBAUDIO_BLOCK_SIZE); + + // Accumulate contributions from each stage + for (size_t i = 0; i < m_stages.Length(); ++i) + m_stages[i]->process(source); + + // Finally read from accumulation buffer + m_accumulationBuffer.readAndClear(destination, WEBAUDIO_BLOCK_SIZE); + + // Now that we've buffered more input, wake up our background thread. 
+ + // Not using a MutexLocker looks strange, but we use a tryLock() instead because this is run on the real-time + // thread where it is a disaster for the lock to be contended (causes audio glitching). It's OK if we fail to + // signal from time to time, since we'll get to it the next time we're called. We're called repeatedly + // and frequently (around every 3ms). The background thread is processing well into the future and has a considerable amount of + // leeway here... + if (m_backgroundThreadLock.Try()) { + m_moreInputBuffered = true; + m_backgroundThreadCondition.Signal(); + m_backgroundThreadLock.Release(); + } +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/ReverbConvolver.h b/dom/media/webaudio/blink/ReverbConvolver.h new file mode 100644 index 000000000..b7eea45b8 --- /dev/null +++ b/dom/media/webaudio/blink/ReverbConvolver.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ReverbConvolver_h +#define ReverbConvolver_h + +#include "ReverbAccumulationBuffer.h" +#include "ReverbInputBuffer.h" +#include "nsAutoPtr.h" +#include "mozilla/MemoryReporting.h" +#ifdef LOG +#undef LOG +#endif +#include "base/condition_variable.h" +#include "base/lock.h" +#include "base/thread.h" + +namespace WebCore { + +class ReverbConvolverStage; + +class ReverbConvolver { +public: + // maxFFTSize can be adjusted (from say 2048 to 32768) depending on how much precision is necessary. + // For certain tweaky de-convolving applications the phase errors add up quickly and lead to non-sensical results with + // larger FFT sizes and single-precision floats. In these cases 2048 is a good size. + // If not doing multi-threaded convolution, then should not go > 8192. 
+ ReverbConvolver(const float* impulseResponseData, + size_t impulseResponseLength, size_t maxFFTSize, + size_t convolverRenderPhase, bool useBackgroundThreads); + ~ReverbConvolver(); + + void process(const float* sourceChannelData, + float* destinationChannelData); + + size_t impulseResponseLength() const { return m_impulseResponseLength; } + + ReverbInputBuffer* inputBuffer() { return &m_inputBuffer; } + + bool useBackgroundThreads() const { return m_useBackgroundThreads; } + void backgroundThreadEntry(); + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; +private: + nsTArray<nsAutoPtr<ReverbConvolverStage> > m_stages; + nsTArray<nsAutoPtr<ReverbConvolverStage> > m_backgroundStages; + size_t m_impulseResponseLength; + + ReverbAccumulationBuffer m_accumulationBuffer; + + // One or more background threads read from this input buffer which is fed from the realtime thread. + ReverbInputBuffer m_inputBuffer; + + // Background thread and synchronization + base::Thread m_backgroundThread; + Lock m_backgroundThreadLock; + ConditionVariable m_backgroundThreadCondition; + bool m_useBackgroundThreads; + bool m_wantsToExit; + bool m_moreInputBuffered; +}; + +} // namespace WebCore + +#endif // ReverbConvolver_h diff --git a/dom/media/webaudio/blink/ReverbConvolverStage.cpp b/dom/media/webaudio/blink/ReverbConvolverStage.cpp new file mode 100644 index 000000000..055098e88 --- /dev/null +++ b/dom/media/webaudio/blink/ReverbConvolverStage.cpp @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "ReverbConvolverStage.h" + +#include "ReverbAccumulationBuffer.h" +#include "ReverbConvolver.h" +#include "ReverbInputBuffer.h" +#include "mozilla/PodOperations.h" + +using namespace mozilla; + +namespace WebCore { + +ReverbConvolverStage::ReverbConvolverStage(const float* impulseResponse, size_t, + size_t reverbTotalLatency, + size_t stageOffset, + size_t stageLength, + size_t fftSize, size_t renderPhase, + ReverbAccumulationBuffer* accumulationBuffer) + : m_accumulationBuffer(accumulationBuffer) + , m_accumulationReadIndex(0) + , m_inputReadIndex(0) +{ + MOZ_ASSERT(impulseResponse); + MOZ_ASSERT(accumulationBuffer); + + m_fftKernel = new FFTBlock(fftSize); + m_fftKernel->PadAndMakeScaledDFT(impulseResponse + stageOffset, stageLength); + m_fftConvolver = new FFTConvolver(fftSize, renderPhase); + + // The convolution stage at offset stageOffset needs to have a corresponding delay to cancel out the offset. + size_t totalDelay = stageOffset + reverbTotalLatency; + + // But, the FFT convolution itself incurs latency, so subtract this out... + size_t fftLatency = m_fftConvolver->latencyFrames(); + MOZ_ASSERT(totalDelay >= fftLatency); + totalDelay -= fftLatency; + + m_postDelayLength = totalDelay; +} + +size_t ReverbConvolverStage::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const +{ + size_t amount = aMallocSizeOf(this); + + if (m_fftKernel) { + amount += m_fftKernel->SizeOfIncludingThis(aMallocSizeOf); + } + + if (m_fftConvolver) { + amount += m_fftConvolver->sizeOfIncludingThis(aMallocSizeOf); + } + + return amount; +} + +void ReverbConvolverStage::processInBackground(ReverbConvolver* convolver) +{ + ReverbInputBuffer* inputBuffer = convolver->inputBuffer(); + float* source = inputBuffer->directReadFrom(&m_inputReadIndex, + WEBAUDIO_BLOCK_SIZE); + process(source); +} + +void ReverbConvolverStage::process(const float* source) +{ + MOZ_ASSERT(source); + if (!source) + return; + + // Now, run the convolution (into the delay buffer). 
+ // An expensive FFT will happen every fftSize / 2 frames. + const float* output = m_fftConvolver->process(m_fftKernel, source); + + // Now accumulate into reverb's accumulation buffer. + m_accumulationBuffer->accumulate(output, WEBAUDIO_BLOCK_SIZE, + &m_accumulationReadIndex, + m_postDelayLength); +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/ReverbConvolverStage.h b/dom/media/webaudio/blink/ReverbConvolverStage.h new file mode 100644 index 000000000..0ebc33f3a --- /dev/null +++ b/dom/media/webaudio/blink/ReverbConvolverStage.h @@ -0,0 +1,79 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ReverbConvolverStage_h +#define ReverbConvolverStage_h + +#include "FFTConvolver.h" + +#include "nsAutoPtr.h" +#include "nsTArray.h" +#include "mozilla/FFTBlock.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +using mozilla::FFTBlock; + +class ReverbAccumulationBuffer; +class ReverbConvolver; + +// A ReverbConvolverStage represents the convolution associated with a sub-section of a large impulse response. +// It incorporates a delay line to account for the offset of the sub-section within the larger impulse response. +class ReverbConvolverStage { +public: + // renderPhase is useful to know so that we can manipulate the pre versus post delay so that stages will perform + // their heavy work (FFT processing) on different slices to balance the load in a real-time thread. + ReverbConvolverStage(const float* impulseResponse, size_t responseLength, size_t reverbTotalLatency, size_t stageOffset, size_t stageLength, size_t fftSize, size_t renderPhase, ReverbAccumulationBuffer*); + + // |source| must point to an array of WEBAUDIO_BLOCK_SIZE elements. 
+ void process(const float* source); + + void processInBackground(ReverbConvolver* convolver); + + // Useful for background processing + int inputReadIndex() const { return m_inputReadIndex; } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const; + +private: + nsAutoPtr<FFTBlock> m_fftKernel; + nsAutoPtr<FFTConvolver> m_fftConvolver; + + ReverbAccumulationBuffer* m_accumulationBuffer; + int m_accumulationReadIndex; + int m_inputReadIndex; + + size_t m_postDelayLength; + + nsTArray<float> m_temporaryBuffer; +}; + +} // namespace WebCore + +#endif // ReverbConvolverStage_h diff --git a/dom/media/webaudio/blink/ReverbInputBuffer.cpp b/dom/media/webaudio/blink/ReverbInputBuffer.cpp new file mode 100644 index 000000000..8221f8151 --- /dev/null +++ b/dom/media/webaudio/blink/ReverbInputBuffer.cpp @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ReverbInputBuffer.h" +#include "mozilla/PodOperations.h" + +using namespace mozilla; + +namespace WebCore { + +ReverbInputBuffer::ReverbInputBuffer(size_t length) + : m_writeIndex(0) +{ + m_buffer.SetLength(length); + PodZero(m_buffer.Elements(), length); +} + +void ReverbInputBuffer::write(const float* sourceP, size_t numberOfFrames) +{ + size_t bufferLength = m_buffer.Length(); + bool isCopySafe = m_writeIndex + numberOfFrames <= bufferLength; + MOZ_ASSERT(isCopySafe); + if (!isCopySafe) + return; + + memcpy(m_buffer.Elements() + m_writeIndex, sourceP, sizeof(float) * numberOfFrames); + + m_writeIndex += numberOfFrames; + MOZ_ASSERT(m_writeIndex <= bufferLength); + + if (m_writeIndex >= bufferLength) + m_writeIndex = 0; +} + +float* ReverbInputBuffer::directReadFrom(int* readIndex, size_t numberOfFrames) +{ + size_t bufferLength = m_buffer.Length(); + bool isPointerGood = readIndex && *readIndex >= 0 && *readIndex + numberOfFrames <= bufferLength; + MOZ_ASSERT(isPointerGood); + if (!isPointerGood) { + // Should never happen in practice but return pointer to start of buffer (avoid crash) + if (readIndex) + *readIndex = 0; + return m_buffer.Elements(); + } + + float* sourceP = m_buffer.Elements(); + float* p = sourceP + *readIndex; + + // Update readIndex + *readIndex = (*readIndex + numberOfFrames) % bufferLength; + + return p; +} + +void ReverbInputBuffer::reset() +{ + PodZero(m_buffer.Elements(), m_buffer.Length()); + 
m_writeIndex = 0; +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/ReverbInputBuffer.h b/dom/media/webaudio/blink/ReverbInputBuffer.h new file mode 100644 index 000000000..906021c0d --- /dev/null +++ b/dom/media/webaudio/blink/ReverbInputBuffer.h @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2010 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ReverbInputBuffer_h +#define ReverbInputBuffer_h + +#include "nsTArray.h" +#include "mozilla/MemoryReporting.h" + +namespace WebCore { + +// ReverbInputBuffer is used to buffer input samples for deferred processing by the background threads. +class ReverbInputBuffer { +public: + explicit ReverbInputBuffer(size_t length); + + // The realtime audio thread keeps writing samples here. + // The assumption is that the buffer's length is evenly divisible by numberOfFrames (for nearly all cases this will be fine). + // FIXME: remove numberOfFrames restriction... + void write(const float* sourceP, size_t numberOfFrames); + + // Background threads can call this to check if there's anything to read... + size_t writeIndex() const { return m_writeIndex; } + + // The individual background threads read here (and hope that they can keep up with the buffer writing). + // readIndex is updated with the next readIndex to read from... + // The assumption is that the buffer's length is evenly divisible by numberOfFrames. + // FIXME: remove numberOfFrames restriction... + float* directReadFrom(int* readIndex, size_t numberOfFrames); + + void reset(); + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const + { + return m_buffer.ShallowSizeOfExcludingThis(aMallocSizeOf); + } + + +private: + nsTArray<float> m_buffer; + size_t m_writeIndex; +}; + +} // namespace WebCore + +#endif // ReverbInputBuffer_h diff --git a/dom/media/webaudio/blink/ZeroPole.cpp b/dom/media/webaudio/blink/ZeroPole.cpp new file mode 100644 index 000000000..ac0b15c7a --- /dev/null +++ b/dom/media/webaudio/blink/ZeroPole.cpp @@ -0,0 +1,78 @@ +/* + * Copyright (C) 2011 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "ZeroPole.h" + +#include <cmath> +#include <float.h> + +namespace WebCore { + +void ZeroPole::process(const float *source, float *destination, int framesToProcess) +{ + float zero = m_zero; + float pole = m_pole; + + // Gain compensation to make 0dB @ 0Hz + const float k1 = 1 / (1 - zero); + const float k2 = 1 - pole; + + // Member variables to locals. 
+ float lastX = m_lastX; + float lastY = m_lastY; + + for (int i = 0; i < framesToProcess; ++i) { + float input = source[i]; + + // Zero + float output1 = k1 * (input - zero * lastX); + lastX = input; + + // Pole + float output2 = k2 * output1 + pole * lastY; + lastY = output2; + + destination[i] = output2; + } + + // Locals to member variables. Flush denormals here so we don't + // slow down the inner loop above. + if (lastX == 0.0f && lastY != 0.0f && fabsf(lastY) < FLT_MIN) { + // Flush future values to zero (until there is new input). + lastY = 0.0; + // Flush calculated values. + for (int i = framesToProcess; i-- && fabsf(destination[i]) < FLT_MIN; ) { + destination[i] = 0.0f; + } + } + + m_lastX = lastX; + m_lastY = lastY; +} + +} // namespace WebCore diff --git a/dom/media/webaudio/blink/ZeroPole.h b/dom/media/webaudio/blink/ZeroPole.h new file mode 100644 index 000000000..7381bde3d --- /dev/null +++ b/dom/media/webaudio/blink/ZeroPole.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2011 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ZeroPole_h +#define ZeroPole_h + +namespace WebCore { + +// ZeroPole is a simple filter with one zero and one pole. + +class ZeroPole { +public: + ZeroPole() + : m_zero(0) + , m_pole(0) + , m_lastX(0) + , m_lastY(0) + { + } + + void process(const float *source, float *destination, int framesToProcess); + + // Reset filter state. + void reset() { m_lastX = 0; m_lastY = 0; } + + void setZero(float zero) { m_zero = zero; } + void setPole(float pole) { m_pole = pole; } + + float zero() const { return m_zero; } + float pole() const { return m_pole; } + +private: + float m_zero; + float m_pole; + float m_lastX; + float m_lastY; +}; + +} // namespace WebCore + +#endif // ZeroPole_h diff --git a/dom/media/webaudio/blink/moz.build b/dom/media/webaudio/blink/moz.build new file mode 100644 index 000000000..385614de7 --- /dev/null +++ b/dom/media/webaudio/blink/moz.build @@ -0,0 +1,39 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +UNIFIED_SOURCES += [ + 'Biquad.cpp', + 'DynamicsCompressor.cpp', + 'DynamicsCompressorKernel.cpp', + 'FFTConvolver.cpp', + 'HRTFDatabase.cpp', + 'HRTFDatabaseLoader.cpp', + 'HRTFElevation.cpp', + 'HRTFKernel.cpp', + 'HRTFPanner.cpp', + 'IIRFilter.cpp', + 'PeriodicWave.cpp', + 'Reverb.cpp', + 'ReverbAccumulationBuffer.cpp', + 'ReverbConvolver.cpp', + 'ReverbConvolverStage.cpp', + 'ReverbInputBuffer.cpp', + 'ZeroPole.cpp', +] + +# Are we targeting x86 or x64? If so, build SSE2 files. +if CONFIG['INTEL_ARCHITECTURE']: + DEFINES['USE_SSE2'] = True + +include('/ipc/chromium/chromium-config.mozbuild') + +FINAL_LIBRARY = 'xul' +LOCAL_INCLUDES += [ + '/dom/media/webaudio', +] + +if CONFIG['GNU_CXX']: + CXXFLAGS += ['-Wno-shadow'] diff --git a/dom/media/webaudio/gtest/TestAudioEventTimeline.cpp b/dom/media/webaudio/gtest/TestAudioEventTimeline.cpp new file mode 100644 index 000000000..cc731d3e2 --- /dev/null +++ b/dom/media/webaudio/gtest/TestAudioEventTimeline.cpp @@ -0,0 +1,450 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ +/* vim:set ts=2 sw=2 sts=2 et cindent: */ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AudioEventTimeline.h" +#include <sstream> +#include <limits> +#include "gtest/gtest.h" + +// Mock the MediaStream class +namespace mozilla { +class MediaStream +{ + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(MediaStream) +private: + ~MediaStream() { + }; +}; +} + +using namespace mozilla; +using namespace mozilla::dom; +using std::numeric_limits; + +// Some simple testing primitives +void ok(bool val, const char* msg) +{ + if (!val) { + fprintf(stderr, "failure: %s", msg); + } + ASSERT_TRUE(val); +} + +namespace std { + +template <class T> +basic_ostream<T, char_traits<T> >& +operator<<(basic_ostream<T, char_traits<T> >& os, nsresult rv) +{ + os << static_cast<uint32_t>(rv); + return os; +} + +} // namespace std + +template <class T, class U> +void is(const T& a, const U& b, const char* msg) +{ + std::stringstream ss; + ss << msg << ", Got: " << a << ", expected: " << b << std::endl; + ok(a == b, ss.str().c_str()); +} + +template <> +void is(const float& a, const float& b, const char* msg) +{ + // stupidly high, since we mostly care about the correctness of the algorithm + const float kEpsilon = 0.00001f; + + std::stringstream ss; + ss << msg << ", Got: " << a << ", expected: " << b << std::endl; + ok(fabsf(a - b) < kEpsilon, ss.str().c_str()); +} + +class ErrorResultMock +{ +public: + ErrorResultMock() + : mRv(NS_OK) + { + } + void Throw(nsresult aRv) + { + mRv = aRv; + } + + operator nsresult() const + { + return mRv; + } + + ErrorResultMock& operator=(nsresult aRv) + { + mRv = aRv; + return *this; + } + +private: + nsresult mRv; +}; + +typedef AudioEventTimeline Timeline; + +TEST(AudioEventTimeline, SpecExample) +{ + // First, run the basic tests + Timeline timeline(10.0f); + is(timeline.Value(), 10.0f, "Correct default value returned"); + + ErrorResultMock rv; + + uint32_t curveLength = 44100; + float* curve = new float[curveLength]; + for (uint32_t i = 0; i < curveLength; ++i) { + curve[i] = sin(M_PI * i / float(curveLength)); + } + + // This test 
is copied from the example in the Web Audio spec + const double t0 = 0.0, + t1 = 0.1, + t2 = 0.2, + t3 = 0.3, + t4 = 0.4, + t5 = 0.6, + t6 = 0.7, + t7 = 1.0; + timeline.SetValueAtTime(0.2f, t0, rv); + is(rv, NS_OK, "SetValueAtTime succeeded"); + timeline.SetValueAtTime(0.3f, t1, rv); + is(rv, NS_OK, "SetValueAtTime succeeded"); + timeline.SetValueAtTime(0.4f, t2, rv); + is(rv, NS_OK, "SetValueAtTime succeeded"); + timeline.LinearRampToValueAtTime(1.0f, t3, rv); + is(rv, NS_OK, "LinearRampToValueAtTime succeeded"); + timeline.LinearRampToValueAtTime(0.15f, t4, rv); + is(rv, NS_OK, "LinearRampToValueAtTime succeeded"); + timeline.ExponentialRampToValueAtTime(0.75f, t5, rv); + is(rv, NS_OK, "ExponentialRampToValueAtTime succeeded"); + timeline.ExponentialRampToValueAtTime(0.05f, t6, rv); + is(rv, NS_OK, "ExponentialRampToValueAtTime succeeded"); + timeline.SetValueCurveAtTime(curve, curveLength, t6, t7 - t6, rv); + is(rv, NS_OK, "SetValueCurveAtTime succeeded"); + + is(timeline.GetValueAtTime(0.0), 0.2f, "Correct value"); + is(timeline.GetValueAtTime(0.05), 0.2f, "Correct value"); + is(timeline.GetValueAtTime(0.1), 0.3f, "Correct value"); + is(timeline.GetValueAtTime(0.15), 0.3f, "Correct value"); + is(timeline.GetValueAtTime(0.2), 0.4f, "Correct value"); + is(timeline.GetValueAtTime(0.25), (0.4f + 1.0f) / 2, "Correct value"); + is(timeline.GetValueAtTime(0.3), 1.0f, "Correct value"); + is(timeline.GetValueAtTime(0.35), (1.0f + 0.15f) / 2, "Correct value"); + is(timeline.GetValueAtTime(0.4), 0.15f, "Correct value"); + is(timeline.GetValueAtTime(0.45), (0.15f * powf(0.75f / 0.15f, 0.05f / 0.2f)), "Correct value"); + is(timeline.GetValueAtTime(0.5), (0.15f * powf(0.75f / 0.15f, 0.5f)), "Correct value"); + is(timeline.GetValueAtTime(0.55), (0.15f * powf(0.75f / 0.15f, 0.15f / 0.2f)), "Correct value"); + is(timeline.GetValueAtTime(0.6), 0.75f, "Correct value"); + is(timeline.GetValueAtTime(0.65), (0.75f * powf(0.05f / 0.75f, 0.5f)), "Correct value"); + 
is(timeline.GetValueAtTime(0.7), 0.0f, "Correct value"); + is(timeline.GetValueAtTime(0.85), 1.0f, "Correct value"); + is(timeline.GetValueAtTime(1.0), curve[curveLength - 1], "Correct value"); + + delete[] curve; +} + +TEST(AudioEventTimeline, InvalidEvents) +{ + static_assert(numeric_limits<float>::has_quiet_NaN, "Platform must have a quiet NaN"); + const float NaN = numeric_limits<float>::quiet_NaN(); + const float Infinity = numeric_limits<float>::infinity(); + Timeline timeline(10.0f); + + float curve[] = { -1.0f, 0.0f, 1.0f }; + float badCurve1[] = { -1.0f, NaN, 1.0f }; + float badCurve2[] = { -1.0f, Infinity, 1.0f }; + float badCurve3[] = { -1.0f, -Infinity, 1.0f }; + + ErrorResultMock rv; + + timeline.SetValueAtTime(NaN, 0.1, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueAtTime(Infinity, 0.1, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueAtTime(-Infinity, 0.1, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.LinearRampToValueAtTime(NaN, 0.2, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.LinearRampToValueAtTime(Infinity, 0.2, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.LinearRampToValueAtTime(-Infinity, 0.2, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.ExponentialRampToValueAtTime(NaN, 0.3, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.ExponentialRampToValueAtTime(Infinity, 0.3, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.ExponentialRampToValueAtTime(-Infinity, 0.4, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.ExponentialRampToValueAtTime(0, 0.5, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetTargetAtTime(NaN, 0.4, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code 
returned"); + timeline.SetTargetAtTime(Infinity, 0.4, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetTargetAtTime(-Infinity, 0.4, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetTargetAtTime(0.4f, NaN, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetTargetAtTime(0.4f, Infinity, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetTargetAtTime(0.4f, -Infinity, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(nullptr, 0, 1.0, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(badCurve1, ArrayLength(badCurve1), 1.0, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(badCurve2, ArrayLength(badCurve2), 1.0, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(badCurve3, ArrayLength(badCurve3), 1.0, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), NaN, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), Infinity, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), -Infinity, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), 1.0, NaN, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), 1.0, Infinity, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), 1.0, -Infinity, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct 
error code returned"); +} + +TEST(AudioEventTimeline, EventReplacement) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + is(timeline.GetEventCount(), 0u, "No events yet"); + timeline.SetValueAtTime(10.0f, 0.1, rv); + is(timeline.GetEventCount(), 1u, "One event scheduled now"); + timeline.SetValueAtTime(20.0f, 0.1, rv); + is(rv, NS_OK, "Event scheduling should be successful"); + is(timeline.GetEventCount(), 1u, "Event should be replaced"); + is(timeline.GetValueAtTime(0.1), 20.0f, "The first event should be overwritten"); + timeline.LinearRampToValueAtTime(30.0f, 0.1, rv); + is(rv, NS_OK, "Event scheduling should be successful"); + is(timeline.GetEventCount(), 2u, "Different event type should be appended"); + is(timeline.GetValueAtTime(0.1), 30.0f, "The first event should be overwritten"); +} + +TEST(AudioEventTimeline, EventRemoval) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetValueAtTime(10.0f, 0.1, rv); + timeline.SetValueAtTime(15.0f, 0.15, rv); + timeline.SetValueAtTime(20.0f, 0.2, rv); + timeline.LinearRampToValueAtTime(30.0f, 0.3, rv); + is(timeline.GetEventCount(), 4u, "Should have three events initially"); + timeline.CancelScheduledValues(0.4); + is(timeline.GetEventCount(), 4u, "Trying to delete past the end of the array should have no effect"); + timeline.CancelScheduledValues(0.3); + is(timeline.GetEventCount(), 3u, "Should successfully delete one event"); + timeline.CancelScheduledValues(0.12); + is(timeline.GetEventCount(), 1u, "Should successfully delete two events"); + timeline.CancelAllEvents(); + ok(timeline.HasSimpleValue(), "No event should remain scheduled"); +} + +TEST(AudioEventTimeline, BeforeFirstEventSetValue) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetValueAtTime(20.0f, 1.0, rv); + is(timeline.GetValueAtTime(0.5), 10.0f, "Retrun the default value before the first event"); +} + +TEST(AudioEventTimeline, BeforeFirstEventSetTarget) +{ + Timeline timeline(10.0f); + + 
ErrorResultMock rv; + + timeline.SetTargetAtTime(20.0f, 1.0, 5.0, rv); + is(timeline.GetValueAtTime(0.5), 10.0f, "Retrun the default value before the first event"); +} + +TEST(AudioEventTimeline, BeforeFirstEventLinearRamp) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.LinearRampToValueAtTime(20.0f, 1.0, rv); + is(timeline.GetValueAtTime(0.5), 10.0f, "Retrun the default value before the first event"); +} + +TEST(AudioEventTimeline, BeforeFirstEventExponentialRamp) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.ExponentialRampToValueAtTime(20.0f, 1.0, rv); + is(timeline.GetValueAtTime(0.5), 10.0f, "Retrun the default value before the first event"); +} + +TEST(AudioEventTimeline, AfterLastValueEvent) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetValueAtTime(20.0f, 1.0, rv); + is(timeline.GetValueAtTime(1.5), 20.0f, "Return the last value after the last SetValue event"); +} + +TEST(AudioEventTimeline, AfterLastTargetValueEvent) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetTargetAtTime(20.0f, 1.0, 5.0, rv); + is(timeline.GetValueAtTime(10.), (20.f + (10.f - 20.f) * expf(-9.0f / 5.0f)), "Return the value after the last SetTarget event based on the curve"); +} + +TEST(AudioEventTimeline, AfterLastTargetValueEventWithValueSet) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetValue(50.f); + timeline.SetTargetAtTime(20.0f, 1.0, 5.0, rv); + + // When using SetTargetValueAtTime, Timeline become stateful: the value for + // time t may depend on the time t-1, so we can't just query the value at a + // time and get the right value. We have to call GetValueAtTime for the + // previous times. 
+ for (double i = 0.0; i < 9.99; i+=0.01) { + timeline.GetValueAtTime(i); + } + + is(timeline.GetValueAtTime(10.), (20.f + (50.f - 20.f) * expf(-9.0f / 5.0f)), "Return the value after SetValue and the last SetTarget event based on the curve"); +} + +TEST(AudioEventTimeline, Value) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + is(timeline.Value(), 10.0f, "value should initially match the default value"); + timeline.SetValue(20.0f); + is(timeline.Value(), 20.0f, "Should be able to set the value"); + timeline.SetValueAtTime(20.0f, 1.0, rv); + // TODO: The following check needs to change when we compute the value based on the current time of the context + is(timeline.Value(), 20.0f, "TODO..."); + timeline.SetValue(30.0f); + is(timeline.Value(), 20.0f, "Should not be able to set the value"); +} + +TEST(AudioEventTimeline, LinearRampAtZero) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.LinearRampToValueAtTime(20.0f, 0.0, rv); + is(timeline.GetValueAtTime(0.0), 20.0f, "Should get the correct value when t0 == t1 == 0"); +} + +TEST(AudioEventTimeline, ExponentialRampAtZero) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.ExponentialRampToValueAtTime(20.0f, 0.0, rv); + is(timeline.GetValueAtTime(0.0), 20.0f, "Should get the correct value when t0 == t1 == 0"); +} + +TEST(AudioEventTimeline, LinearRampAtSameTime) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetValueAtTime(5.0f, 1.0, rv); + timeline.LinearRampToValueAtTime(20.0f, 1.0, rv); + is(timeline.GetValueAtTime(1.0), 20.0f, "Should get the correct value when t0 == t1"); +} + +TEST(AudioEventTimeline, ExponentialRampAtSameTime) +{ + Timeline timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetValueAtTime(5.0f, 1.0, rv); + timeline.ExponentialRampToValueAtTime(20.0f, 1.0, rv); + is(timeline.GetValueAtTime(1.0), 20.0f, "Should get the correct value when t0 == t1"); +} + +TEST(AudioEventTimeline, SetTargetZeroTimeConstant) +{ + Timeline 
timeline(10.0f); + + ErrorResultMock rv; + + timeline.SetTargetAtTime(20.0f, 1.0, 0.0, rv); + is(timeline.GetValueAtTime(1.0), 20.0f, "Should get the correct value when t0 == t1"); +} + +TEST(AudioEventTimeline, ExponentialInvalidPreviousZeroValue) +{ + Timeline timeline(0.f); + + ErrorResultMock rv; + + timeline.ExponentialRampToValueAtTime(1.f, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.SetValue(1.f); + rv = NS_OK; + timeline.ExponentialRampToValueAtTime(1.f, 1.0, rv); + is(rv, NS_OK, "Should succeed this time"); + timeline.CancelScheduledValues(0.0); + is(timeline.GetEventCount(), 0u, "Should have no events scheduled"); + rv = NS_OK; + timeline.SetValueAtTime(0.f, 0.5, rv); + is(rv, NS_OK, "Should succeed"); + timeline.ExponentialRampToValueAtTime(1.f, 1.0, rv); + is(rv, NS_ERROR_DOM_SYNTAX_ERR, "Correct error code returned"); + timeline.CancelScheduledValues(0.0); + is(timeline.GetEventCount(), 0u, "Should have no events scheduled"); + rv = NS_OK; + timeline.ExponentialRampToValueAtTime(1.f, 1.0, rv); + is(rv, NS_OK, "Should succeed this time"); +} + +TEST(AudioEventTimeline, SettingValueCurveTwice) +{ + Timeline timeline(0.f); + float curve[] = { -1.0f, 0.0f, 1.0f }; + + ErrorResultMock rv; + + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), 0.0f, 0.3f, rv); + timeline.SetValueCurveAtTime(curve, ArrayLength(curve), 0.0f, 0.3f, rv); + is(rv, NS_OK, "SetValueCurveAtTime succeeded"); +} + diff --git a/dom/media/webaudio/gtest/moz.build b/dom/media/webaudio/gtest/moz.build new file mode 100644 index 000000000..2cc13b038 --- /dev/null +++ b/dom/media/webaudio/gtest/moz.build @@ -0,0 +1,15 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +UNIFIED_SOURCES += [ + 'TestAudioEventTimeline.cpp', +] + +LOCAL_INCLUDES += [ + '..', +] + +FINAL_LIBRARY = 'xul-gtest' diff --git a/dom/media/webaudio/moz.build b/dom/media/webaudio/moz.build new file mode 100644 index 000000000..d1a9f5680 --- /dev/null +++ b/dom/media/webaudio/moz.build @@ -0,0 +1,142 @@ +# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*- +# vim: set filetype=python: +# This Source Code Form is subject to the terms of the Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. + +with Files('*'): + BUG_COMPONENT = ('Core', 'Web Audio') + +DIRS += ['blink'] + +TEST_DIRS += ['gtest'] + +MOCHITEST_MANIFESTS += [ + 'test/blink/mochitest.ini', + 'test/mochitest.ini', +] + +BROWSER_CHROME_MANIFESTS += [ + 'test/browser.ini', +] + +TEST_HARNESS_FILES.testing.mochitest.tests.dom.media.webaudio.test.blink += [ + 'test/blink/audio-testing.js', + 'test/blink/convolution-testing.js', + 'test/blink/panner-model-testing.js', +] + +EXPORTS += [ + 'AlignedTArray.h', + 'AudioBlock.h', + 'AudioEventTimeline.h', + 'AudioNodeEngine.h', + 'AudioNodeExternalInputStream.h', + 'AudioNodeStream.h', + 'AudioParamTimeline.h', + 'MediaBufferDecoder.h', + 'ThreeDPoint.h', + 'WebAudioUtils.h', +] + +EXPORTS.mozilla += [ + 'FFTBlock.h', + 'MediaStreamAudioDestinationNode.h', +] + +EXPORTS.mozilla.dom += [ + 'AnalyserNode.h', + 'AudioBuffer.h', + 'AudioBufferSourceNode.h', + 'AudioContext.h', + 'AudioDestinationNode.h', + 'AudioListener.h', + 'AudioNode.h', + 'AudioParam.h', + 'AudioProcessingEvent.h', + 'BiquadFilterNode.h', + 'ChannelMergerNode.h', + 'ChannelSplitterNode.h', + 'ConstantSourceNode.h', + 'ConvolverNode.h', + 'DelayNode.h', + 'DynamicsCompressorNode.h', + 'GainNode.h', + 'IIRFilterNode.h', + 'MediaElementAudioSourceNode.h', + 'MediaStreamAudioDestinationNode.h', + 'MediaStreamAudioSourceNode.h', + 'OfflineAudioCompletionEvent.h', + 'OscillatorNode.h', + 
'PannerNode.h', + 'PeriodicWave.h', + 'ScriptProcessorNode.h', + 'StereoPannerNode.h', + 'WaveShaperNode.h', +] + +UNIFIED_SOURCES += [ + 'AnalyserNode.cpp', + 'AudioBlock.cpp', + 'AudioBuffer.cpp', + 'AudioBufferSourceNode.cpp', + 'AudioContext.cpp', + 'AudioDestinationNode.cpp', + 'AudioEventTimeline.cpp', + 'AudioListener.cpp', + 'AudioNode.cpp', + 'AudioNodeEngine.cpp', + 'AudioNodeExternalInputStream.cpp', + 'AudioNodeStream.cpp', + 'AudioParam.cpp', + 'AudioProcessingEvent.cpp', + 'BiquadFilterNode.cpp', + 'BufferDecoder.cpp', + 'ChannelMergerNode.cpp', + 'ChannelSplitterNode.cpp', + 'ConstantSourceNode.cpp', + 'ConvolverNode.cpp', + 'DelayBuffer.cpp', + 'DelayNode.cpp', + 'DynamicsCompressorNode.cpp', + 'FFTBlock.cpp', + 'GainNode.cpp', + 'IIRFilterNode.cpp', + 'MediaBufferDecoder.cpp', + 'MediaElementAudioSourceNode.cpp', + 'MediaStreamAudioDestinationNode.cpp', + 'MediaStreamAudioSourceNode.cpp', + 'OfflineAudioCompletionEvent.cpp', + 'OscillatorNode.cpp', + 'PannerNode.cpp', + 'PeriodicWave.cpp', + 'ScriptProcessorNode.cpp', + 'StereoPannerNode.cpp', + 'ThreeDPoint.cpp', + 'WaveShaperNode.cpp', + 'WebAudioUtils.cpp', +] + +if CONFIG['CPU_ARCH'] == 'arm' and CONFIG['BUILD_ARM_NEON']: + SOURCES += ['AudioNodeEngineNEON.cpp'] + SOURCES['AudioNodeEngineNEON.cpp'].flags += CONFIG['NEON_FLAGS'] + LOCAL_INCLUDES += [ + '/media/openmax_dl/dl/api/' + ] + +# Are we targeting x86 or x64? If so, build SSE2 files. +if CONFIG['INTEL_ARCHITECTURE']: + SOURCES += ['AudioNodeEngineSSE2.cpp'] + DEFINES['USE_SSE2'] = True + SOURCES['AudioNodeEngineSSE2.cpp'].flags += CONFIG['SSE2_FLAGS'] + + +include('/ipc/chromium/chromium-config.mozbuild') + +FINAL_LIBRARY = 'xul' +LOCAL_INCLUDES += [ + '..' 
+] + +if CONFIG['GNU_CXX']: + CXXFLAGS += ['-Wno-error=shadow'] diff --git a/dom/media/webaudio/test/audio-expected.wav b/dom/media/webaudio/test/audio-expected.wav Binary files differnew file mode 100644 index 000000000..151927077 --- /dev/null +++ b/dom/media/webaudio/test/audio-expected.wav diff --git a/dom/media/webaudio/test/audio-mono-expected-2.wav b/dom/media/webaudio/test/audio-mono-expected-2.wav Binary files differnew file mode 100644 index 000000000..68c90dfa1 --- /dev/null +++ b/dom/media/webaudio/test/audio-mono-expected-2.wav diff --git a/dom/media/webaudio/test/audio-mono-expected.wav b/dom/media/webaudio/test/audio-mono-expected.wav Binary files differnew file mode 100644 index 000000000..bf00e5cdf --- /dev/null +++ b/dom/media/webaudio/test/audio-mono-expected.wav diff --git a/dom/media/webaudio/test/audio-quad.wav b/dom/media/webaudio/test/audio-quad.wav Binary files differnew file mode 100644 index 000000000..093f0197a --- /dev/null +++ b/dom/media/webaudio/test/audio-quad.wav diff --git a/dom/media/webaudio/test/audio.ogv b/dom/media/webaudio/test/audio.ogv Binary files differnew file mode 100644 index 000000000..68dee3cf2 --- /dev/null +++ b/dom/media/webaudio/test/audio.ogv diff --git a/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js new file mode 100644 index 000000000..2a5a4bff8 --- /dev/null +++ b/dom/media/webaudio/test/audioBufferSourceNodeDetached_worker.js @@ -0,0 +1,3 @@ +onmessage = function (event) { + postMessage("Pong"); +}; diff --git a/dom/media/webaudio/test/audiovideo.mp4 b/dom/media/webaudio/test/audiovideo.mp4 Binary files differnew file mode 100644 index 000000000..fe93122d2 --- /dev/null +++ b/dom/media/webaudio/test/audiovideo.mp4 diff --git a/dom/media/webaudio/test/blink/README b/dom/media/webaudio/test/blink/README new file mode 100644 index 000000000..1d819221f --- /dev/null +++ b/dom/media/webaudio/test/blink/README @@ -0,0 +1,9 @@ 
+This directory contains tests originally borrowed from the Blink Web Audio test +suite. + +The process of borrowing tests from Blink is as follows: + +* Import the pristine file from the Blink repo, noting the revision in the + commit message. +* Modify the test files to turn the LayoutTest into a mochitest-plain and add +* them to the test suite in a separate commit. diff --git a/dom/media/webaudio/test/blink/audio-testing.js b/dom/media/webaudio/test/blink/audio-testing.js new file mode 100644 index 000000000..c66d32c7f --- /dev/null +++ b/dom/media/webaudio/test/blink/audio-testing.js @@ -0,0 +1,192 @@ +if (window.testRunner) + testRunner.overridePreference("WebKitWebAudioEnabled", "1"); + +function writeString(s, a, offset) { + for (var i = 0; i < s.length; ++i) { + a[offset + i] = s.charCodeAt(i); + } +} + +function writeInt16(n, a, offset) { + n = Math.floor(n); + + var b1 = n & 255; + var b2 = (n >> 8) & 255; + + a[offset + 0] = b1; + a[offset + 1] = b2; +} + +function writeInt32(n, a, offset) { + n = Math.floor(n); + var b1 = n & 255; + var b2 = (n >> 8) & 255; + var b3 = (n >> 16) & 255; + var b4 = (n >> 24) & 255; + + a[offset + 0] = b1; + a[offset + 1] = b2; + a[offset + 2] = b3; + a[offset + 3] = b4; +} + +function writeAudioBuffer(audioBuffer, a, offset) { + var n = audioBuffer.length; + var channels = audioBuffer.numberOfChannels; + + for (var i = 0; i < n; ++i) { + for (var k = 0; k < channels; ++k) { + var buffer = audioBuffer.getChannelData(k); + var sample = buffer[i] * 32768.0; + + // Clip samples to the limitations of 16-bit. + // If we don't do this then we'll get nasty wrap-around distortion. 
+ if (sample < -32768) + sample = -32768; + if (sample > 32767) + sample = 32767; + + writeInt16(sample, a, offset); + offset += 2; + } + } +} + +function createWaveFileData(audioBuffer) { + var frameLength = audioBuffer.length; + var numberOfChannels = audioBuffer.numberOfChannels; + var sampleRate = audioBuffer.sampleRate; + var bitsPerSample = 16; + var byteRate = sampleRate * numberOfChannels * bitsPerSample/8; + var blockAlign = numberOfChannels * bitsPerSample/8; + var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio + var headerByteLength = 44; + var totalLength = headerByteLength + wavDataByteLength; + + var waveFileData = new Uint8Array(totalLength); + + var subChunk1Size = 16; // for linear PCM + var subChunk2Size = wavDataByteLength; + var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size); + + writeString("RIFF", waveFileData, 0); + writeInt32(chunkSize, waveFileData, 4); + writeString("WAVE", waveFileData, 8); + writeString("fmt ", waveFileData, 12); + + writeInt32(subChunk1Size, waveFileData, 16); // SubChunk1Size (4) + writeInt16(1, waveFileData, 20); // AudioFormat (2) + writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2) + writeInt32(sampleRate, waveFileData, 24); // SampleRate (4) + writeInt32(byteRate, waveFileData, 28); // ByteRate (4) + writeInt16(blockAlign, waveFileData, 32); // BlockAlign (2) + writeInt32(bitsPerSample, waveFileData, 34); // BitsPerSample (4) + + writeString("data", waveFileData, 36); + writeInt32(subChunk2Size, waveFileData, 40); // SubChunk2Size (4) + + // Write actual audio data starting at offset 44. 
+ writeAudioBuffer(audioBuffer, waveFileData, 44); + + return waveFileData; +} + +function createAudioData(audioBuffer) { + return createWaveFileData(audioBuffer); +} + +function finishAudioTest(event) { + var audioData = createAudioData(event.renderedBuffer); + testRunner.setAudioData(audioData); + testRunner.notifyDone(); +} + +// Create an impulse in a buffer of length sampleFrameLength +function createImpulseBuffer(context, sampleFrameLength) { + var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); + var n = audioBuffer.length; + var dataL = audioBuffer.getChannelData(0); + + for (var k = 0; k < n; ++k) { + dataL[k] = 0; + } + dataL[0] = 1; + + return audioBuffer; +} + +// Create a buffer of the given length with a linear ramp having values 0 <= x < 1. +function createLinearRampBuffer(context, sampleFrameLength) { + var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); + var n = audioBuffer.length; + var dataL = audioBuffer.getChannelData(0); + + for (var i = 0; i < n; ++i) + dataL[i] = i / n; + + return audioBuffer; +} + +// Create a buffer of the given length having a constant value. 
+function createConstantBuffer(context, sampleFrameLength, constantValue) { + var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); + var n = audioBuffer.length; + var dataL = audioBuffer.getChannelData(0); + + for (var i = 0; i < n; ++i) + dataL[i] = constantValue; + + return audioBuffer; +} + +// Create a stereo impulse in a buffer of length sampleFrameLength +function createStereoImpulseBuffer(context, sampleFrameLength) { + var audioBuffer = context.createBuffer(2, sampleFrameLength, context.sampleRate); + var n = audioBuffer.length; + var dataL = audioBuffer.getChannelData(0); + var dataR = audioBuffer.getChannelData(1); + + for (var k = 0; k < n; ++k) { + dataL[k] = 0; + dataR[k] = 0; + } + dataL[0] = 1; + dataR[0] = 1; + + return audioBuffer; +} + +// Convert time (in seconds) to sample frames. +function timeToSampleFrame(time, sampleRate) { + return Math.floor(0.5 + time * sampleRate); +} + +// Compute the number of sample frames consumed by start with +// the specified |grainOffset|, |duration|, and |sampleRate|. 
+function grainLengthInSampleFrames(grainOffset, duration, sampleRate) { + var startFrame = timeToSampleFrame(grainOffset, sampleRate); + var endFrame = timeToSampleFrame(grainOffset + duration, sampleRate); + + return endFrame - startFrame; +} + +// True if the number is not an infinity or NaN +function isValidNumber(x) { + return !isNaN(x) && (x != Infinity) && (x != -Infinity); +} + +function shouldThrowTypeError(func, text) { + var ok = false; + try { + func(); + } catch (e) { + if (e instanceof TypeError) { + ok = true; + } + } + if (ok) { + testPassed(text + " threw TypeError."); + } else { + testFailed(text + " should throw TypeError."); + } +} diff --git a/dom/media/webaudio/test/blink/biquad-filters.js b/dom/media/webaudio/test/blink/biquad-filters.js new file mode 100644 index 000000000..06fff98b1 --- /dev/null +++ b/dom/media/webaudio/test/blink/biquad-filters.js @@ -0,0 +1,368 @@ +// Taken from WebKit/LayoutTests/webaudio/resources/biquad-filters.js + +// A biquad filter has a z-transform of +// H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2) +// +// The formulas for the various filters were taken from +// http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt. + + +// Lowpass filter. +function createLowpassFilter(freq, q, gain) { + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + + if (freq == 1) { + // The formula below works, except for roundoff. When freq = 1, + // the filter is just a wire, so hardwire the coefficients. 
+ b0 = 1; + b1 = 0; + b2 = 0; + a0 = 1; + a1 = 0; + a2 = 0; + } else { + var w0 = Math.PI * freq; + var alpha = 0.5 * Math.sin(w0) / Math.pow(10, q / 20); + var cos_w0 = Math.cos(w0); + + b0 = 0.5 * (1 - cos_w0); + b1 = 1 - cos_w0; + b2 = b0; + a0 = 1 + alpha; + a1 = -2.0 * cos_w0; + a2 = 1 - alpha; + } + + return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); +} + +function createHighpassFilter(freq, q, gain) { + var b0; + var b1; + var b2; + var a1; + var a2; + + if (freq == 1) { + // The filter is 0 + b0 = 0; + b1 = 0; + b2 = 0; + a0 = 1; + a1 = 0; + a2 = 0; + } else if (freq == 0) { + // The filter is 1. Computation of coefficients below is ok, but + // there's a pole at 1 and a zero at 1, so round-off could make + // the filter unstable. + b0 = 1; + b1 = 0; + b2 = 0; + a0 = 1; + a1 = 0; + a2 = 0; + } else { + var w0 = Math.PI * freq; + var alpha = 0.5 * Math.sin(w0) / Math.pow(10, q / 20); + var cos_w0 = Math.cos(w0); + + b0 = 0.5 * (1 + cos_w0); + b1 = -1 - cos_w0; + b2 = b0; + a0 = 1 + alpha; + a1 = -2.0 * cos_w0; + a2 = 1 - alpha; + } + + return normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); +} + +function normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2) { + var scale = 1 / a0; + + return {b0 : b0 * scale, + b1 : b1 * scale, + b2 : b2 * scale, + a1 : a1 * scale, + a2 : a2 * scale}; +} + +function createBandpassFilter(freq, q, gain) { + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + var coef; + + if (freq > 0 && freq < 1) { + var w0 = Math.PI * freq; + if (q > 0) { + var alpha = Math.sin(w0) / (2 * q); + var k = Math.cos(w0); + + b0 = alpha; + b1 = 0; + b2 = -alpha; + a0 = 1 + alpha; + a1 = -2 * k; + a2 = 1 - alpha; + + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // q = 0, and frequency is not 0 or 1. The above formula has a + // divide by zero problem. The limit of the z-transform as q + // approaches 0 is 1, so set the filter that way. 
+ coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + } else { + // When freq = 0 or 1, the z-transform is identically 0, + // independent of q. + coef = {b0 : 0, b1 : 0, b2 : 0, a1 : 0, a2 : 0} + } + + return coef; +} + +function createLowShelfFilter(freq, q, gain) { + // q not used + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + var coef; + + var S = 1; + var A = Math.pow(10, gain / 40); + + if (freq == 1) { + // The filter is just a constant gain + coef = {b0 : A * A, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } else if (freq == 0) { + // The filter is 1 + coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } else { + var w0 = Math.PI * freq; + var alpha = 1 / 2 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2); + var k = Math.cos(w0); + var k2 = 2 * Math.sqrt(A) * alpha; + var Ap1 = A + 1; + var Am1 = A - 1; + + b0 = A * (Ap1 - Am1 * k + k2); + b1 = 2 * A * (Am1 - Ap1 * k); + b2 = A * (Ap1 - Am1 * k - k2); + a0 = Ap1 + Am1 * k + k2; + a1 = -2 * (Am1 + Ap1 * k); + a2 = Ap1 + Am1 * k - k2; + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } + + return coef; +} + +function createHighShelfFilter(freq, q, gain) { + // q not used + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + var coef; + + var A = Math.pow(10, gain / 40); + + if (freq == 1) { + // When freq = 1, the z-transform is 1 + coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } else if (freq > 0) { + var w0 = Math.PI * freq; + var S = 1; + var alpha = 0.5 * Math.sin(w0) * Math.sqrt((A + 1 / A) * (1 / S - 1) + 2); + var k = Math.cos(w0); + var k2 = 2 * Math.sqrt(A) * alpha; + var Ap1 = A + 1; + var Am1 = A - 1; + + b0 = A * (Ap1 + Am1 * k + k2); + b1 = -2 * A * (Am1 + Ap1 * k); + b2 = A * (Ap1 + Am1 * k - k2); + a0 = Ap1 - Am1 * k + k2; + a1 = 2 * (Am1 - Ap1*k); + a2 = Ap1 - Am1 * k-k2; + + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When freq = 0, the filter is just a gain + coef = {b0 : A * A, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + 
+ return coef; +} + +function createPeakingFilter(freq, q, gain) { + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + var coef; + + var A = Math.pow(10, gain / 40); + + if (freq > 0 && freq < 1) { + if (q > 0) { + var w0 = Math.PI * freq; + var alpha = Math.sin(w0) / (2 * q); + var k = Math.cos(w0); + + b0 = 1 + alpha * A; + b1 = -2 * k; + b2 = 1 - alpha * A; + a0 = 1 + alpha / A; + a1 = -2 * k; + a2 = 1 - alpha / A; + + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // q = 0, we have a divide by zero problem in the formulas + // above. But if we look at the z-transform, we see that the + // limit as q approaches 0 is A^2. + coef = {b0 : A * A, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + } else { + // freq = 0 or 1, the z-transform is 1 + coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + + return coef; +} + +function createNotchFilter(freq, q, gain) { + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + var coef; + + if (freq > 0 && freq < 1) { + if (q > 0) { + var w0 = Math.PI * freq; + var alpha = Math.sin(w0) / (2 * q); + var k = Math.cos(w0); + + b0 = 1; + b1 = -2 * k; + b2 = 1; + a0 = 1 + alpha; + a1 = -2 * k; + a2 = 1 - alpha; + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // When q = 0, we get a divide by zero above. The limit of the + // z-transform as q approaches 0 is 0, so set the coefficients + // appropriately. 
+ coef = {b0 : 0, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + } else { + // When freq = 0 or 1, the z-transform is 1 + coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + + return coef; +} + +function createAllpassFilter(freq, q, gain) { + var b0; + var b1; + var b2; + var a0; + var a1; + var a2; + var coef; + + if (freq > 0 && freq < 1) { + if (q > 0) { + var w0 = Math.PI * freq; + var alpha = Math.sin(w0) / (2 * q); + var k = Math.cos(w0); + + b0 = 1 - alpha; + b1 = -2 * k; + b2 = 1 + alpha; + a0 = 1 + alpha; + a1 = -2 * k; + a2 = 1 - alpha; + coef = normalizeFilterCoefficients(b0, b1, b2, a0, a1, a2); + } else { + // q = 0 + coef = {b0 : -1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + } else { + coef = {b0 : 1, b1 : 0, b2 : 0, a1 : 0, a2 : 0}; + } + + return coef; +} + +function filterData(filterCoef, signal, len) { + var y = new Array(len); + var b0 = filterCoef.b0; + var b1 = filterCoef.b1; + var b2 = filterCoef.b2; + var a1 = filterCoef.a1; + var a2 = filterCoef.a2; + + // Prime the pump. (Assumes the signal has length >= 2!) + y[0] = b0 * signal[0]; + y[1] = b0 * signal[1] + b1 * signal[0] - a1 * y[0]; + + // Filter all of the signal that we have. + for (var k = 2; k < Math.min(signal.length, len); ++k) { + y[k] = b0 * signal[k] + b1 * signal[k-1] + b2 * signal[k-2] - a1 * y[k-1] - a2 * y[k-2]; + } + + // If we need to filter more, but don't have any signal left, + // assume the signal is zero. + for (var k = signal.length; k < len; ++k) { + y[k] = - a1 * y[k-1] - a2 * y[k-2]; + } + + return y; +} + +// Map the filter type name to a function that computes the filter coefficents for the given filter +// type. 
+var filterCreatorFunction = {"lowpass": createLowpassFilter, + "highpass": createHighpassFilter, + "bandpass": createBandpassFilter, + "lowshelf": createLowShelfFilter, + "highshelf": createHighShelfFilter, + "peaking": createPeakingFilter, + "notch": createNotchFilter, + "allpass": createAllpassFilter}; + +var filterTypeName = {"lowpass": "Lowpass filter", + "highpass": "Highpass filter", + "bandpass": "Bandpass filter", + "lowshelf": "Lowshelf filter", + "highshelf": "Highshelf filter", + "peaking": "Peaking filter", + "notch": "Notch filter", + "allpass": "Allpass filter"}; + +function createFilter(filterType, freq, q, gain) { + return filterCreatorFunction[filterType](freq, q, gain); +} diff --git a/dom/media/webaudio/test/blink/biquad-testing.js b/dom/media/webaudio/test/blink/biquad-testing.js new file mode 100644 index 000000000..795adf601 --- /dev/null +++ b/dom/media/webaudio/test/blink/biquad-testing.js @@ -0,0 +1,153 @@ +// Globals, to make testing and debugging easier. +var context; +var filter; +var signal; +var renderedBuffer; +var renderedData; + +var sampleRate = 44100.0; +var pulseLengthFrames = .1 * sampleRate; + +// Maximum allowed error for the test to succeed. Experimentally determined. +var maxAllowedError = 5.9e-8; + +// This must be large enough so that the filtered result is +// essentially zero. See comments for createTestAndRun. +var timeStep = .1; + +// Maximum number of filters we can process (mostly for setting the +// render length correctly.) +var maxFilters = 5; + +// How long to render. Must be long enough for all of the filters we +// want to test. +var renderLengthSeconds = timeStep * (maxFilters + 1) ; + +var renderLengthSamples = Math.round(renderLengthSeconds * sampleRate); + +// Number of filters that will be processed. 
+var nFilters; + +function createImpulseBuffer(context, length) { + var impulse = context.createBuffer(1, length, context.sampleRate); + var data = impulse.getChannelData(0); + for (var k = 1; k < data.length; ++k) { + data[k] = 0; + } + data[0] = 1; + + return impulse; +} + + +function createTestAndRun(context, filterType, filterParameters) { + // To test the filters, we apply a signal (an impulse) to each of + // the specified filters, with each signal starting at a different + // time. The output of the filters is summed together at the + // output. Thus for filter k, the signal input to the filter + // starts at time k * timeStep. For this to work well, timeStep + // must be large enough for the output of each filter to have + // decayed to zero with timeStep seconds. That way the filter + // outputs don't interfere with each other. + + nFilters = Math.min(filterParameters.length, maxFilters); + + signal = new Array(nFilters); + filter = new Array(nFilters); + + impulse = createImpulseBuffer(context, pulseLengthFrames); + + // Create all of the signal sources and filters that we need. + for (var k = 0; k < nFilters; ++k) { + signal[k] = context.createBufferSource(); + signal[k].buffer = impulse; + + filter[k] = context.createBiquadFilter(); + filter[k].type = filterType; + filter[k].frequency.value = context.sampleRate / 2 * filterParameters[k].cutoff; + filter[k].detune.value = (filterParameters[k].detune === undefined) ? 0 : filterParameters[k].detune; + filter[k].Q.value = filterParameters[k].q; + filter[k].gain.value = filterParameters[k].gain; + + signal[k].connect(filter[k]); + filter[k].connect(context.destination); + + signal[k].start(timeStep * k); + } + + context.oncomplete = checkFilterResponse(filterType, filterParameters); + context.startRendering(); +} + +function addSignal(dest, src, destOffset) { + // Add src to dest at the given dest offset. 
+ for (var k = destOffset, j = 0; k < dest.length, j < src.length; ++k, ++j) { + dest[k] += src[j]; + } +} + +function generateReference(filterType, filterParameters) { + var result = new Array(renderLengthSamples); + var data = new Array(renderLengthSamples); + // Initialize the result array and data. + for (var k = 0; k < result.length; ++k) { + result[k] = 0; + data[k] = 0; + } + // Make data an impulse. + data[0] = 1; + + for (var k = 0; k < nFilters; ++k) { + // Filter an impulse + var detune = (filterParameters[k].detune === undefined) ? 0 : filterParameters[k].detune; + var frequency = filterParameters[k].cutoff * Math.pow(2, detune / 1200); // Apply detune, converting from Cents. + + var filterCoef = createFilter(filterType, + frequency, + filterParameters[k].q, + filterParameters[k].gain); + var y = filterData(filterCoef, data, renderLengthSamples); + + // Accumulate this filtered data into the final output at the desired offset. + addSignal(result, y, timeToSampleFrame(timeStep * k, sampleRate)); + } + + return result; +} + +function checkFilterResponse(filterType, filterParameters) { + return function(event) { + renderedBuffer = event.renderedBuffer; + renderedData = renderedBuffer.getChannelData(0); + + reference = generateReference(filterType, filterParameters); + + var len = Math.min(renderedData.length, reference.length); + + var success = true; + + // Maximum error between rendered data and expected data + var maxError = 0; + + // Sample offset where the maximum error occurred. + var maxPosition = 0; + + // Number of infinities or NaNs that occurred in the rendered data. + var invalidNumberCount = 0; + + ok(nFilters == filterParameters.length, "Test wanted " + filterParameters.length + " filters but only " + maxFilters + " allowed."); + + compareChannels(renderedData, reference, len, 0, 0, true); + + // Check for bad numbers in the rendered output too. + // There shouldn't be any. 
+ for (var k = 0; k < len; ++k) { + if (!isValidNumber(renderedData[k])) { + ++invalidNumberCount; + } + } + + ok(invalidNumberCount == 0, "Rendered output has " + invalidNumberCount + " infinities or NaNs."); + SimpleTest.finish(); + } +} diff --git a/dom/media/webaudio/test/blink/convolution-testing.js b/dom/media/webaudio/test/blink/convolution-testing.js new file mode 100644 index 000000000..98ff0c775 --- /dev/null +++ b/dom/media/webaudio/test/blink/convolution-testing.js @@ -0,0 +1,182 @@ +var sampleRate = 44100.0; + +var renderLengthSeconds = 8; +var pulseLengthSeconds = 1; +var pulseLengthFrames = pulseLengthSeconds * sampleRate; + +function createSquarePulseBuffer(context, sampleFrameLength) { + var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); + + var n = audioBuffer.length; + var data = audioBuffer.getChannelData(0); + + for (var i = 0; i < n; ++i) + data[i] = 1; + + return audioBuffer; +} + +// The triangle buffer holds the expected result of the convolution. +// It linearly ramps up from 0 to its maximum value (at the center) +// then linearly ramps down to 0. The center value corresponds to the +// point where the two square pulses overlap the most. +function createTrianglePulseBuffer(context, sampleFrameLength) { + var audioBuffer = context.createBuffer(1, sampleFrameLength, context.sampleRate); + + var n = audioBuffer.length; + var halfLength = n / 2; + var data = audioBuffer.getChannelData(0); + + for (var i = 0; i < halfLength; ++i) + data[i] = i + 1; + + for (var i = halfLength; i < n; ++i) + data[i] = n - i - 1; + + return audioBuffer; +} + +function log10(x) { + return Math.log(x)/Math.LN10; +} + +function linearToDecibel(x) { + return 20*log10(x); +} + +// Verify that the rendered result is very close to the reference +// triangular pulse. 
+function checkTriangularPulse(rendered, reference) { + var match = true; + var maxDelta = 0; + var valueAtMaxDelta = 0; + var maxDeltaIndex = 0; + + for (var i = 0; i < reference.length; ++i) { + var diff = rendered[i] - reference[i]; + var x = Math.abs(diff); + if (x > maxDelta) { + maxDelta = x; + valueAtMaxDelta = reference[i]; + maxDeltaIndex = i; + } + } + + // allowedDeviationFraction was determined experimentally. It + // is the threshold of the relative error at the maximum + // difference between the true triangular pulse and the + // rendered pulse. + var allowedDeviationDecibels = -129.4; + var maxDeviationDecibels = linearToDecibel(maxDelta / valueAtMaxDelta); + + if (maxDeviationDecibels <= allowedDeviationDecibels) { + testPassed("Triangular portion of convolution is correct."); + } else { + testFailed("Triangular portion of convolution is not correct. Max deviation = " + maxDeviationDecibels + " dB at " + maxDeltaIndex); + match = false; + } + + return match; +} + +// Verify that the rendered data is close to zero for the first part +// of the tail. +function checkTail1(data, reference, breakpoint) { + var isZero = true; + var tail1Max = 0; + + for (var i = reference.length; i < reference.length + breakpoint; ++i) { + var mag = Math.abs(data[i]); + if (mag > tail1Max) { + tail1Max = mag; + } + } + + // Let's find the peak of the reference (even though we know a + // priori what it is). + var refMax = 0; + for (var i = 0; i < reference.length; ++i) { + refMax = Math.max(refMax, Math.abs(reference[i])); + } + + // This threshold is experimentally determined by examining the + // value of tail1MaxDecibels. 
+ var threshold1 = -129.7; + + var tail1MaxDecibels = linearToDecibel(tail1Max/refMax); + if (tail1MaxDecibels <= threshold1) { + testPassed("First part of tail of convolution is sufficiently small."); + } else { + testFailed("First part of tail of convolution is not sufficiently small: " + tail1MaxDecibels + " dB"); + isZero = false; + } + + return isZero; +} + +// Verify that the second part of the tail of the convolution is +// exactly zero. +function checkTail2(data, reference, breakpoint) { + var isZero = true; + var tail2Max = 0; + // For the second part of the tail, the maximum value should be + // exactly zero. + var threshold2 = 0; + for (var i = reference.length + breakpoint; i < data.length; ++i) { + if (Math.abs(data[i]) > 0) { + isZero = false; + break; + } + } + + if (isZero) { + testPassed("Rendered signal after tail of convolution is silent."); + } else { + testFailed("Rendered signal after tail of convolution should be silent."); + } + + return isZero; +} + +function checkConvolvedResult(trianglePulse) { + return function(event) { + var renderedBuffer = event.renderedBuffer; + + var referenceData = trianglePulse.getChannelData(0); + var renderedData = renderedBuffer.getChannelData(0); + + var success = true; + + // Verify the triangular pulse is actually triangular. + + success = success && checkTriangularPulse(renderedData, referenceData); + + // Make sure that portion after convolved portion is totally + // silent. But round-off prevents this from being completely + // true. At the end of the triangle, it should be close to + // zero. If we go farther out, it should be even closer and + // eventually zero. + + // For the tail of the convolution (where the result would be + // theoretically zero), we partition the tail into two + // parts. The first is the at the beginning of the tail, + // where we tolerate a small but non-zero value. The second part is + // farther along the tail where the result should be zero. 
+ + // breakpoint is the point dividing the first two tail parts + // we're looking at. Experimentally determined. + var breakpoint = 12800; + + success = success && checkTail1(renderedData, referenceData, breakpoint); + + success = success && checkTail2(renderedData, referenceData, breakpoint); + + if (success) { + testPassed("Test signal was correctly convolved."); + } else { + testFailed("Test signal was not correctly convolved."); + } + + finishJSTest(); + } +} diff --git a/dom/media/webaudio/test/blink/mochitest.ini b/dom/media/webaudio/test/blink/mochitest.ini new file mode 100644 index 000000000..28bceb3a4 --- /dev/null +++ b/dom/media/webaudio/test/blink/mochitest.ini @@ -0,0 +1,23 @@ +[DEFAULT] +tags=msg +tags = webaudio +subsuite = media +support-files = + biquad-filters.js + biquad-testing.js + ../webaudio.js + +[test_biquadFilterNodeAllPass.html] +[test_biquadFilterNodeAutomation.html] +skip-if = true # Known problems with Biquad automation, e.g. Bug 1155709 +[test_biquadFilterNodeBandPass.html] +[test_biquadFilterNodeGetFrequencyResponse.html] +[test_biquadFilterNodeHighPass.html] +[test_biquadFilterNodeHighShelf.html] +[test_biquadFilterNodeLowPass.html] +[test_biquadFilterNodeLowShelf.html] +[test_biquadFilterNodeNotch.html] +[test_biquadFilterNodePeaking.html] +[test_biquadFilterNodeTail.html] +[test_iirFilterNode.html] +[test_iirFilterNodeGetFrequencyResponse.html] diff --git a/dom/media/webaudio/test/blink/panner-model-testing.js b/dom/media/webaudio/test/blink/panner-model-testing.js new file mode 100644 index 000000000..45460e276 --- /dev/null +++ b/dom/media/webaudio/test/blink/panner-model-testing.js @@ -0,0 +1,210 @@ +var sampleRate = 48000.0; + +var numberOfChannels = 1; + +// Time step when each panner node starts. +var timeStep = 0.001; + +// Length of the impulse signal. 
+var pulseLengthFrames = Math.round(timeStep * sampleRate); + +// How many panner nodes to create for the test +var nodesToCreate = 100; + +// Be sure we render long enough for all of our nodes. +var renderLengthSeconds = timeStep * (nodesToCreate + 1); + +// These are global mostly for debugging. +var context; +var impulse; +var bufferSource; +var panner; +var position; +var time; + +var renderedBuffer; +var renderedLeft; +var renderedRight; + +function createGraph(context, nodeCount) { + bufferSource = new Array(nodeCount); + panner = new Array(nodeCount); + position = new Array(nodeCount); + time = new Array(nodeCount); + // Angle between panner locations. (nodeCount - 1 because we want + // to include both 0 and 180 deg. + var angleStep = Math.PI / (nodeCount - 1); + + if (numberOfChannels == 2) { + impulse = createStereoImpulseBuffer(context, pulseLengthFrames); + } + else + impulse = createImpulseBuffer(context, pulseLengthFrames); + + for (var k = 0; k < nodeCount; ++k) { + bufferSource[k] = context.createBufferSource(); + bufferSource[k].buffer = impulse; + + panner[k] = context.createPanner(); + panner[k].panningModel = "equalpower"; + panner[k].distanceModel = "linear"; + + var angle = angleStep * k; + position[k] = {angle : angle, x : Math.cos(angle), z : Math.sin(angle)}; + panner[k].positionX.value = position[k].x; + panner[k].positionZ.value = position[k].z; + + bufferSource[k].connect(panner[k]); + panner[k].connect(context.destination); + + // Start the source + time[k] = k * timeStep; + bufferSource[k].start(time[k]); + } +} + +function createTestAndRun(context, nodeCount, numberOfSourceChannels) { + numberOfChannels = numberOfSourceChannels; + + createGraph(context, nodeCount); + + context.oncomplete = checkResult; + context.startRendering(); +} + +// Map our position angle to the azimuth angle (in degrees). +// +// An angle of 0 corresponds to an azimuth of 90 deg; pi, to -90 deg. 
+function angleToAzimuth(angle) { + return 90 - angle * 180 / Math.PI; +} + +// The gain caused by the EQUALPOWER panning model +function equalPowerGain(angle) { + var azimuth = angleToAzimuth(angle); + + if (numberOfChannels == 1) { + var panPosition = (azimuth + 90) / 180; + + var gainL = Math.cos(0.5 * Math.PI * panPosition); + var gainR = Math.sin(0.5 * Math.PI * panPosition); + + return { left : gainL, right : gainR }; + } else { + if (azimuth <= 0) { + var panPosition = (azimuth + 90) / 90; + + var gainL = 1 + Math.cos(0.5 * Math.PI * panPosition); + var gainR = Math.sin(0.5 * Math.PI * panPosition); + + return { left : gainL, right : gainR }; + } else { + var panPosition = azimuth / 90; + + var gainL = Math.cos(0.5 * Math.PI * panPosition); + var gainR = 1 + Math.sin(0.5 * Math.PI * panPosition); + + return { left : gainL, right : gainR }; + } + } +} + +function checkResult(event) { + renderedBuffer = event.renderedBuffer; + renderedLeft = renderedBuffer.getChannelData(0); + renderedRight = renderedBuffer.getChannelData(1); + + // The max error we allow between the rendered impulse and the + // expected value. This value is experimentally determined. Set + // to 0 to make the test fail to see what the actual error is. + var maxAllowedError = 1.3e-6; + + var success = true; + + // Number of impulses found in the rendered result. + var impulseCount = 0; + + // Max (relative) error and the index of the maxima for the left + // and right channels. + var maxErrorL = 0; + var maxErrorIndexL = 0; + var maxErrorR = 0; + var maxErrorIndexR = 0; + + // Number of impulses that don't match our expected locations. + var timeCount = 0; + + // Locations of where the impulses aren't at the expected locations. + var timeErrors = new Array(); + + for (var k = 0; k < renderedLeft.length; ++k) { + // We assume that the left and right channels start at the same instant. + if (renderedLeft[k] != 0 || renderedRight[k] != 0) { + // The expected gain for the left and right channels. 
+ var pannerGain = equalPowerGain(position[impulseCount].angle); + var expectedL = pannerGain.left; + var expectedR = pannerGain.right; + + // Absolute error in the gain. + var errorL = Math.abs(renderedLeft[k] - expectedL); + var errorR = Math.abs(renderedRight[k] - expectedR); + + if (Math.abs(errorL) > maxErrorL) { + maxErrorL = Math.abs(errorL); + maxErrorIndexL = impulseCount; + } + if (Math.abs(errorR) > maxErrorR) { + maxErrorR = Math.abs(errorR); + maxErrorIndexR = impulseCount; + } + + // Keep track of the impulses that didn't show up where we + // expected them to be. + var expectedOffset = timeToSampleFrame(time[impulseCount], sampleRate); + if (k != expectedOffset) { + timeErrors[timeCount] = { actual : k, expected : expectedOffset}; + ++timeCount; + } + ++impulseCount; + } + } + + if (impulseCount == nodesToCreate) { + testPassed("Number of impulses matches the number of panner nodes."); + } else { + testFailed("Number of impulses is incorrect. (Found " + impulseCount + " but expected " + nodesToCreate + ")"); + success = false; + } + + if (timeErrors.length > 0) { + success = false; + testFailed(timeErrors.length + " timing errors found in " + nodesToCreate + " panner nodes."); + for (var k = 0; k < timeErrors.length; ++k) { + testFailed("Impulse at sample " + timeErrors[k].actual + " but expected " + timeErrors[k].expected); + } + } else { + testPassed("All impulses at expected offsets."); + } + + if (maxErrorL <= maxAllowedError) { + testPassed("Left channel gain values are correct."); + } else { + testFailed("Left channel gain values are incorrect. Max error = " + maxErrorL + " at time " + time[maxErrorIndexL] + " (threshold = " + maxAllowedError + ")"); + success = false; + } + + if (maxErrorR <= maxAllowedError) { + testPassed("Right channel gain values are correct."); + } else { + testFailed("Right channel gain values are incorrect. 
Max error = " + maxErrorR + " at time " + time[maxErrorIndexR] + " (threshold = " + maxAllowedError + ")"); + success = false; + } + + if (success) { + testPassed("EqualPower panner test passed"); + } else { + testFailed("EqualPower panner test failed"); + } + + finishJSTest(); +} diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html new file mode 100644 index 000000000..266521c52 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeAllPass.html @@ -0,0 +1,32 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode All Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. 
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + var filterParameters = [{cutoff : 0, q : 10, gain : 1 }, + {cutoff : 1, q : 10, gain : 1 }, + {cutoff : .5, q : 0, gain : 1 }, + {cutoff : 0.25, q : 10, gain : 1 }, + ]; + createTestAndRun(context, "allpass", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html new file mode 100644 index 000000000..08ce71cce --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeAutomation.html @@ -0,0 +1,351 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode All Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Don't need to run these tests at high sampling rate, so just use a low one to reduce memory + // usage and complexity. + var sampleRate = 16000; + + // How long to render for each test. + var renderDuration = 1; + + // The definition of the linear ramp automation function. + function linearRamp(t, v0, v1, t0, t1) { + return v0 + (v1 - v0) * (t - t0) / (t1 - t0); + } + + // Generate the filter coefficients for the specified filter using the given parameters for + // the given duration. |filterTypeFunction| is a function that returns the filter + // coefficients for one set of parameters. |parameters| is a property bag that contains the + // start and end values (as an array) for each of the biquad attributes. 
The properties are + // |freq|, |Q|, |gain|, and |detune|. |duration| is the number of seconds for which the + // coefficients are generated. + // + // A property bag with properties |b0|, |b1|, |b2|, |a1|, |a2|. Each propery is an array + // consisting of the coefficients for the time-varying biquad filter. + function generateFilterCoefficients(filterTypeFunction, parameters, duration) { + var endFrame = Math.ceil(duration * sampleRate); + var nCoef = endFrame; + var b0 = new Float64Array(nCoef); + var b1 = new Float64Array(nCoef); + var b2 = new Float64Array(nCoef); + var a1 = new Float64Array(nCoef); + var a2 = new Float64Array(nCoef); + + var k = 0; + // If the property is not given, use the defaults. + var freqs = parameters.freq || [350, 350]; + var qs = parameters.Q || [1, 1]; + var gains = parameters.gain || [0, 0]; + var detunes = parameters.detune || [0, 0]; + + for (var frame = 0; frame < endFrame; ++frame) { + // Apply linear ramp at frame |frame|. + var f = linearRamp(frame / sampleRate, freqs[0], freqs[1], 0, duration); + var q = linearRamp(frame / sampleRate, qs[0], qs[1], 0, duration); + var g = linearRamp(frame / sampleRate, gains[0], gains[1], 0, duration); + var d = linearRamp(frame / sampleRate, detunes[0], detunes[1], 0, duration); + + // Compute actual frequency parameter + f = f * Math.pow(2, d / 1200); + + // Compute filter coefficients + var coef = filterTypeFunction(f / (sampleRate / 2), q, g); + b0[k] = coef.b0; + b1[k] = coef.b1; + b2[k] = coef.b2; + a1[k] = coef.a1; + a2[k] = coef.a2; + ++k; + } + + return {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2}; + } + + // Apply the given time-varying biquad filter to the given signal, |signal|. |coef| should be + // the time-varying coefficients of the filter, as returned by |generateFilterCoefficients|. + function timeVaryingFilter(signal, coef) { + var length = signal.length; + // Use double precision for the internal computations. + var y = new Float64Array(length); + + // Prime the pump. 
(Assumes the signal has length >= 2!) + y[0] = coef.b0[0] * signal[0]; + y[1] = coef.b0[1] * signal[1] + coef.b1[1] * signal[0] - coef.a1[1] * y[0]; + + for (var n = 2; n < length; ++n) { + y[n] = coef.b0[n] * signal[n] + coef.b1[n] * signal[n-1] + coef.b2[n] * signal[n-2]; + y[n] -= coef.a1[n] * y[n-1] + coef.a2[n] * y[n-2]; + } + + // But convert the result to single precision for comparison. + return y.map(Math.fround); + } + + // Configure the audio graph using |context|. Returns the biquad filter node and the + // AudioBuffer used for the source. + function configureGraph(context, toneFrequency) { + // The source is just a simple sine wave. + var src = context.createBufferSource(); + var b = context.createBuffer(1, renderDuration * sampleRate, sampleRate); + var data = b.getChannelData(0); + var omega = 2 * Math.PI * toneFrequency / sampleRate; + for (var k = 0; k < data.length; ++k) { + data[k] = Math.sin(omega * k); + } + src.buffer = b; + var f = context.createBiquadFilter(); + src.connect(f); + f.connect(context.destination); + + src.start(); + + return {filter: f, source: b}; + } + + function createFilterVerifier(filterCreator, threshold, parameters, input, message) { + return function (resultBuffer) { + var actual = resultBuffer.getChannelData(0); + var coefs = generateFilterCoefficients(filterCreator, parameters, renderDuration); + + reference = timeVaryingFilter(input, coefs); + + compareChannels(actual, reference); + }; + } + + var testPromises = []; + + // Automate just the frequency parameter. A bandpass filter is used where the center + // frequency is swept across the source (which is a simple tone). + testPromises.push(function () { + var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + + // Center frequency of bandpass filter and also the frequency of the test tone. + var centerFreq = 10*440; + + // Sweep the frequency +/- 9*440 Hz from the center. 
This should cause the output to low at + // the beginning and end of the test where the done is outside the pass band of the filter, + // but high in the center where the tone is near the center of the pass band. + var parameters = { + freq: [centerFreq - 9*440, centerFreq + 9*440] + } + var graph = configureGraph(context, centerFreq); + var f = graph.filter; + var b = graph.source; + + f.type = "bandpass"; + f.frequency.setValueAtTime(parameters.freq[0], 0); + f.frequency.linearRampToValueAtTime(parameters.freq[1], renderDuration); + + return context.startRendering() + .then(createFilterVerifier(createBandpassFilter, 5e-5, parameters, b.getChannelData(0), + "Output of bandpass filter with frequency automation")); + }()); + + // Automate just the Q parameter. A bandpass filter is used where the Q of the filter is + // swept. + testPromises.push(function() { + var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + + // The frequency of the test tone. + var centerFreq = 440; + + // Sweep the Q paramter between 1 and 200. This will cause the output of the filter to pass + // most of the tone at the beginning to passing less of the tone at the end. This is + // because we set center frequency of the bandpass filter to be slightly off from the actual + // tone. + var parameters = { + Q: [1, 200], + // Center frequency of the bandpass filter is just 25 Hz above the tone frequency. + freq: [centerFreq + 25, centerFreq + 25] + }; + var graph = configureGraph(context, centerFreq); + var f = graph.filter; + var b = graph.source; + + f.type = "bandpass"; + f.frequency.value = parameters.freq[0]; + f.Q.setValueAtTime(parameters.Q[0], 0); + f.Q.linearRampToValueAtTime(parameters.Q[1], renderDuration); + + return context.startRendering() + .then(createFilterVerifier(createBandpassFilter, 1.4e-6, parameters, b.getChannelData(0), + "Output of bandpass filter with Q automation")); + }()); + + // Automate just the gain of the lowshelf filter. 
A test tone will be in the lowshelf part of + // the filter. The output will vary as the gain of the lowshelf is changed. + testPromises.push(function() { + var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + + // Frequency of the test tone. + var centerFreq = 440; + + // Set the cutoff frequency of the lowshelf to be significantly higher than the test tone. + // Sweep the gain from 20 dB to -20 dB. (We go from 20 to -20 to easily verify that the + // filter didn't go unstable.) + var parameters = { + freq: [3500, 3500], + gain: [20, -20] + } + var graph = configureGraph(context, centerFreq); + var f = graph.filter; + var b = graph.source; + + f.type = "lowshelf"; + f.frequency.value = parameters.freq[0]; + f.gain.setValueAtTime(parameters.gain[0], 0); + f.gain.linearRampToValueAtTime(parameters.gain[1], renderDuration); + + context.startRendering() + .then(createFilterVerifier(createLowShelfFilter, 8e-6, parameters, b.getChannelData(0), + "Output of lowshelf filter with gain automation")); + }()); + + // Automate just the detune parameter. Basically the same test as for the frequncy parameter + // but we just use the detune parameter to modulate the frequency parameter. + testPromises.push(function() { + var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + var centerFreq = 10*440; + var parameters = { + freq: [centerFreq, centerFreq], + detune: [-10*1200, 10*1200] + }; + var graph = configureGraph(context, centerFreq); + var f = graph.filter; + var b = graph.source; + + f.type = "bandpass"; + f.frequency.value = parameters.freq[0]; + f.detune.setValueAtTime(parameters.detune[0], 0); + f.detune.linearRampToValueAtTime(parameters.detune[1], renderDuration); + + context.startRendering() + .then(createFilterVerifier(createBandpassFilter, 5e-6, parameters, b.getChannelData(0), + "Output of bandpass filter with detune automation")); + }()); + + // Automate all of the filter parameters at once. 
This is a basic check that everything is + // working. A peaking filter is used because it uses all of the parameters. + testPromises.push(function() { + var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + var graph = configureGraph(context, 10*440); + var f = graph.filter; + var b = graph.source; + + // Sweep all of the filter parameters. These are pretty much arbitrary. + var parameters = { + freq: [10000, 100], + Q: [f.Q.value, .0001], + gain: [f.gain.value, 20], + detune: [2400, -2400] + }; + + f.type = "peaking"; + // Set starting points for all parameters of the filter. Start at 10 kHz for the center + // frequency, and the defaults for Q and gain. + f.frequency.setValueAtTime(parameters.freq[0], 0); + f.Q.setValueAtTime(parameters.Q[0], 0); + f.gain.setValueAtTime(parameters.gain[0], 0); + f.detune.setValueAtTime(parameters.detune[0], 0); + + // Linear ramp each parameter + f.frequency.linearRampToValueAtTime(parameters.freq[1], renderDuration); + f.Q.linearRampToValueAtTime(parameters.Q[1], renderDuration); + f.gain.linearRampToValueAtTime(parameters.gain[1], renderDuration); + f.detune.linearRampToValueAtTime(parameters.detune[1], renderDuration); + + context.startRendering() + .then(createFilterVerifier(createPeakingFilter, 3.3e-4, parameters, b.getChannelData(0), + "Output of peaking filter with automation of all parameters")); + }()); + + // Test that modulation of the frequency parameter of the filter works. A sinusoid of 440 Hz + // is the test signal that is applied to a bandpass biquad filter. The frequency parameter of + // the filter is modulated by a sinusoid at 103 Hz, and the frequency modulation varies from + // 116 to 412 Hz. 
(This test was taken from the description in + // https://github.com/WebAudio/web-audio-api/issues/509#issuecomment-94731355) + testPromises.push(function() { + var context = new OfflineAudioContext(1, renderDuration * sampleRate, sampleRate); + + // Create a graph with the sinusoidal source at 440 Hz as the input to a biquad filter. + var graph = configureGraph(context, 440); + var f = graph.filter; + var b = graph.source; + + f.type = "bandpass"; + f.Q.value = 5; + f.frequency.value = 264; + + // Create the modulation source, a sinusoid with frequency 103 Hz and amplitude 148. (The + // amplitude of 148 is added to the filter's frequency value of 264 to produce a sinusoidal + // modulation of the frequency parameter from 116 to 412 Hz.) + var mod = context.createBufferSource(); + var mbuffer = context.createBuffer(1, renderDuration * sampleRate, sampleRate); + var d = mbuffer.getChannelData(0); + var omega = 2 * Math.PI * 103 / sampleRate; + for (var k = 0; k < d.length; ++k) { + d[k] = 148 * Math.sin(omega * k); + } + mod.buffer = mbuffer; + + mod.connect(f.frequency); + + mod.start(); + return context.startRendering() + .then(function (resultBuffer) { + var actual = resultBuffer.getChannelData(0); + // Compute the filter coefficients using the mod sine wave + + var endFrame = Math.ceil(renderDuration * sampleRate); + var nCoef = endFrame; + var b0 = new Float64Array(nCoef); + var b1 = new Float64Array(nCoef); + var b2 = new Float64Array(nCoef); + var a1 = new Float64Array(nCoef); + var a2 = new Float64Array(nCoef); + + // Generate the filter coefficients when the frequency varies from 116 to 248 Hz using + // the 103 Hz sinusoid. 
+ for (var k = 0; k < nCoef; ++k) { + var freq = f.frequency.value + d[k]; + var c = createBandpassFilter(freq / (sampleRate / 2), f.Q.value, f.gain.value); + b0[k] = c.b0; + b1[k] = c.b1; + b2[k] = c.b2; + a1[k] = c.a1; + a2[k] = c.a2; + } + reference = timeVaryingFilter(b.getChannelData(0), + {b0: b0, b1: b1, b2: b2, a1: a1, a2: a2}); + + compareChannels(actual, reference); + }); + }()); + + // Wait for all tests + Promise.all(testPromises).then(function () { + SimpleTest.finish(); + }, function () { + SimpleTest.finish(); + }); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html new file mode 100644 index 000000000..a3a1484f6 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeBandPass.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode Band Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. + var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + // The filters we want to test. 
+ var filterParameters = [{cutoff : 0, q : 0, gain : 1 }, + {cutoff : 1, q : 0, gain : 1 }, + {cutoff : 0.5, q : 0, gain : 1 }, + {cutoff : 0.25, q : 1, gain : 1 }, + ]; + + createTestAndRun(context, "bandpass", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html new file mode 100644 index 000000000..1576db1e8 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeGetFrequencyResponse.html @@ -0,0 +1,261 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode All Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { +// Test the frequency response of a biquad filter. We compute the frequency response for a simple +// peaking biquad filter and compare it with the expected frequency response. The actual filter +// used doesn't matter since we're testing getFrequencyResponse and not the actual filter output. +// The filters are extensively tested in other biquad tests. + +var context; + +// The biquad filter node. +var filter; + +// The magnitude response of the biquad filter. +var magResponse; + +// The phase response of the biquad filter. +var phaseResponse; + +// Number of frequency samples to take. +var numberOfFrequencies = 1000; + +// The filter parameters. +var filterCutoff = 1000; // Hz. +var filterQ = 1; +var filterGain = 5; // Decibels. + +// The maximum allowed error in the magnitude response. 
+var maxAllowedMagError = 5.7e-7; + +// The maximum allowed error in the phase response. +var maxAllowedPhaseError = 4.7e-8; + +// The magnitudes and phases of the reference frequency response. +var magResponse; +var phaseResponse; + +// The magnitudes and phases of the reference frequency response. +var expectedMagnitudes; +var expectedPhases; + +// Convert frequency in Hz to a normalized frequency between 0 to 1 with 1 corresponding to the +// Nyquist frequency. +function normalizedFrequency(freqHz, sampleRate) +{ + var nyquist = sampleRate / 2; + return freqHz / nyquist; +} + +// Get the filter response at a (normalized) frequency |f| for the filter with coefficients |coef|. +function getResponseAt(coef, f) +{ + var b0 = coef.b0; + var b1 = coef.b1; + var b2 = coef.b2; + var a1 = coef.a1; + var a2 = coef.a2; + + // H(z) = (b0 + b1 / z + b2 / z^2) / (1 + a1 / z + a2 / z^2) + // + // Compute H(exp(i * pi * f)). No native complex numbers in javascript, so break H(exp(i * pi * // f)) + // in to the real and imaginary parts of the numerator and denominator. Let omega = pi * f. + // Then the numerator is + // + // b0 + b1 * cos(omega) + b2 * cos(2 * omega) - i * (b1 * sin(omega) + b2 * sin(2 * omega)) + // + // and the denominator is + // + // 1 + a1 * cos(omega) + a2 * cos(2 * omega) - i * (a1 * sin(omega) + a2 * sin(2 * omega)) + // + // Compute the magnitude and phase from the real and imaginary parts. 
+ + var omega = Math.PI * f; + var numeratorReal = b0 + b1 * Math.cos(omega) + b2 * Math.cos(2 * omega); + var numeratorImag = -(b1 * Math.sin(omega) + b2 * Math.sin(2 * omega)); + var denominatorReal = 1 + a1 * Math.cos(omega) + a2 * Math.cos(2 * omega); + var denominatorImag = -(a1 * Math.sin(omega) + a2 * Math.sin(2 * omega)); + + var magnitude = Math.sqrt((numeratorReal * numeratorReal + numeratorImag * numeratorImag) + / (denominatorReal * denominatorReal + denominatorImag * denominatorImag)); + var phase = Math.atan2(numeratorImag, numeratorReal) - Math.atan2(denominatorImag, denominatorReal); + + if (phase >= Math.PI) { + phase -= 2 * Math.PI; + } else if (phase <= -Math.PI) { + phase += 2 * Math.PI; + } + + return {magnitude : magnitude, phase : phase}; +} + +// Compute the reference frequency response for the biquad filter |filter| at the frequency samples +// given by |frequencies|. +function frequencyResponseReference(filter, frequencies) +{ + var sampleRate = filter.context.sampleRate; + var normalizedFreq = normalizedFrequency(filter.frequency.value, sampleRate); + var filterCoefficients = createFilter(filter.type, normalizedFreq, filter.Q.value, filter.gain.value); + + var magnitudes = []; + var phases = []; + + for (var k = 0; k < frequencies.length; ++k) { + var response = getResponseAt(filterCoefficients, normalizedFrequency(frequencies[k], sampleRate)); + magnitudes.push(response.magnitude); + phases.push(response.phase); + } + + return {magnitudes : magnitudes, phases : phases}; +} + +// Compute a set of linearly spaced frequencies. 
+function createFrequencies(nFrequencies, sampleRate) +{ + var frequencies = new Float32Array(nFrequencies); + var nyquist = sampleRate / 2; + var freqDelta = nyquist / nFrequencies; + + for (var k = 0; k < nFrequencies; ++k) { + frequencies[k] = k * freqDelta; + } + + return frequencies; +} + +function linearToDecibels(x) +{ + if (x) { + return 20 * Math.log(x) / Math.LN10; + } else { + return -1000; + } +} + +// Look through the array and find any NaN or infinity. Returns the index of the first occurence or +// -1 if none. +function findBadNumber(signal) +{ + for (var k = 0; k < signal.length; ++k) { + if (!isValidNumber(signal[k])) { + return k; + } + } + return -1; +} + +// Compute absolute value of the difference between phase angles, taking into account the wrapping +// of phases. +function absolutePhaseDifference(x, y) +{ + var diff = Math.abs(x - y); + + if (diff > Math.PI) { + diff = 2 * Math.PI - diff; + } + return diff; +} + +// Compare the frequency response with our expected response. +function compareResponses(filter, frequencies, magResponse, phaseResponse) +{ + var expectedResponse = frequencyResponseReference(filter, frequencies); + + expectedMagnitudes = expectedResponse.magnitudes; + expectedPhases = expectedResponse.phases; + + var n = magResponse.length; + var success = true; + var badResponse = false; + + var maxMagError = -1; + var maxMagErrorIndex = -1; + + var k; + var hasBadNumber; + + hasBadNumber = findBadNumber(magResponse); + ok (hasBadNumber < 0, "Magnitude response has NaN or infinity at " + hasBadNumber); + + hasBadNumber = findBadNumber(phaseResponse); + ok (hasBadNumber < 0, "Phase response has NaN or infinity at " + hasBadNumber); + + // These aren't testing the implementation itself. Instead, these are sanity checks on the + // reference. Failure here does not imply an error in the implementation. 
+ hasBadNumber = findBadNumber(expectedMagnitudes); + ok (hasBadNumber < 0, "Expected magnitude response has NaN or infinity at " + hasBadNumber); + + hasBadNumber = findBadNumber(expectedPhases); + ok (hasBadNumber < 0, "Expected phase response has NaN or infinity at " + hasBadNumber); + + for (k = 0; k < n; ++k) { + var error = Math.abs(linearToDecibels(magResponse[k]) - linearToDecibels(expectedMagnitudes[k])); + if (error > maxMagError) { + maxMagError = error; + maxMagErrorIndex = k; + } + } + + var message = "Magnitude error (" + maxMagError + " dB)"; + message += " exceeded threshold at " + frequencies[maxMagErrorIndex]; + message += " Hz. Actual: " + linearToDecibels(magResponse[maxMagErrorIndex]); + message += " dB, expected: " + linearToDecibels(expectedMagnitudes[maxMagErrorIndex]) + " dB."; + ok(maxMagError < maxAllowedMagError, message); + + var maxPhaseError = -1; + var maxPhaseErrorIndex = -1; + + for (k = 0; k < n; ++k) { + var error = absolutePhaseDifference(phaseResponse[k], expectedPhases[k]); + if (error > maxPhaseError) { + maxPhaseError = error; + maxPhaseErrorIndex = k; + } + } + + message = "Phase error (radians) (" + maxPhaseError; + message += ") exceeded threshold at " + frequencies[maxPhaseErrorIndex]; + message += " Hz. Actual: " + phaseResponse[maxPhaseErrorIndex]; + message += " expected: " + expectedPhases[maxPhaseErrorIndex]; + + ok(maxPhaseError < maxAllowedPhaseError, message); +} + +context = new AudioContext(); + +filter = context.createBiquadFilter(); + +// Arbitrarily test a peaking filter, but any kind of filter can be tested. 
+filter.type = "peaking"; +filter.frequency.value = filterCutoff; +filter.Q.value = filterQ; +filter.gain.value = filterGain; + +var frequencies = createFrequencies(numberOfFrequencies, context.sampleRate); +magResponse = new Float32Array(numberOfFrequencies); +phaseResponse = new Float32Array(numberOfFrequencies); + +filter.getFrequencyResponse(frequencies, magResponse, phaseResponse); +compareResponses(filter, frequencies, magResponse, phaseResponse); + +SimpleTest.finish(); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html new file mode 100644 index 000000000..cb9aa274c --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighPass.html @@ -0,0 +1,33 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode High Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. + var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + // The filters we want to test. 
+ var filterParameters = [{cutoff : 0, q : 1, gain : 1 }, + {cutoff : 1, q : 1, gain : 1 }, + {cutoff : 0.25, q : 1, gain : 1 }, + ]; + + createTestAndRun(context, "highpass", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html new file mode 100644 index 000000000..3581459b0 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeHighShelf.html @@ -0,0 +1,33 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode High Shelf Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. + var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + // The filters we want to test. 
+ var filterParameters = [{cutoff : 0, q : 10, gain : 10 }, + {cutoff : 1, q : 10, gain : 10 }, + {cutoff : 0.25, q : 10, gain : 10 }, + ]; + + createTestAndRun(context, "highshelf", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html new file mode 100644 index 000000000..b0c12558f --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowPass.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode Low Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. + var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + // The filters we want to test. 
+ var filterParameters = [{cutoff : 0, q : 1, gain : 1 }, + {cutoff : 1, q : 1, gain : 1 }, + {cutoff : 0.25, q : 1, gain : 1 }, + {cutoff : 0.25, q : 1, gain : 1, detune : 100 }, + {cutoff : 0.01, q : 1, gain : 1, detune : -200 }, + ]; + createTestAndRun(context, "lowpass", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html new file mode 100644 index 000000000..3c83bfaa3 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeLowShelf.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode Low Shelf Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + + // Create offline audio context. + var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + // The filters we want to test. 
+ var filterParameters = [{cutoff : 0, q : 10, gain : 10 }, + {cutoff : 1, q : 10, gain : 10 }, + {cutoff : 0.25, q : 10, gain : 10 }, + ]; + + createTestAndRun(context, "lowshelf", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html new file mode 100644 index 000000000..551410c66 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeNotch.html @@ -0,0 +1,33 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode Notch Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. 
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + var filterParameters = [{cutoff : 0, q : 10, gain : 1 }, + {cutoff : 1, q : 10, gain : 1 }, + {cutoff : .5, q : 0, gain : 1 }, + {cutoff : 0.25, q : 10, gain : 1 }, + ]; + + createTestAndRun(context, "notch", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html b/dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html new file mode 100644 index 000000000..33fcc225a --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodePeaking.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode Low Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // Create offline audio context. + var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + // The filters we want to test. 
+ var filterParameters = [{cutoff : 0, q : 10, gain : 10 }, + {cutoff : 1, q : 10, gain : 10 }, + {cutoff : .5, q : 0, gain : 10 }, + {cutoff : 0.25, q : 10, gain : 10 }, + ]; + + createTestAndRun(context, "peaking", filterParameters); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html b/dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html new file mode 100644 index 000000000..fd02e734f --- /dev/null +++ b/dom/media/webaudio/test/blink/test_biquadFilterNodeTail.html @@ -0,0 +1,76 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode All Pass Filter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="audio-testing.js"></script> +<script src="biquad-filters.js"></script> +<script src="biquad-testing.js"></script> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + // A high sample rate shows the issue more clearly. + var sampleRate = 192000; + // Some short duration because we don't need to run the test for very long. + var testDurationSec = 0.5; + var testDurationFrames = testDurationSec * sampleRate; + + // Amplitude experimentally determined to give a biquad output close to 1. (No attempt was + // made to produce exactly 1; it's not needed.) + var sourceAmplitude = 100; + + // The output of the biquad filter should not change by more than this much between output + // samples. Threshold was determined experimentally. + var glitchThreshold = 0.01292; + + // Test that a Biquad filter doesn't have it's output terminated because the input has gone + // away. Generally, when a source node is finished, it disconnects itself from any downstream + // nodes. This is the correct behavior. 
Nodes that have no inputs (disconnected) are + // generally assumed to output zeroes. This is also desired behavior. However, biquad + // filters have memory so they should not suddenly output zeroes when the input is + // disconnected. This test checks to see if the output doesn't suddenly change to zero. + var context = new OfflineAudioContext(1, testDurationFrames, sampleRate); + + // Create an impulse source. + var buffer = context.createBuffer(1, 1, context.sampleRate); + buffer.getChannelData(0)[0] = sourceAmplitude; + var source = context.createBufferSource(); + source.buffer = buffer; + + // Create the biquad filter. It doesn't really matter what kind, so the default filter type + // and parameters is fine. Connect the source to it. + var biquad = context.createBiquadFilter(); + source.connect(biquad); + biquad.connect(context.destination); + + source.start(); + + context.startRendering().then(function(result) { + // There should be no large discontinuities in the output + var buffer = result.getChannelData(0); + var maxGlitchIndex = 0; + var maxGlitchValue = 0.0; + for (var i = 1; i < buffer.length; i++) { + var diff = Math.abs(buffer[i-1] - buffer[i]); + if (diff >= glitchThreshold) { + if (diff > maxGlitchValue) { + maxGlitchIndex = i; + maxGlitchValue = diff; + } + } + } + ok(maxGlitchIndex == 0, 'glitches detected in biquad output: maximum glitch at ' + maxGlitchIndex + ' with diff of ' + maxGlitchValue); + SimpleTest.finish(); + }) +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_iirFilterNode.html b/dom/media/webaudio/test/blink/test_iirFilterNode.html new file mode 100644 index 000000000..47f936761 --- /dev/null +++ b/dom/media/webaudio/test/blink/test_iirFilterNode.html @@ -0,0 +1,467 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test IIRFilterNode GetFrequencyResponse</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" 
src="webaudio.js"></script> + <script type="text/javascript" src="biquad-filters.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + var sampleRate = 48000; + var testDurationSec = 1; + var testFrames = testDurationSec * sampleRate; + + var testPromises = [] + testPromises.push(function () { + // Test that the feedback coefficients are normalized. Do this be creating two + // IIRFilterNodes. One has normalized coefficients, and one doesn't. Compute the + // difference and make sure they're the same. + var context = new OfflineAudioContext(2, testFrames, sampleRate); + + // Use a simple impulse as the source. + var buffer = context.createBuffer(1, 1, sampleRate); + buffer.getChannelData(0)[0] = 1; + var source = context.createBufferSource(); + source.buffer = buffer; + + // Gain node for computing the difference between the filters. + var gain = context.createGain(); + gain.gain.value = -1; + + // The IIR filters. Use a common feedforward array. + var ff = [1]; + + var fb1 = [1, .9]; + + var fb2 = new Float64Array(2); + // Scale the feedback coefficients by an arbitrary factor. + var coefScaleFactor = 2; + for (var k = 0; k < fb2.length; ++k) { + fb2[k] = coefScaleFactor * fb1[k]; + } + + var iir1 = context.createIIRFilter(ff, fb1); + var iir2 = context.createIIRFilter(ff, fb2); + + // Create the graph. The output of iir1 (normalized coefficients) is channel 0, and the + // output of iir2 (unnormalized coefficients), with appropriate scaling, is channel 1. + var merger = context.createChannelMerger(2); + source.connect(iir1); + source.connect(iir2); + iir1.connect(merger, 0, 0); + iir2.connect(gain); + + // The gain for the gain node should be set to compensate for the scaling of the + // coefficients. 
Since iir2 has scaled the coefficients by coefScaleFactor, the output is + // reduced by the same factor, so adjust the gain to scale the output of iir2 back up. + gain.gain.value = coefScaleFactor; + gain.connect(merger, 0, 1); + + merger.connect(context.destination); + + source.start(); + + // Rock and roll! + + return context.startRendering().then(function (result) { + // Find the max amplitude of the result, which should be near zero. + var iir1Data = result.getChannelData(0); + var iir2Data = result.getChannelData(1); + + // Threshold isn't exactly zero because the arithmetic is done differently between the + // IIRFilterNode and the BiquadFilterNode. + compareChannels(iir1Data, iir2Data); + }); + }()); + + testPromises.push(function () { + // Create a simple 1-zero filter and compare with the expected output. + var context = new OfflineAudioContext(1, testFrames, sampleRate); + + // Use a simple impulse as the source + var buffer = context.createBuffer(1, 1, sampleRate); + buffer.getChannelData(0)[0] = 1; + var source = context.createBufferSource(); + source.buffer = buffer; + + // The filter is y(n) = 0.5*(x(n) + x(n-1)), a simple 2-point moving average. This is + // rather arbitrary; keep it simple. + + var iir = context.createIIRFilter([0.5, 0.5], [1]); + + // Create the graph + source.connect(iir); + iir.connect(context.destination); + + // Rock and roll! + source.start(); + + return context.startRendering().then(function (result) { + var actual = result.getChannelData(0); + var expected = new Float64Array(testFrames); + // The filter is a simple 2-point moving average of an impulse, so the first two values + // are non-zero and the rest are zero. + expected[0] = 0.5; + expected[1] = 0.5; + compareChannels(actual, expected); + }); + }()); + + testPromises.push(function () { + // Create a simple 1-pole filter and compare with the expected output. + + // The filter is y(n) + c*y(n-1)= x(n). 
The analytical response is (-c)^n, so choose a + // suitable number of frames to run the test for where the output isn't flushed to zero. + var c = 0.9; + var eps = 1e-20; + var duration = Math.floor(Math.log(eps) / Math.log(Math.abs(c))); + var context = new OfflineAudioContext(1, duration, sampleRate); + + // Use a simple impulse as the source + var buffer = context.createBuffer(1, 1, sampleRate); + buffer.getChannelData(0)[0] = 1; + var source = context.createBufferSource(); + source.buffer = buffer; + + var iir = context.createIIRFilter([1], [1, c]); + + // Create the graph + source.connect(iir); + iir.connect(context.destination); + + // Rock and roll! + source.start(); + + return context.startRendering().then(function (result) { + var actual = result.getChannelData(0); + var expected = new Float64Array(actual.length); + + // The filter is a simple 1-pole filter: y(n) = -c*y(n-k)+x(n), with an impulse as the + // input. + expected[0] = 1; + for (k = 1; k < testFrames; ++k) { + expected[k] = -c * expected[k-1]; + } + + compareChannels(actual, expected); + }); + }()); + + // This function creates an IIRFilterNode equivalent to the specified + // BiquadFilterNode and compares the outputs. The + // outputs from the two filters should be virtually identical. + function testWithBiquadFilter(filterType) { + var context = new OfflineAudioContext(2, testFrames, sampleRate); + + // Use a constant (step function) as the source + var buffer = context.createBuffer(1, testFrames, context.sampleRate); + for (var i = 0; i < testFrames; ++i) { + buffer.getChannelData(0)[i] = 1; + } + var source = context.createBufferSource(); + source.buffer = buffer; + + // Create the biquad. Choose some rather arbitrary values for Q and gain for the biquad + // so that the shelf filters aren't identical. 
+ var biquad = context.createBiquadFilter(); + biquad.type = filterType; + biquad.Q.value = 10; + biquad.gain.value = 10; + + // Create the equivalent IIR Filter node by computing the coefficients of the given biquad + // filter type. + var nyquist = sampleRate / 2; + var coef = createFilter(filterType, + biquad.frequency.value / nyquist, + biquad.Q.value, + biquad.gain.value); + + var iir = context.createIIRFilter([coef.b0, coef.b1, coef.b2], [1, coef.a1, coef.a2]); + + var merger = context.createChannelMerger(2); + // Create the graph + source.connect(biquad); + source.connect(iir); + + biquad.connect(merger, 0, 0); + iir.connect(merger, 0, 1); + + merger.connect(context.destination); + + // Rock and roll! + source.start(); + + return context.startRendering().then(function (result) { + // Find the max amplitude of the result, which should be near zero. + var expected = result.getChannelData(0); + var actual = result.getChannelData(1); + compareChannels(actual, expected); + }); + } + + biquadFilterTypes = ["lowpass", "highpass", "bandpass", "notch", + "allpass", "lowshelf", "highshelf", "peaking"]; + + // Create a set of tasks based on biquadTestConfigs. + for (var i = 0; i < biquadFilterTypes.length; ++i) { + testPromises.push(testWithBiquadFilter(biquadFilterTypes[i])); + } + + testPromises.push(function () { + // Multi-channel test. Create a biquad filter and the equivalent IIR filter. Filter the + // same multichannel signal and compare the results. + var nChannels = 3; + var context = new OfflineAudioContext(nChannels, testFrames, sampleRate); + + // Create a set of oscillators as the multi-channel source. + var source = []; + + for (k = 0; k < nChannels; ++k) { + source[k] = context.createOscillator(); + source[k].type = "sawtooth"; + // The frequency of the oscillator is pretty arbitrary, but each oscillator should have a + // different frequency. 
+ source[k].frequency.value = 100 + k * 100; + } + + var merger = context.createChannelMerger(3); + + var biquad = context.createBiquadFilter(); + + // Create the equivalent IIR Filter node. + var nyquist = sampleRate / 2; + var coef = createFilter(biquad.type, + biquad.frequency.value / nyquist, + biquad.Q.value, + biquad.gain.value); + var fb = [1, coef.a1, coef.a2]; + var ff = [coef.b0, coef.b1, coef.b2]; + + var iir = context.createIIRFilter(ff, fb); + // Gain node to compute the difference between the IIR and biquad filter. + var gain = context.createGain(); + gain.gain.value = -1; + + // Create the graph. + for (k = 0; k < nChannels; ++k) + source[k].connect(merger, 0, k); + + merger.connect(biquad); + merger.connect(iir); + iir.connect(gain); + biquad.connect(context.destination); + gain.connect(context.destination); + + for (k = 0; k < nChannels; ++k) + source[k].start(); + + return context.startRendering().then(function (result) { + var errorThresholds = [3.7671e-5, 3.0071e-5, 2.6241e-5]; + + // Check the difference signal on each channel + for (channel = 0; channel < result.numberOfChannels; ++channel) { + // Find the max amplitude of the result, which should be near zero. + var data = result.getChannelData(channel); + var maxError = data.reduce(function(reducedValue, currentValue) { + return Math.max(reducedValue, Math.abs(currentValue)); + }); + + ok(maxError <= errorThresholds[channel], "Max difference between IIR and Biquad on channel " + channel); + } + }); + }()); + + testPromises.push(function () { + // Apply an IIRFilter to the given input signal. + // + // IIR filter in the time domain is + // + // y[n] = sum(ff[k]*x[n-k], k, 0, M) - sum(fb[k]*y[n-k], k, 1, N) + // + function iirFilter(input, feedforward, feedback) { + // For simplicity, create an x buffer that contains the input, and a y buffer that contains + // the output. Both of these buffers have an initial work space to implement the initial + // memory of the filter. 
+ var workSize = Math.max(feedforward.length, feedback.length); + var x = new Float32Array(input.length + workSize); + + // Float64 because we want to match the implementation that uses doubles to minimize + // roundoff. + var y = new Float64Array(input.length + workSize); + + // Copy the input over. + for (var k = 0; k < input.length; ++k) + x[k + feedforward.length] = input[k]; + + // Run the filter + for (var n = 0; n < input.length; ++n) { + var index = n + workSize; + var yn = 0; + for (var k = 0; k < feedforward.length; ++k) + yn += feedforward[k] * x[index - k]; + for (var k = 0; k < feedback.length; ++k) + yn -= feedback[k] * y[index - k]; + + y[index] = yn; + } + + return y.slice(workSize).map(Math.fround); + } + + // Cascade the two given biquad filters to create one IIR filter. + function cascadeBiquads(f1Coef, f2Coef) { + // The biquad filters are: + // + // f1 = (b10 + b11/z + b12/z^2)/(1 + a11/z + a12/z^2); + // f2 = (b20 + b21/z + b22/z^2)/(1 + a21/z + a22/z^2); + // + // To cascade them, multiply the two transforms together to get a fourth order IIR filter. + + var numProduct = [f1Coef.b0 * f2Coef.b0, + f1Coef.b0 * f2Coef.b1 + f1Coef.b1 * f2Coef.b0, + f1Coef.b0 * f2Coef.b2 + f1Coef.b1 * f2Coef.b1 + f1Coef.b2 * f2Coef.b0, + f1Coef.b1 * f2Coef.b2 + f1Coef.b2 * f2Coef.b1, + f1Coef.b2 * f2Coef.b2 + ]; + + var denProduct = [1, + f2Coef.a1 + f1Coef.a1, + f2Coef.a2 + f1Coef.a1 * f2Coef.a1 + f1Coef.a2, + f1Coef.a1 * f2Coef.a2 + f1Coef.a2 * f2Coef.a1, + f1Coef.a2 * f2Coef.a2 + ]; + + return { + ff: numProduct, + fb: denProduct + } + } + + // Find the magnitude of the root of the quadratic that has the maximum magnitude. + // + // The quadratic is z^2 + a1 * z + a2 and we want the root z that has the largest magnitude. + function largestRootMagnitude(a1, a2) { + var discriminant = a1 * a1 - 4 * a2; + if (discriminant < 0) { + // Complex roots: -a1/2 +/- i*sqrt(-d)/2. 
Thus the magnitude of each root is the same + // and is sqrt(a1^2/4 + |d|/4) + var d = Math.sqrt(-discriminant); + return Math.hypot(a1 / 2, d / 2); + } else { + // Real roots + var d = Math.sqrt(discriminant); + return Math.max(Math.abs((-a1 + d) / 2), Math.abs((-a1 - d) / 2)); + } + } + + // Cascade 2 lowpass biquad filters and compare that with the equivalent 4th order IIR + // filter. + + var nyquist = sampleRate / 2; + // Compute the coefficients of a lowpass filter. + + // First some preliminary stuff. Compute the coefficients of the biquad. This is used to + // figure out how frames to use in the test. + var biquadType = "lowpass"; + var biquadCutoff = 350; + var biquadQ = 5; + var biquadGain = 1; + + var coef = createFilter(biquadType, + biquadCutoff / nyquist, + biquadQ, + biquadGain); + + // Cascade the biquads together to create an equivalent IIR filter. + var cascade = cascadeBiquads(coef, coef); + + // Since we're cascading two identical biquads, the root of denominator of the IIR filter is + // repeated, so the root of the denominator with the largest magnitude occurs twice. The + // impulse response of the IIR filter will be roughly c*(r*r)^n at time n, where r is the + // root of largest magnitude. This approximation gets better as n increases. We can use + // this to get a rough idea of when the response has died down to a small value. + + // This is the value we will use to determine how many frames to render. Rendering too many + // is a waste of time and also makes it hard to compare the actual result to the expected + // because the magnitudes are so small that they could be mostly round-off noise. + // + // Find magnitude of the root with largest magnitude + var rootMagnitude = largestRootMagnitude(coef.a1, coef.a2); + + // Find n such that |r|^(2*n) <= eps. That is, n = log(eps)/(2*log(r)). 
Somewhat + // arbitrarily choose eps = 1e-20; + var eps = 1e-20; + var framesForTest = Math.floor(Math.log(eps) / (2 * Math.log(rootMagnitude))); + + // We're ready to create the graph for the test. The offline context has two channels: + // channel 0 is the expected (cascaded biquad) result and channel 1 is the actual IIR filter + // result. + var context = new OfflineAudioContext(2, framesForTest, sampleRate); + + // Use a simple impulse with a large (arbitrary) amplitude as the source + var amplitude = 1; + var buffer = context.createBuffer(1, testFrames, sampleRate); + buffer.getChannelData(0)[0] = amplitude; + var source = context.createBufferSource(); + source.buffer = buffer; + + // Create the two biquad filters. Doesn't really matter what, but for simplicity we choose + // identical lowpass filters with the same parameters. + var biquad1 = context.createBiquadFilter(); + biquad1.type = biquadType; + biquad1.frequency.value = biquadCutoff; + biquad1.Q.value = biquadQ; + + var biquad2 = context.createBiquadFilter(); + biquad2.type = biquadType; + biquad2.frequency.value = biquadCutoff; + biquad2.Q.value = biquadQ; + + var iir = context.createIIRFilter(cascade.ff, cascade.fb); + + // Create the merger to get the signals into multiple channels + var merger = context.createChannelMerger(2); + + // Create the graph, filtering the source through two biquads. + source.connect(biquad1); + biquad1.connect(biquad2); + biquad2.connect(merger, 0, 0); + + source.connect(iir); + iir.connect(merger, 0, 1); + + merger.connect(context.destination); + + // Now filter the source through the IIR filter. + var y = iirFilter(buffer.getChannelData(0), cascade.ff, cascade.fb); + + // Rock and roll! 
+ source.start(); + + return context.startRendering().then(function(result) { + var expected = result.getChannelData(0); + var actual = result.getChannelData(1); + + compareChannels(actual, expected); + + }); + }()); + + // Wait for all tests + Promise.all(testPromises).then(function () { + SimpleTest.finish(); + }, function () { + SimpleTest.finish(); + }); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html b/dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html new file mode 100644 index 000000000..cb5cf33ed --- /dev/null +++ b/dom/media/webaudio/test/blink/test_iirFilterNodeGetFrequencyResponse.html @@ -0,0 +1,97 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test IIRFilterNode GetFrequencyResponse</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <script type="text/javascript" src="biquad-filters.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + // Modified from WebKit/LayoutTests/webaudio/iirfilter-getFrequencyResponse.html + var sampleRate = 48000; + var testDurationSec = 0.01; + + // Compute a set of linearly spaced frequencies. + function createFrequencies(nFrequencies, sampleRate) + { + var frequencies = new Float32Array(nFrequencies); + var nyquist = sampleRate / 2; + var freqDelta = nyquist / nFrequencies; + + for (var k = 0; k < nFrequencies; ++k) { + frequencies[k] = k * freqDelta; + } + + return frequencies; + } + + // Number of frequency samples to take. 
+ var numberOfFrequencies = 1000; + + var context = new OfflineAudioContext(1, testDurationSec * sampleRate, sampleRate); + + var frequencies = createFrequencies(numberOfFrequencies, context.sampleRate); + + // 1-Pole IIR Filter + var iir = context.createIIRFilter([1], [1, -0.9]); + + var iirMag = new Float32Array(numberOfFrequencies); + var iirPhase = new Float32Array(numberOfFrequencies); + var trueMag = new Float32Array(numberOfFrequencies); + var truePhase = new Float32Array(numberOfFrequencies); + + // The IIR filter is + // H(z) = 1/(1 - 0.9*z^(-1)). + // + // The frequency response is + // H(exp(j*w)) = 1/(1 - 0.9*exp(-j*w)). + // + // Thus, the magnitude is + // |H(exp(j*w))| = 1/sqrt(1.81-1.8*cos(w)). + // + // The phase is + // arg(H(exp(j*w)) = atan(0.9*sin(w)/(.9*cos(w)-1)) + + var frequencyScale = Math.PI / (sampleRate / 2); + + for (var k = 0; k < frequencies.length; ++k) { + var omega = frequencyScale * frequencies[k]; + trueMag[k] = 1/Math.sqrt(1.81-1.8*Math.cos(omega)); + truePhase[k] = Math.atan(0.9 * Math.sin(omega) / (0.9 * Math.cos(omega) - 1)); + } + + iir.getFrequencyResponse(frequencies, iirMag, iirPhase); + compareChannels(iirMag, trueMag); + compareChannels(iirPhase, truePhase); + + // Compare IIR and Biquad Filter + // Create an IIR filter equivalent to the biquad filter. Compute the frequency response for both and verify that they are the same. 
+ var biquad = context.createBiquadFilter(); + var coef = createFilter(biquad.type, + biquad.frequency.value / (context.sampleRate / 2), + biquad.Q.value, + biquad.gain.value); + + var iir = context.createIIRFilter([coef.b0, coef.b1, coef.b2], [1, coef.a1, coef.a2]); + + var biquadMag = new Float32Array(numberOfFrequencies); + var biquadPhase = new Float32Array(numberOfFrequencies); + var iirMag = new Float32Array(numberOfFrequencies); + var iirPhase = new Float32Array(numberOfFrequencies); + + biquad.getFrequencyResponse(frequencies, biquadMag, biquadPhase); + iir.getFrequencyResponse(frequencies, iirMag, iirPhase); + compareChannels(iirMag, biquadMag); + compareChannels(iirPhase, biquadPhase); + + SimpleTest.finish(); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/browser.ini b/dom/media/webaudio/test/browser.ini new file mode 100644 index 000000000..60ed969f1 --- /dev/null +++ b/dom/media/webaudio/test/browser.ini @@ -0,0 +1 @@ +[browser_bug1181073.js]
\ No newline at end of file diff --git a/dom/media/webaudio/test/browser_bug1181073.js b/dom/media/webaudio/test/browser_bug1181073.js new file mode 100644 index 000000000..6ee48144c --- /dev/null +++ b/dom/media/webaudio/test/browser_bug1181073.js @@ -0,0 +1,40 @@ +add_task(function*() { + // Make the min_background_timeout_value very high to avoid problems on slow machines + yield new Promise(resolve => SpecialPowers.pushPrefEnv({ + 'set': [['dom.min_background_timeout_value', 3000]] + }, resolve)); + + // Make a new tab, and put it in the background + yield BrowserTestUtils.withNewTab("about:blank", function*(browser) { + yield BrowserTestUtils.withNewTab("about:blank", function*() { + let time = yield ContentTask.spawn(browser, null, function () { + return new Promise(resolve => { + let start = content.performance.now(); + let id = content.window.setInterval(function() { + let end = content.performance.now(); + content.window.clearInterval(id); + resolve(end - start); + }, 0); + }); + }); + + ok(time > 2000, "Interval is throttled with no webaudio (" + time + " ms)"); + + time = yield ContentTask.spawn(browser, null, function () { + return new Promise(resolve => { + // Create an audio context, and save it on the window so it doesn't get GCed + content.window._audioCtx = new content.window.AudioContext(); + + let start = content.performance.now(); + let id = content.window.setInterval(function() { + let end = content.performance.now(); + content.window.clearInterval(id); + resolve(end - start); + }, 0); + }); + }); + + ok(time < 1000, "Interval is not throttled with an audio context present (" + time + " ms)"); + }); + }); +}); diff --git a/dom/media/webaudio/test/corsServer.sjs b/dom/media/webaudio/test/corsServer.sjs new file mode 100644 index 000000000..1804c7862 --- /dev/null +++ b/dom/media/webaudio/test/corsServer.sjs @@ -0,0 +1,25 @@ +function handleRequest(request, response) +{ + var file = Components.classes["@mozilla.org/file/directory_service;1"]. 
+ getService(Components.interfaces.nsIProperties). + get("CurWorkD", Components.interfaces.nsILocalFile); + var fis = Components.classes['@mozilla.org/network/file-input-stream;1']. + createInstance(Components.interfaces.nsIFileInputStream); + var bis = Components.classes["@mozilla.org/binaryinputstream;1"]. + createInstance(Components.interfaces.nsIBinaryInputStream); + var paths = "tests/dom/media/webaudio/test/small-shot.ogg"; + var split = paths.split("/"); + for(var i = 0; i < split.length; ++i) { + file.append(split[i]); + } + fis.init(file, -1, -1, false); + bis.setInputStream(fis); + var bytes = bis.readBytes(bis.available()); + response.setHeader("Content-Type", "video/ogg", false); + response.setHeader("Content-Length", ""+ bytes.length, false); + response.setHeader("Access-Control-Allow-Origin", "*", false); + response.write(bytes, bytes.length); + response.processAsync(); + response.finish(); + bis.close(); +} diff --git a/dom/media/webaudio/test/invalid.txt b/dom/media/webaudio/test/invalid.txt new file mode 100644 index 000000000..c44840faf --- /dev/null +++ b/dom/media/webaudio/test/invalid.txt @@ -0,0 +1 @@ +Surely this is not an audio file! diff --git a/dom/media/webaudio/test/layouttest-glue.js b/dom/media/webaudio/test/layouttest-glue.js new file mode 100644 index 000000000..db1aa563b --- /dev/null +++ b/dom/media/webaudio/test/layouttest-glue.js @@ -0,0 +1,19 @@ +// Reimplementation of the LayoutTest API from Blink so we can easily port +// WebAudio tests to Simpletest, without touching the internals of the test. 
+ +function testFailed(msg) { + ok(false, msg); +} + +function testPassed(msg) { + ok(true, msg); +} + +function finishJSTest() { + SimpleTest.finish(); +} + +function description(str) { + info(str); +} + diff --git a/dom/media/webaudio/test/mochitest.ini b/dom/media/webaudio/test/mochitest.ini new file mode 100644 index 000000000..4abcce7e3 --- /dev/null +++ b/dom/media/webaudio/test/mochitest.ini @@ -0,0 +1,212 @@ +[DEFAULT] +tags=msg +tags = webaudio +subsuite = media +support-files = + audio-expected.wav + audio-mono-expected-2.wav + audio-mono-expected.wav + audio-quad.wav + audio.ogv + audiovideo.mp4 + audioBufferSourceNodeDetached_worker.js + corsServer.sjs + invalid.txt + layouttest-glue.js + noaudio.webm + small-shot-expected.wav + small-shot-mono-expected.wav + small-shot.ogg + small-shot.mp3 + sweep-300-330-1sec.opus + ting-44.1k-1ch.ogg + ting-44.1k-2ch.ogg + ting-48k-1ch.ogg + ting-48k-2ch.ogg + ting-44.1k-1ch.wav + ting-44.1k-2ch.wav + ting-48k-1ch.wav + ting-48k-2ch.wav + sine-440-10s.opus + webaudio.js + +[test_analyserNode.html] +[test_analyserScale.html] +[test_analyserNodeOutput.html] +[test_analyserNodePassThrough.html] +[test_analyserNodeWithGain.html] +[test_AudioBuffer.html] +[test_audioBufferSourceNode.html] +[test_audioBufferSourceNodeEnded.html] +[test_audioBufferSourceNodeLazyLoopParam.html] +[test_audioBufferSourceNodeLoop.html] +[test_audioBufferSourceNodeLoopStartEnd.html] +[test_audioBufferSourceNodeLoopStartEndSame.html] +[test_audioBufferSourceNodeDetached.html] +skip-if = (toolkit == 'android' && debug) || os == 'win' # bug 1127845, bug 1138468 +[test_audioBufferSourceNodeNoStart.html] +[test_audioBufferSourceNodeNullBuffer.html] +[test_audioBufferSourceNodeOffset.html] +skip-if = (toolkit == 'android') || debug #bug 906752 +[test_audioBufferSourceNodePassThrough.html] +[test_audioBufferSourceNodeRate.html] +[test_AudioContext.html] +[test_AudioContext_disabled.html] +[test_audioContextSuspendResumeClose.html] +tags=capturestream 
+[test_audioDestinationNode.html] +[test_AudioListener.html] +[test_AudioNodeDevtoolsAPI.html] +[test_audioParamChaining.html] +[test_AudioParamDevtoolsAPI.html] +[test_audioParamExponentialRamp.html] +[test_audioParamGain.html] +[test_audioParamLinearRamp.html] +[test_audioParamSetCurveAtTime.html] +[test_audioParamSetCurveAtTimeTwice.html] +[test_audioParamSetCurveAtTimeZeroDuration.html] +[test_audioParamSetTargetAtTime.html] +[test_audioParamSetTargetAtTimeZeroTimeConstant.html] +[test_audioParamSetValueAtTime.html] +[test_audioParamTimelineDestinationOffset.html] +[test_badConnect.html] +[test_biquadFilterNode.html] +[test_biquadFilterNodePassThrough.html] +[test_biquadFilterNodeWithGain.html] +[test_bug808374.html] +[test_bug827541.html] +[test_bug839753.html] +[test_bug845960.html] +[test_bug856771.html] +[test_bug866570.html] +[test_bug866737.html] +[test_bug867089.html] +[test_bug867104.html] +[test_bug867174.html] +[test_bug867203.html] +[test_bug875221.html] +[test_bug875402.html] +[test_bug894150.html] +[test_bug956489.html] +[test_bug964376.html] +[test_bug966247.html] +tags=capturestream +[test_bug972678.html] +[test_bug1113634.html] +[test_bug1118372.html] +[test_bug1027864.html] +[test_bug1056032.html] +skip-if = toolkit == 'android' # bug 1056706 +[test_bug1255618.html] +[test_bug1267579.html] +[test_channelMergerNode.html] +[test_channelMergerNodeWithVolume.html] +[test_channelSplitterNode.html] +[test_channelSplitterNodeWithVolume.html] +skip-if = (android_version == '18' && debug) # bug 1158417 +[test_convolverNode.html] +[test_convolverNode_mono_mono.html] +[test_convolverNodeChannelCount.html] +[test_convolverNodeDelay.html] +[test_convolverNodeFiniteInfluence.html] +[test_convolverNodePassThrough.html] +[test_convolverNodeWithGain.html] +[test_currentTime.html] +[test_decodeMultichannel.html] +[test_decodeAudioDataPromise.html] +[test_decodeOpusTail.html] +[test_delayNode.html] +[test_delayNodeAtMax.html] +[test_delayNodeChannelChanges.html] 
+skip-if = toolkit == 'android' # bug 1056706 +[test_delayNodeCycles.html] +[test_delayNodePassThrough.html] +[test_delayNodeSmallMaxDelay.html] +[test_delayNodeTailIncrease.html] +[test_delayNodeTailWithDisconnect.html] +[test_delayNodeTailWithGain.html] +[test_delayNodeTailWithReconnect.html] +[test_delayNodeWithGain.html] +[test_disconnectAll.html] +[test_disconnectAudioParam.html] +[test_disconnectAudioParamFromOutput.html] +[test_disconnectExceptions.html] +[test_disconnectFromAudioNode.html] +[test_disconnectFromAudioNodeAndOutput.html] +[test_disconnectFromAudioNodeAndOutputAndInput.html] +[test_disconnectFromAudioNodeMultipleConnection.html] +[test_disconnectFromOutput.html] +[test_dynamicsCompressorNode.html] +[test_dynamicsCompressorNodePassThrough.html] +[test_dynamicsCompressorNodeWithGain.html] +[test_gainNode.html] +[test_gainNodeInLoop.html] +[test_gainNodePassThrough.html] +[test_iirFilterNodePassThrough.html] +[test_maxChannelCount.html] +[test_mediaDecoding.html] +[test_mediaElementAudioSourceNode.html] +tags=capturestream +[test_mediaElementAudioSourceNodeFidelity.html] +tags=capturestream +[test_mediaElementAudioSourceNodePassThrough.html] +tags=capturestream +skip-if = toolkit == 'android' # bug 1145816 +[test_mediaElementAudioSourceNodeVideo.html] +tags=capturestream +[test_mediaElementAudioSourceNodeCrossOrigin.html] +tags=capturestream +skip-if = toolkit == 'android' # bug 1145816 +[test_mediaStreamAudioDestinationNode.html] +[test_mediaStreamAudioSourceNode.html] +[test_mediaStreamAudioSourceNodeCrossOrigin.html] +tags=capturestream +[test_mediaStreamAudioSourceNodeNoGC.html] +[test_mediaStreamAudioSourceNodePassThrough.html] +[test_mediaStreamAudioSourceNodeResampling.html] +tags=capturestream +[test_mixingRules.html] +skip-if = toolkit == 'android' # bug 1091965 +[test_mozaudiochannel.html] +# Android: bug 1061675; OSX 10.6: bug 1097721 +skip-if = (toolkit == 'android') || (os == 'mac' && os_version == '10.6') 
+[test_nodeToParamConnection.html] +[test_OfflineAudioContext.html] +[test_offlineDestinationChannelCountLess.html] +[test_offlineDestinationChannelCountMore.html] +[test_oscillatorNode.html] +[test_oscillatorNode2.html] +[test_oscillatorNodeNegativeFrequency.html] +[test_oscillatorNodePassThrough.html] +[test_oscillatorNodeStart.html] +[test_oscillatorTypeChange.html] +[test_pannerNode.html] +[test_pannerNode_equalPower.html] +[test_pannerNodeAbove.html] +[test_pannerNodeAtZeroDistance.html] +[test_pannerNodeChannelCount.html] +[test_pannerNodeHRTFSymmetry.html] +[test_pannerNodeTail.html] +[test_pannerNode_maxDistance.html] +[test_stereoPannerNode.html] +[test_stereoPannerNodePassThrough.html] +[test_periodicWave.html] +[test_periodicWaveDisableNormalization.html] +[test_periodicWaveBandLimiting.html] +[test_ScriptProcessorCollected1.html] +[test_scriptProcessorNode.html] +[test_scriptProcessorNodeChannelCount.html] +[test_scriptProcessorNodePassThrough.html] +[test_scriptProcessorNode_playbackTime1.html] +[test_scriptProcessorNodeZeroInputOutput.html] +[test_scriptProcessorNodeNotConnected.html] +[test_sequentialBufferSourceWithResampling.html] +[test_singleSourceDest.html] +[test_stereoPanningWithGain.html] +[test_waveDecoder.html] +[test_waveShaper.html] +[test_waveShaperGain.html] +[test_waveShaperNoCurve.html] +[test_waveShaperPassThrough.html] +[test_waveShaperInvalidLengthCurve.html] +[test_WebAudioMemoryReporting.html] diff --git a/dom/media/webaudio/test/noaudio.webm b/dom/media/webaudio/test/noaudio.webm Binary files differnew file mode 100644 index 000000000..9207017fb --- /dev/null +++ b/dom/media/webaudio/test/noaudio.webm diff --git a/dom/media/webaudio/test/sine-440-10s.opus b/dom/media/webaudio/test/sine-440-10s.opus Binary files differnew file mode 100644 index 000000000..eb9102016 --- /dev/null +++ b/dom/media/webaudio/test/sine-440-10s.opus diff --git a/dom/media/webaudio/test/small-shot-expected.wav 
b/dom/media/webaudio/test/small-shot-expected.wav Binary files differnew file mode 100644 index 000000000..2faaa8258 --- /dev/null +++ b/dom/media/webaudio/test/small-shot-expected.wav diff --git a/dom/media/webaudio/test/small-shot-mono-expected.wav b/dom/media/webaudio/test/small-shot-mono-expected.wav Binary files differnew file mode 100644 index 000000000..d4e2647e4 --- /dev/null +++ b/dom/media/webaudio/test/small-shot-mono-expected.wav diff --git a/dom/media/webaudio/test/small-shot.mp3 b/dom/media/webaudio/test/small-shot.mp3 Binary files differnew file mode 100644 index 000000000..f9397a510 --- /dev/null +++ b/dom/media/webaudio/test/small-shot.mp3 diff --git a/dom/media/webaudio/test/small-shot.ogg b/dom/media/webaudio/test/small-shot.ogg Binary files differnew file mode 100644 index 000000000..1a41623f8 --- /dev/null +++ b/dom/media/webaudio/test/small-shot.ogg diff --git a/dom/media/webaudio/test/sweep-300-330-1sec.opus b/dom/media/webaudio/test/sweep-300-330-1sec.opus Binary files differnew file mode 100644 index 000000000..619d1e084 --- /dev/null +++ b/dom/media/webaudio/test/sweep-300-330-1sec.opus diff --git a/dom/media/webaudio/test/test_AudioBuffer.html b/dom/media/webaudio/test/test_AudioBuffer.html new file mode 100644 index 000000000..82bfdd420 --- /dev/null +++ b/dom/media/webaudio/test/test_AudioBuffer.html @@ -0,0 +1,105 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether we can create an AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(2, 2048, context.sampleRate); + SpecialPowers.gc(); // Make sure that our channels are 
accessible after GC + ok(buffer, "Buffer was allocated successfully"); + is(buffer.sampleRate, context.sampleRate, "Correct sample rate"); + is(buffer.length, 2048, "Correct length"); + ok(Math.abs(buffer.duration - 2048 / context.sampleRate) < 0.0001, "Correct duration"); + is(buffer.numberOfChannels, 2, "Correct number of channels"); + for (var i = 0; i < buffer.numberOfChannels; ++i) { + var buf = buffer.getChannelData(i); + ok(buf, "Buffer index " + i + " exists"); + ok(buf instanceof Float32Array, "Result is a typed array"); + is(buf.length, buffer.length, "Correct length"); + var foundNonZero = false; + for (var j = 0; j < buf.length; ++j) { + if (buf[j] != 0) { + foundNonZero = true; + break; + } + buf[j] = j; + } + ok(!foundNonZero, "Buffer " + i + " should be initialized to 0"); + } + + // Now test copying the channel data out of a normal buffer + var copy = new Float32Array(100); + buffer.copyFromChannel(copy, 0, 1024); + for (var i = 0; i < copy.length; ++i) { + is(copy[i], 1024 + i, "Correct sample"); + } + + // Test copying the channel data out of a playing buffer + var srcNode = context.createBufferSource(); + srcNode.buffer = buffer; + srcNode.start(0); + copy = new Float32Array(100); + buffer.copyFromChannel(copy, 0, 1024); + for (var i = 0; i < copy.length; ++i) { + is(copy[i], 1024 + i, "Correct sample"); + } + + // Test copying to the channel data + var newData = new Float32Array(200); + buffer.copyToChannel(newData, 0, 100); + var changedData = buffer.getChannelData(0); + for (var i = 0; i < changedData.length; ++i) { + if (i < 100 || i >= 300) { + is(changedData[i], i, "Untouched sample"); + } else { + is(changedData[i], 0, "Correct sample"); + } + } + + // Now, detach the array buffer + var worker = new Worker("audioBufferSourceNodeDetached_worker.js"); + var data = buffer.getChannelData(0).buffer; + worker.postMessage(data, [data]); + SpecialPowers.gc(); + + expectException(function() { + buffer.copyFromChannel(copy, 0, 1024); + }, 
DOMException.INDEX_SIZE_ERR); + + expectException(function() { + buffer.copyToChannel(newData, 0, 100); + }, DOMException.INDEX_SIZE_ERR); + + expectException(function() { + context.createBuffer(2, 2048, 7999); + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + context.createBuffer(2, 2048, 192001); + }, DOMException.INDEX_SIZE_ERR); + context.createBuffer(2, 2048, 8000); // no exception + context.createBuffer(2, 2048, 192000); // no exception + context.createBuffer(32, 2048, 48000); // no exception + // Null length + expectException(function() { + context.createBuffer(2, 0, 48000); + }, DOMException.INDEX_SIZE_ERR); + // Null number of channels + expectException(function() { + context.createBuffer(0, 2048, 48000); + }, DOMException.INDEX_SIZE_ERR); + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_AudioContext.html b/dom/media/webaudio/test/test_AudioContext.html new file mode 100644 index 000000000..0cab4354e --- /dev/null +++ b/dom/media/webaudio/test/test_AudioContext.html @@ -0,0 +1,23 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether we can create an AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ac = new AudioContext(); + ok(ac, "Create a AudioContext object"); + ok(ac instanceof EventTarget, "AudioContexts must be EventTargets"); + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_AudioContext_disabled.html b/dom/media/webaudio/test/test_AudioContext_disabled.html new file mode 100644 index 000000000..03e0775ed --- /dev/null +++ b/dom/media/webaudio/test/test_AudioContext_disabled.html @@ -0,0 +1,56 @@ +<!DOCTYPE HTML> 
+<html> +<head> + <title>Test whether we can disable the AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +const webaudio_interfaces = [ + "AudioContext", + "OfflineAudioContext", + "AudioContext", + "OfflineAudioCompletionEvent", + "AudioNode", + "AudioDestinationNode", + "AudioParam", + "GainNode", + "DelayNode", + "AudioBuffer", + "AudioBufferSourceNode", + "MediaElementAudioSourceNode", + "ScriptProcessorNode", + "AudioProcessingEvent", + "PannerNode", + "AudioListener", + "StereoPannerNode", + "ConvolverNode", + "AnalyserNode", + "ChannelSplitterNode", + "ChannelMergerNode", + "DynamicsCompressorNode", + "BiquadFilterNode", + "IIRFilterNode", + "WaveShaperNode", + "OscillatorNode", + "PeriodicWave", + "MediaStreamAudioSourceNode", + "MediaStreamAudioDestinationNode" +]; + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + SpecialPowers.pushPrefEnv({"set": [["dom.webaudio.enabled", false]]}, function() { + webaudio_interfaces.forEach((e) => ok(!window[e], e + " must be disabled when the Web Audio API is disabled")); + SimpleTest.finish(); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_AudioListener.html b/dom/media/webaudio/test/test_AudioListener.html new file mode 100644 index 000000000..07ad154d7 --- /dev/null +++ b/dom/media/webaudio/test/test_AudioListener.html @@ -0,0 +1,35 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioContext.listener and the AudioListener interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" 
type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + ok("listener" in context, "AudioContext.listener should exist"); + ok(Math.abs(context.listener.dopplerFactor - 1.0) < 1e-4, "Correct default doppler factor"); + ok(Math.abs(context.listener.speedOfSound - 343.3) < 1e-4, "Correct default speed of sound value"); + context.listener.dopplerFactor = 0.5; + ok(Math.abs(context.listener.dopplerFactor - 0.5) < 1e-4, "The doppler factor value can be changed"); + context.listener.speedOfSound = 400; + ok(Math.abs(context.listener.speedOfSound - 400) < 1e-4, "The speed of sound can be changed"); + // The values set by the following cannot be read from script, but the + // implementation is simple enough, so we just make sure that nothing throws. + with (context.listener) { + setPosition(1.0, 1.0, 1.0); + setOrientation(1.0, 1.0, 1.0, 1.0, 1.0, 1.0); + setVelocity(0.5, 1.0, 1.5); + } + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html b/dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html new file mode 100644 index 000000000..49f71505d --- /dev/null +++ b/dom/media/webaudio/test/test_AudioNodeDevtoolsAPI.html @@ -0,0 +1,59 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the devtool AudioNode API</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + + SimpleTest.waitForExplicitFinish(); + + function id(node) { + return SpecialPowers.getPrivilegedProps(node, "id"); + } + + var ac = new AudioContext(); + var ids; + var weak; + (function() { + var src1 = ac.createBufferSource(); + var src2 = ac.createBufferSource(); + ok(id(src2) > id(src1), "The ID should be monotonic"); + ok(id(src1) > id(ac.destination), "The ID of the 
destination node should be the lowest"); + ids = [id(src1), id(src2)]; + weak = SpecialPowers.Cu.getWeakReference(src1); + is(SpecialPowers.unwrap(weak.get()), src1, "The node should support a weak reference"); + })(); + function observer(subject, topic, data) { + var id = parseInt(data); + var index = ids.indexOf(id); + if (index != -1) { + info("Dropping id " + id + " at index " + index); + ids.splice(index, 1); + if (ids.length == 0) { + SimpleTest.executeSoon(function() { + is(weak.get(), null, "The weak reference must be dropped now"); + SpecialPowers.removeObserver(observer, "webaudio-node-demise"); + SimpleTest.finish(); + }); + } + } + } + SpecialPowers.addObserver(observer, "webaudio-node-demise", false); + + forceCC(); + forceCC(); + + function forceCC() { + SpecialPowers.DOMWindowUtils.cycleCollect(); + SpecialPowers.DOMWindowUtils.garbageCollect(); + SpecialPowers.DOMWindowUtils.garbageCollect(); + } + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html b/dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html new file mode 100644 index 000000000..9b59dda8a --- /dev/null +++ b/dom/media/webaudio/test/test_AudioParamDevtoolsAPI.html @@ -0,0 +1,49 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the devtool AudioParam API</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + + function checkIdAndName(node, name) { + is(SpecialPowers.getPrivilegedProps(node, "id"), + SpecialPowers.getPrivilegedProps(node[name], "parentNodeId"), + "The parent id should be correct"); + is(SpecialPowers.getPrivilegedProps(node[name], "name"), name, + "The name of the AudioParam should be correct."); + } + + var ac = new AudioContext(), + gain = ac.createGain(), + osc = ac.createOscillator(), + del = ac.createDelay(), + source 
= ac.createBufferSource(), + stereoPanner = ac.createStereoPanner(), + comp = ac.createDynamicsCompressor(), + biquad = ac.createBiquadFilter(); + + checkIdAndName(gain, "gain"); + checkIdAndName(osc, "frequency"); + checkIdAndName(osc, "detune"); + checkIdAndName(del, "delayTime"); + checkIdAndName(source, "playbackRate"); + checkIdAndName(source, "detune"); + checkIdAndName(stereoPanner, "pan"); + checkIdAndName(comp, "threshold"); + checkIdAndName(comp, "knee"); + checkIdAndName(comp, "ratio"); + checkIdAndName(comp, "attack"); + checkIdAndName(comp, "release"); + checkIdAndName(biquad, "frequency"); + checkIdAndName(biquad, "detune"); + checkIdAndName(biquad, "Q"); + checkIdAndName(biquad, "gain"); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_OfflineAudioContext.html b/dom/media/webaudio/test/test_OfflineAudioContext.html new file mode 100644 index 000000000..81d3e2313 --- /dev/null +++ b/dom/media/webaudio/test/test_OfflineAudioContext.html @@ -0,0 +1,102 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test OfflineAudioContext</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var renderedBuffer = null; +var finished = 0; + +function finish() { + finished++; + if (finished == 2) { + SimpleTest.finish(); + } +} + +function setOrCompareRenderedBuffer(aRenderedBuffer) { + if (renderedBuffer) { + is(renderedBuffer, aRenderedBuffer, "Rendered buffers from the event and the promise should be the same"); + finish(); + } else { + renderedBuffer = aRenderedBuffer; + } +} + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ctx = new OfflineAudioContext(2, 100, 22050); + ok(ctx instanceof EventTarget, "OfflineAudioContexts must be EventTargets"); + 
is(ctx.length, 100, "OfflineAudioContext.length is equal to the value passed to the ctor."); + + var buf = ctx.createBuffer(2, 100, ctx.sampleRate); + for (var i = 0; i < 2; ++i) { + for (var j = 0; j < 100; ++j) { + buf.getChannelData(i)[j] = Math.sin(2 * Math.PI * 200 * j / ctx.sampleRate); + } + } + + expectException(function() { + ctx.createMediaStreamDestination(); + }, DOMException.NOT_SUPPORTED_ERR); + + expectException(function() { + new OfflineAudioContext(2, 100, 0); + }, DOMException.NOT_SUPPORTED_ERR); + expectException(function() { + new OfflineAudioContext(2, 100, -1); + }, DOMException.NOT_SUPPORTED_ERR); + expectException(function() { + new OfflineAudioContext(0, 100, 44100); + }, DOMException.NOT_SUPPORTED_ERR); + new OfflineAudioContext(32, 100, 44100); + expectException(function() { + new OfflineAudioContext(33, 100, 44100); + }, DOMException.NOT_SUPPORTED_ERR); + expectException(function() { + new OfflineAudioContext(2, 0, 44100); + }, DOMException.NOT_SUPPORTED_ERR); + + var src = ctx.createBufferSource(); + src.buffer = buf; + src.start(0); + src.connect(ctx.destination); + + ctx.addEventListener("complete", function(e) { + ok(e instanceof OfflineAudioCompletionEvent, "Correct event received"); + is(e.renderedBuffer.numberOfChannels, 2, "Correct expected number of buffers"); + ok(renderedBuffer != null, "The event should be fired after the promise callback."); + expectNoException(function() { + ctx.startRendering().then(function() { + ok(false, "Promise should not resolve when startRendering is called a second time on an OfflineAudioContext") + finish(); + }).catch(function(err) { + ok(true, "Promise should reject when startRendering is called a second time on an OfflineAudioContext") + finish(); + }); + }); + compareBuffers(e.renderedBuffer, buf); + setOrCompareRenderedBuffer(e.renderedBuffer); + + }, false); + + expectNoException(function() { + ctx.startRendering().then(function(b) { + is(renderedBuffer, null, "The promise callback should be 
called first."); + setOrCompareRenderedBuffer(b); + }).catch(function (error) { + ok(false, "The promise from OfflineAudioContext.startRendering should never be rejected"); + }); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_ScriptProcessorCollected1.html b/dom/media/webaudio/test/test_ScriptProcessorCollected1.html new file mode 100644 index 000000000..931f995df --- /dev/null +++ b/dom/media/webaudio/test/test_ScriptProcessorCollected1.html @@ -0,0 +1,84 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ScriptProcessorNode in cycle with no listener is collected</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +var observer = function(subject, topic, data) { + var id = parseInt(data); + var index = ids.indexOf(id); + if (index != -1) { + ok(true, "Collected AudioNode id " + id + " at index " + index); + ids.splice(index, 1); + } +} + +SpecialPowers.addObserver(observer, "webaudio-node-demise", false); + +SimpleTest.registerCleanupFunction(function() { + if (observer) { + SpecialPowers.removeObserver(observer, "webaudio-node-demise"); + } +}); + +var ac = new AudioContext(); + +var testProcessor = ac.createScriptProcessor(256, 1, 0); +var delay = ac.createDelay(); +testProcessor.connect(delay); +delay.connect(testProcessor); + +var referenceProcessor = ac.createScriptProcessor(256, 1, 0); +var gain = ac.createGain(); +gain.connect(referenceProcessor); + +var processCount = 0; +testProcessor.onaudioprocess = function(event) { + ++processCount; + switch (processCount) { + case 1: + // Switch to listening to referenceProcessor; + referenceProcessor.onaudioprocess = event.target.onaudioprocess; + referenceProcessor = null; + event.target.onaudioprocess = null; + case 2: + // There are no references to 
testProcessor and so GC can begin. + SpecialPowers.forceGC(); + break; + case 3: + // Another GC should not be required after testProcessor would have + // received another audioprocess event. + SpecialPowers.forceCC(); + // Expect that webaudio-demise has been queued. + // Queue another event to check. + SimpleTest.executeSoon(function() { + SpecialPowers.removeObserver(observer, "webaudio-node-demise"); + observer = null; + event.target.onaudioprocess = null; + ok(ids.length == 0, "All expected nodes should be collected"); + SimpleTest.finish(); + }); + break; + } +}; + +function id(node) { + return SpecialPowers.getPrivilegedProps(node, "id"); +} + +// Nodes with these ids should be collected. +var ids = [ id(testProcessor), id(delay), id(gain) ]; +testProcessor = null; +delay = null; +gain = null; + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_WebAudioMemoryReporting.html b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html new file mode 100644 index 000000000..c753756e7 --- /dev/null +++ b/dom/media/webaudio/test/test_WebAudioMemoryReporting.html @@ -0,0 +1,54 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Web Audio memory reporting</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +var ac = new AudioContext(); +var sp = ac.createScriptProcessor(4096, 1, 1); +sp.connect(ac.destination); + +// Not started so as to test +// https://bugzilla.mozilla.org/show_bug.cgi?id=1225003#c2 +var oac = new OfflineAudioContext(1, 1, 48000); + +var nodeTypes = ["ScriptProcessorNode", "AudioDestinationNode"]; +var objectTypes = ["dom-nodes", "engine-objects", "stream-objects"]; + +var usages = { "explicit/webaudio/audiocontext": 0 }; + +for (var i = 0; i < nodeTypes.length; ++i) { + for (var j = 0; j < objectTypes.length; ++j) 
{ + usages["explicit/webaudio/audio-node/" + + nodeTypes[i] + "/" + objectTypes[j]] = 0; + } +} + +var handleReport = function(aProcess, aPath, aKind, aUnits, aAmount, aDesc) { + if (aPath in usages) { + usages[aPath] += aAmount; + } +} + +var finished = function () { + ok(true, "Yay didn't crash!"); + for (var resource in usages) { + ok(usages[resource] > 0, "Non-zero usage for " + resource); + }; + SimpleTest.finish(); +} + +SpecialPowers.Cc["@mozilla.org/memory-reporter-manager;1"]. + getService(SpecialPowers.Ci.nsIMemoryReporterManager). + getReports(handleReport, null, finished, null, /* anonymized = */ false); + +// To test bug 1225003, run a failing decodeAudioData() job over a time when +// the tasks from getReports() are expected to run. +ac.decodeAudioData(new ArrayBuffer(4), function(){}, function(){}); +</script> +</html> diff --git a/dom/media/webaudio/test/test_analyserNode.html b/dom/media/webaudio/test/test_analyserNode.html new file mode 100644 index 000000000..7af67a5a5 --- /dev/null +++ b/dom/media/webaudio/test/test_analyserNode.html @@ -0,0 +1,103 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AnalyserNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + + var context = new AudioContext(); + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var destination = context.destination; + + var source = context.createBufferSource(); + + var analyser = context.createAnalyser(); + + source.buffer = buffer; + + source.connect(analyser); + analyser.connect(destination); + + 
is(analyser.channelCount, 1, "analyser node has 1 input channels by default"); + is(analyser.channelCountMode, "max", "Correct channelCountMode for the analyser node"); + is(analyser.channelInterpretation, "speakers", "Correct channelCountInterpretation for the analyser node"); + + is(analyser.fftSize, 2048, "Correct default value for fftSize"); + is(analyser.frequencyBinCount, 1024, "Correct default value for frequencyBinCount"); + expectException(function() { + analyser.fftSize = 0; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 1; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 8; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 100; // non-power of two + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 2049; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 4097; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 8193; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 16385; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 32769; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.fftSize = 65536; + }, DOMException.INDEX_SIZE_ERR); + analyser.fftSize = 1024; + is(analyser.frequencyBinCount, 512, "Correct new value for frequencyBinCount"); + + is(analyser.minDecibels, -100, "Correct default value for minDecibels"); + is(analyser.maxDecibels, -30, "Correct default value for maxDecibels"); + expectException(function() { + analyser.minDecibels = -30; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.minDecibels = -29; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.maxDecibels = -100; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.maxDecibels = -101; + }, 
DOMException.INDEX_SIZE_ERR); + + is(analyser.smoothingTimeConstant, 0.8, "Correct default value for smoothingTimeConstant"); + expectException(function() { + analyser.smoothingTimeConstant = -0.1; + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + analyser.smoothingTimeConstant = 1.1; + }, DOMException.INDEX_SIZE_ERR); + analyser.smoothingTimeConstant = 0; + analyser.smoothingTimeConstant = 1; + + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_analyserNodeOutput.html b/dom/media/webaudio/test/test_analyserNodeOutput.html new file mode 100644 index 000000000..e6255fee0 --- /dev/null +++ b/dom/media/webaudio/test/test_analyserNodeOutput.html @@ -0,0 +1,43 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AnalyserNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var analyser = context.createAnalyser(); + + source.buffer = this.buffer; + + source.connect(analyser); + + source.start(0); + return analyser; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_analyserNodePassThrough.html b/dom/media/webaudio/test/test_analyserNodePassThrough.html new file mode 100644 index 000000000..37d1db510 --- /dev/null +++ b/dom/media/webaudio/test/test_analyserNodePassThrough.html @@ -0,0 
+1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AnalyserNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var analyser = context.createAnalyser(); + + source.buffer = this.buffer; + + source.connect(analyser); + + var analyserWrapped = SpecialPowers.wrap(analyser); + ok("passThrough" in analyserWrapped, "AnalyserNode should support the passThrough API"); + analyserWrapped.passThrough = true; + + source.start(0); + return analyser; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_analyserNodeWithGain.html b/dom/media/webaudio/test/test_analyserNodeWithGain.html new file mode 100644 index 000000000..fa0a2caa7 --- /dev/null +++ b/dom/media/webaudio/test/test_analyserNodeWithGain.html @@ -0,0 +1,47 @@ +<!DOCTYPE html> +<title>Test effect of AnalyserNode on GainNode output</title> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script> +promise_test(function() { + // fftSize <= bufferSize so that the time domain data is full of input after + // processing the buffer. 
+ const fftSize = 32; + const bufferSize = 128; + + var context = new OfflineAudioContext(1, bufferSize, 48000); + + var analyser1 = context.createAnalyser(); + analyser1.fftSize = fftSize; + analyser1.connect(context.destination); + var analyser2 = context.createAnalyser(); + analyser2.fftSize = fftSize; + + var gain = context.createGain(); + gain.gain.value = 2.0; + gain.connect(analyser1); + gain.connect(analyser2); + + // Create a DC input to make getFloatTimeDomainData() output consistent at + // any time. + var buffer = context.createBuffer(1, 1, context.sampleRate); + buffer.getChannelData(0)[0] = 1.0 / gain.gain.value; + var source = context.createBufferSource(); + source.buffer = buffer; + source.loop = true; + source.connect(gain); + source.start(); + + return context.startRendering(). + then(function(buffer) { + assert_equals(buffer.getChannelData(0)[0], 1.0, + "analyser1 output"); + + var data = new Float32Array(1); + analyser1.getFloatTimeDomainData(data); + assert_equals(data[0], 1.0, "analyser1 time domain data"); + analyser2.getFloatTimeDomainData(data); + assert_equals(data[0], 1.0, "analyser2 time domain data"); + }); +}); +</script> diff --git a/dom/media/webaudio/test/test_analyserScale.html b/dom/media/webaudio/test/test_analyserScale.html new file mode 100644 index 000000000..3aec8d22b --- /dev/null +++ b/dom/media/webaudio/test/test_analyserScale.html @@ -0,0 +1,59 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AnalyserNode when the input is scaled </title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + + var context = new AudioContext(); + + var gain = context.createGain(); + var analyser = context.createAnalyser(); + 
var osc = context.createOscillator(); + + + osc.connect(gain); + gain.connect(analyser); + + osc.start(); + + var array = new Uint8Array(analyser.frequencyBinCount); + + function getAnalyserData() { + gain.gain.setValueAtTime(currentGain, context.currentTime); + analyser.getByteTimeDomainData(array); + var inrange = true; + var max = -1; + for (var i = 0; i < array.length; i++) { + if (array[i] > max) { + max = Math.abs(array[i] - 128); + } + } + if (max <= currentGain * 128) { + ok(true, "Analyser got scaled data for " + currentGain); + currentGain = tests.shift(); + if (currentGain == undefined) { + SimpleTest.finish(); + return; + } + } + requestAnimationFrame(getAnalyserData); + } + + var tests = [1.0, 0.5, 0.0]; + var currentGain = tests.shift(); + requestAnimationFrame(getAnalyserData); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNode.html b/dom/media/webaudio/test/test_audioBufferSourceNode.html new file mode 100644 index 000000000..875c96c36 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNode.html @@ -0,0 +1,44 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + source.start(0); + source.buffer = buffer; + return source; + }, + createExpectedBuffers: function(context) { + var buffers = []; + var buffer = context.createBuffer(2, 2048, 
context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + buffer.getChannelData(1)[i] = buffer.getChannelData(0)[i]; + } + buffers.push(buffer); + buffers.push(getEmptyBuffer(context, 2048)); + return buffers; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html b/dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html new file mode 100644 index 000000000..4d06c26ca --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeDetached.html @@ -0,0 +1,58 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode when an AudioBuffer's getChanneData buffer is detached</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function createGarbage() { + var s = []; + for (var i = 0; i < 10000000; ++i) { + s.push(i); + } + var sum = 0; + for (var i = 0; i < s.length; ++i) { + sum += s[i]; + } + return sum; +} + +var worker = new Worker("audioBufferSourceNodeDetached_worker.js"); + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 10000000, context.sampleRate); + var data = buffer.getChannelData(0); + for (var i = 0; i < data.length; ++i) { + data[i] = (i%100)/100 - 0.5; + } + + // Detach the buffer now + var data = buffer.getChannelData(0).buffer; + worker.postMessage(data, [data]); + // Create garbage and GC to replace the buffer data with garbage + SpecialPowers.gc(); + createGarbage(); + SpecialPowers.gc(); + + var source = context.createBufferSource(); + source.buffer = buffer; + source.start(); + // This should play silence + return 
source; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html b/dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html new file mode 100644 index 000000000..08616bea6 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeEnded.html @@ -0,0 +1,36 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ended event on AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + + source.onended = function(e) { + is(e.target, source, "Correct target for the ended event"); + SimpleTest.finish(); + }; + + source.start(0); + source.buffer = buffer; + source.connect(context.destination); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html new file mode 100644 index 000000000..0893cf10b --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLazyLoopParam.html @@ -0,0 +1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script 
class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + // silence for half of the buffer, ones after that. + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 1024; i < 2048; i++) { + buffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + + // we start at the 1024 frames, we should only have ones. + source.loop = true; + source.loopStart = 1024 / context.sampleRate; + source.loopEnd = 2048 / context.sampleRate; + source.buffer = buffer; + source.start(0, 1024 / context.sampleRate, 1024 / context.sampleRate); + return source; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate); + for (var i = 0; i < 4096; i++) { + expectedBuffer.getChannelData(0)[i] = 1; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html new file mode 100644 index 000000000..79c78dfe0 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLoop.html @@ -0,0 +1,45 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode looping</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048 * 4, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + source.buffer = buffer; + + 
source.start(0); + source.loop = true; + return source; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048 * 4, context.sampleRate); + for (var i = 0; i < 4; ++i) { + for (var j = 0; j < 2048; ++j) { + expectedBuffer.getChannelData(0)[i * 2048 + j] = Math.sin(440 * 2 * Math.PI * j / context.sampleRate); + } + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html new file mode 100644 index 000000000..6f60762eb --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEnd.html @@ -0,0 +1,52 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode looping</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048 * 4, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + source.buffer = buffer; + source.start(0); + source.loop = true; + source.loopStart = buffer.duration * 0.25; + source.loopEnd = buffer.duration * 0.75; + return source; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048 * 4, context.sampleRate); + for (var i = 0; i < 1536; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + for (var i = 0; i < 6; ++i) { + for (var j = 512; j < 1536; ++j) { + 
expectedBuffer.getChannelData(0)[1536 + i * 1024 + j - 512] = Math.sin(440 * 2 * Math.PI * j / context.sampleRate); + } + } + for (var j = 7680; j < 2048 * 4; ++j) { + expectedBuffer.getChannelData(0)[j] = Math.sin(440 * 2 * Math.PI * (j - 7168) / context.sampleRate); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html new file mode 100644 index 000000000..eca4bf636 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeLoopStartEndSame.html @@ -0,0 +1,44 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode looping</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + source.buffer = buffer; + + source.loop = true; + source.loopStart = source.loopEnd = 1 / context.sampleRate; + source.start(0); + return source; + }, + createExpectedBuffers: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + return buffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html 
b/dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html new file mode 100644 index 000000000..89340ade8 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeNoStart.html @@ -0,0 +1,33 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode when start() is not called</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + var data = buffer.getChannelData(0); + for (var i = 0; i < data.length; ++i) { + data[i] = (i%100)/100 - 0.5; + } + var source = context.createBufferSource(); + source.buffer = buffer; + return source; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html b/dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html new file mode 100644 index 000000000..6ca771af1 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeNullBuffer.html @@ -0,0 +1,31 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + source.start(0); + source.buffer = null; + is(source.buffer, null, "Try playing back a null buffer"); + return source; + }, +}; + 
+runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html new file mode 100644 index 000000000..b7a16634e --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeOffset.html @@ -0,0 +1,55 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the offset property on AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var fuzz = 0.3; + +if (navigator.platform.startsWith("Mac")) { + // bug 895720 + fuzz = 0.6; +} + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var samplesFromSource = 0; + var context = new AudioContext(); + var sp = context.createScriptProcessor(256); + + sp.onaudioprocess = function(e) { + samplesFromSource += e.inputBuffer.length; + } + + var buffer = context.createBuffer(1, context.sampleRate, context.sampleRate); + for (var i = 0; i < context.sampleRate; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + + source.onended = function(e) { + // The timing at which the audioprocess and ended listeners are called can + // change, hence the fuzzy equal here. 
+ var errorRatio = samplesFromSource / (0.5 * context.sampleRate); + ok(errorRatio > (1.0 - fuzz) && errorRatio < (1.0 + fuzz), + "Correct number of samples received (expected: " + + (0.5 * context.sampleRate) + ", actual: " + samplesFromSource + ")."); + SimpleTest.finish(); + }; + + source.buffer = buffer; + source.connect(sp); + source.start(0, 0.5); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html b/dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html new file mode 100644 index 000000000..5088f1637 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodePassThrough.html @@ -0,0 +1,45 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + + source.buffer = buffer; + + var srcWrapped = SpecialPowers.wrap(source); + ok("passThrough" in srcWrapped, "AudioBufferSourceNode should support the passThrough API"); + srcWrapped.passThrough = true; + + source.start(0); + return source; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + + return [expectedBuffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioBufferSourceNodeRate.html 
b/dom/media/webaudio/test/test_audioBufferSourceNodeRate.html new file mode 100644 index 000000000..2cdcd7270 --- /dev/null +++ b/dom/media/webaudio/test/test_audioBufferSourceNodeRate.html @@ -0,0 +1,66 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +var rate = 44100; +var off = new OfflineAudioContext(1, rate, rate); +var off2 = new OfflineAudioContext(1, rate, rate); + +var source = off.createBufferSource(); +var source2 = off2.createBufferSource(); + +// a buffer of a 440Hz at half the length. If we detune by -1200 or set the +// playbackRate to 0.5, we should get 44100 samples back with a sine at 220Hz. +var buf = off.createBuffer(1, rate / 2, rate); +var bufarray = buf.getChannelData(0); +for (var i = 0; i < bufarray.length; i++) { + bufarray[i] = Math.sin(i * 440 * 2 * Math.PI / rate); +} + +source.buffer = buf; +source.playbackRate.value = 0.5; // 50% slowdown +source.connect(off.destination); +source.start(0); + +source2.buffer = buf; +source2.detune.value = -1200; // one octave -> 50% slowdown +source2.connect(off2.destination); +source2.start(0); + +var finished = 0; +function finish() { + finished++; + if (finished == 2) { + SimpleTest.finish(); + } +} + +off.startRendering().then((renderedPlaybackRate) => { + // we don't care about comparing the value here, we just want to know whether + // the second part is noisy. 
+ var rmsValue = rms(renderedPlaybackRate, 0, 22050); + ok(rmsValue != 0, "Resampling happened (rms of the second part " + rmsValue + ")"); + + off2.startRendering().then((renderedDetune) => { + var rmsValue = rms(renderedDetune, 0, 22050); + ok(rmsValue != 0, "Resampling happened (rms of the second part " + rmsValue + ")"); + // The two buffers should be the same: detune of -1200 is a 50% slowdown + compareBuffers(renderedPlaybackRate, renderedDetune); + SimpleTest.finish(); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html new file mode 100644 index 000000000..269d5380e --- /dev/null +++ b/dom/media/webaudio/test/test_audioContextSuspendResumeClose.html @@ -0,0 +1,410 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test suspend, resume and close method of the AudioContext</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function tryToCreateNodeOnClosedContext(ctx) { + ok(ctx.state, "closed", "The context is in closed state"); + + [ { name: "createBufferSource" }, + { name: "createMediaStreamDestination", + onOfflineAudioContext: false}, + { name: "createScriptProcessor" }, + { name: "createStereoPanner" }, + { name: "createAnalyser" }, + { name: "createGain" }, + { name: "createDelay" }, + { name: "createBiquadFilter" }, + { name: "createWaveShaper" }, + { name: "createPanner" }, + { name: "createConvolver" }, + { name: "createChannelSplitter" }, + { name: "createChannelMerger" }, + { name: "createDynamicsCompressor" }, + { name: "createOscillator" }, + { name: "createMediaElementSource", + args: [new Audio()], + onOfflineAudioContext: false }, + { name: 
"createMediaStreamSource", + args: [new Audio().mozCaptureStream()], + onOfflineAudioContext: false } ].forEach(function(e) { + + if (e.onOfflineAudioContext == false && + ctx instanceof OfflineAudioContext) { + return; + } + + expectException(function() { + ctx[e.name].apply(ctx, e.args); + }, DOMException.INVALID_STATE_ERR); + }); +} + +function loadFile(url, callback) { + var xhr = new XMLHttpRequest(); + xhr.open("GET", url, true); + xhr.responseType = "arraybuffer"; + xhr.onload = function() { + callback(xhr.response); + }; + xhr.send(); +} + +// createBuffer, createPeriodicWave and decodeAudioData should work on a context +// that has `state` == "closed" +function tryLegalOpeerationsOnClosedContext(ctx) { + ok(ctx.state, "closed", "The context is in closed state"); + + [ { name: "createBuffer", + args: [1, 44100, 44100] }, + { name: "createPeriodicWave", + args: [new Float32Array(10), new Float32Array(10)] } + ].forEach(function(e) { + expectNoException(function() { + ctx[e.name].apply(ctx, e.args); + }); + }); + loadFile("ting-44.1k-1ch.ogg", function(buf) { + ctx.decodeAudioData(buf).then(function(decodedBuf) { + ok(true, "decodeAudioData on a closed context should work, it did.") + todo(false, "0 " + (ctx instanceof OfflineAudioContext ? 
"Offline" : "Realtime")); + finish(); + }).catch(function(e){ + ok(false, "decodeAudioData on a closed context should work, it did not"); + finish(); + }); + }); +} + +// Test that MediaStreams that are the output of a suspended AudioContext are +// producing silence +// ac1 produce a sine fed to a MediaStreamAudioDestinationNode +// ac2 is connected to ac1 with a MediaStreamAudioSourceNode, and check that +// there is silence when ac1 is suspended +function testMultiContextOutput() { + var ac1 = new AudioContext(), + ac2 = new AudioContext(); + + ac1.onstatechange = function() { + ac1.onstatechange = null; + + var osc1 = ac1.createOscillator(), + mediaStreamDestination1 = ac1.createMediaStreamDestination(); + + var mediaStreamAudioSourceNode2 = + ac2.createMediaStreamSource(mediaStreamDestination1.stream), + sp2 = ac2.createScriptProcessor(), + silentBuffersInARow = 0; + + + sp2.onaudioprocess = function(e) { + ac1.suspend().then(function() { + is(ac1.state, "suspended", "ac1 is suspended"); + sp2.onaudioprocess = checkSilence; + }); + sp2.onaudioprocess = null; + } + + function checkSilence(e) { + var input = e.inputBuffer.getChannelData(0); + var silent = true; + for (var i = 0; i < input.length; i++) { + if (input[i] != 0.0) { + silent = false; + } + } + + todo(false, "input buffer is " + (silent ? "silent" : "noisy")); + + if (silent) { + silentBuffersInARow++; + if (silentBuffersInARow == 10) { + ok(true, + "MediaStreams produce silence when their input is blocked."); + sp2.onaudioprocess = null; + ac1.close(); + ac2.close(); + todo(false,"1"); + finish(); + } + } else { + is(silentBuffersInARow, 0, + "No non silent buffer inbetween silent buffers."); + } + } + + osc1.connect(mediaStreamDestination1); + + mediaStreamAudioSourceNode2.connect(sp2); + osc1.start(); + } +} + + +// Test that there is no buffering between contexts when connecting a running +// AudioContext to a suspended AudioContext. 
Our ScriptProcessorNode does some +// buffering internally, so we ensure this by using a very very low frequency +// on a sine, and oberve that the phase has changed by a big enough margin. +function testMultiContextInput() { + var ac1 = new AudioContext(), + ac2 = new AudioContext(); + + ac1.onstatechange = function() { + ac1.onstatechange = null; + + var osc1 = ac1.createOscillator(), + mediaStreamDestination1 = ac1.createMediaStreamDestination(), + sp1 = ac1.createScriptProcessor(); + + var mediaStreamAudioSourceNode2 = + ac2.createMediaStreamSource(mediaStreamDestination1.stream), + sp2 = ac2.createScriptProcessor(), + eventReceived = 0; + + + osc1.frequency.value = 0.0001; + + function checkDiscontinuity(e) { + var inputBuffer = e.inputBuffer.getChannelData(0); + if (eventReceived++ == 3) { + var delta = Math.abs(inputBuffer[1] - sp2.value), + theoreticalIncrement = 2048 * 3 * Math.PI * 2 * osc1.frequency.value / ac1.sampleRate; + ok(delta >= theoreticalIncrement, + "Buffering did not occur when the context was suspended (delta:" + delta + " increment: " + theoreticalIncrement+")"); + ac1.close(); + ac2.close(); + sp1.onaudioprocess = null; + sp2.onaudioprocess = null; + todo(false, "2"); + finish(); + } + } + + sp2.onaudioprocess = function(e) { + var inputBuffer = e.inputBuffer.getChannelData(0); + sp2.value = inputBuffer[inputBuffer.length - 1]; + ac2.suspend().then(function() { + ac2.resume().then(function() { + sp2.onaudioprocess = checkDiscontinuity; + }); + }); + } + + osc1.connect(mediaStreamDestination1); + osc1.connect(sp1); + + mediaStreamAudioSourceNode2.connect(sp2); + osc1.start(); + } +} + +// Test that ScriptProcessorNode's onaudioprocess don't get called while the +// context is suspended/closed. It is possible that we get the handler called +// exactly once after suspend, because the event has already been sent to the +// event loop. 
+function testScriptProcessNodeSuspended() { + var ac = new AudioContext(); + var sp = ac.createScriptProcessor(); + var remainingIterations = 30; + var afterResume = false; + ac.onstatechange = function() { + ac.onstatechange = null; + sp.onaudioprocess = function() { + ok(ac.state == "running", "If onaudioprocess is called, the context" + + " must be running (was " + ac.state + ", remainingIterations:" + remainingIterations +")"); + remainingIterations--; + if (!afterResume) { + if (remainingIterations == 0) { + ac.suspend().then(function() { + ac.resume().then(function() { + remainingIterations = 30; + afterResume = true; + }); + }); + } + } else { + sp.onaudioprocess = null; + todo(false,"3"); + finish(); + } + } + } + sp.connect(ac.destination); +} + +// Take an AudioContext, make sure it switches to running when the audio starts +// flowing, and then, call suspend, resume and close on it, tracking its state. +function testAudioContext() { + var ac = new AudioContext(); + is(ac.state, "suspended", "AudioContext should start in suspended state."); + var stateTracker = { + previous: ac.state, + // no promise for the initial suspended -> running + initial: { handler: false }, + suspend: { promise: false, handler: false }, + resume: { promise: false, handler: false }, + close: { promise: false, handler: false } + }; + + function initialSuspendToRunning() { + ok(stateTracker.previous == "suspended" && + ac.state == "running", + "AudioContext should switch to \"running\" when the audio hardware is" + + " ready."); + + stateTracker.previous = ac.state; + ac.onstatechange = afterSuspend; + stateTracker.initial.handler = true; + + ac.suspend().then(function() { + ok(!stateTracker.suspend.promise && !stateTracker.suspend.handler, + "Promise should be resolved before the callback, and only once.") + stateTracker.suspend.promise = true; + }); + } + + function afterSuspend() { + ok(stateTracker.previous == "running" && + ac.state == "suspended", + "AudioContext should 
switch to \"suspend\" when the audio stream is" + + "suspended."); + ok(stateTracker.suspend.promise && !stateTracker.suspend.handler, + "Handler should be called after the callback, and only once"); + + stateTracker.suspend.handler = true; + stateTracker.previous = ac.state; + ac.onstatechange = afterResume; + + ac.resume().then(function() { + ok(!stateTracker.resume.promise && !stateTracker.resume.handler, + "Promise should be called before the callback, and only once"); + stateTracker.resume.promise = true; + }); + } + + function afterResume() { + ok(stateTracker.previous == "suspended" && + ac.state == "running", + "AudioContext should switch to \"running\" when the audio stream resumes."); + + ok(stateTracker.resume.promise && !stateTracker.resume.handler, + "Handler should be called after the callback, and only once"); + + stateTracker.resume.handler = true; + stateTracker.previous = ac.state; + ac.onstatechange = afterClose; + + ac.close().then(function() { + ok(!stateTracker.close.promise && !stateTracker.close.handler, + "Promise should be called before the callback, and only once"); + stateTracker.close.promise = true; + tryToCreateNodeOnClosedContext(ac); + tryLegalOpeerationsOnClosedContext(ac); + }); + } + + function afterClose() { + ok(stateTracker.previous == "running" && + ac.state == "closed", + "AudioContext should switch to \"closed\" when the audio stream is" + + " closed."); + ok(stateTracker.close.promise && !stateTracker.close.handler, + "Handler should be called after the callback, and only once"); + } + + ac.onstatechange = initialSuspendToRunning; +} + +function testOfflineAudioContext() { + var o = new OfflineAudioContext(1, 44100, 44100); + is(o.state, "suspended", "OfflineAudioContext should start in suspended state."); + + expectRejectedPromise(o, "suspend", "NotSupportedError"); + expectRejectedPromise(o, "resume", "NotSupportedError"); + expectRejectedPromise(o, "close", "NotSupportedError"); + + var previousState = o.state, + 
finishedRendering = false; + function beforeStartRendering() { + ok(previousState == "suspended" && o.state == "running", "onstatechanged" + + "handler is called on state changed, and the new state is running"); + previousState = o.state; + o.onstatechange = onRenderingFinished; + } + + function onRenderingFinished() { + ok(previousState == "running" && o.state == "closed", + "onstatechanged handler is called when rendering finishes, " + + "and the new state is closed"); + ok(finishedRendering, "The Promise that is resolved when the rendering is" + + "done should be resolved earlier than the state change."); + previousState = o.state; + o.onstatechange = afterRenderingFinished; + + tryToCreateNodeOnClosedContext(o); + tryLegalOpeerationsOnClosedContext(o); + } + + function afterRenderingFinished() { + ok(false, "There should be no transition out of the closed state."); + } + + o.onstatechange = beforeStartRendering; + + o.startRendering().then(function(buffer) { + finishedRendering = true; + }); +} + +function testSuspendResumeEventLoop() { + var ac = new AudioContext(); + var source = ac.createBufferSource(); + source.buffer = ac.createBuffer(1, 44100, 44100); + source.onended = function() { + ok(true, "The AudioContext did resume."); + finish(); + } + ac.onstatechange = function() { + ac.onstatechange = null; + + ok(ac.state == "running", "initial state is running"); + ac.suspend(); + source.start(); + ac.resume(); + } +} + +var remaining = 0; +function finish() { + remaining--; + if (remaining == 0) { + SimpleTest.finish(); + } +} + + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var tests = [ + testAudioContext, + testOfflineAudioContext, + testScriptProcessNodeSuspended, + testMultiContextOutput, + testMultiContextInput, + testSuspendResumeEventLoop + ]; + remaining = tests.length; + tests.forEach(function(f) { f() }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioDestinationNode.html 
b/dom/media/webaudio/test/test_audioDestinationNode.html new file mode 100644 index 000000000..b86c7169d --- /dev/null +++ b/dom/media/webaudio/test/test_audioDestinationNode.html @@ -0,0 +1,26 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioDestinationNode as EventTarget</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +var ac = new AudioContext() +ac.destination.addEventListener("foo", function() { + ok(true, "Event received!"); + SimpleTest.finish(); +}, false); +ac.destination.dispatchEvent(new CustomEvent("foo")); + +</script> +</pre> +</body> +</html> + diff --git a/dom/media/webaudio/test/test_audioParamChaining.html b/dom/media/webaudio/test/test_audioParamChaining.html new file mode 100644 index 000000000..6093e4425 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamChaining.html @@ -0,0 +1,77 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether we can create an AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish() + +function frameToTime(frame, rate) +{ + return frame / rate; +} + +const RATE = 44100; + +var oc = new OfflineAudioContext(1, 44100, RATE); +// This allows us to have a source that is simply a DC offset. 
+var source = oc.createBufferSource(); +var buf = oc.createBuffer(1, 1, RATE); +buf.getChannelData(0)[0] = 1; +source.loop = true; +source.buffer = buf; + +source.start(0); + +var gain = oc.createGain(); + +source.connect(gain).connect(oc.destination); + +var gain2 = oc.createGain(); +var rv2 = gain2.gain.linearRampToValueAtTime(0.1, 0.5); +ok(rv2 instanceof AudioParam, "linearRampToValueAtTime returns an AudioParam."); +ok(rv2 == gain2.gain, "linearRampToValueAtTime returns the right AudioParam."); + +rv2 = gain2.gain.exponentialRampToValueAtTime(0.01, 1.0); +ok(rv2 instanceof AudioParam, + "exponentialRampToValueAtTime returns an AudioParam."); +ok(rv2 == gain2.gain, + "exponentialRampToValueAtTime returns the right AudioParam."); + +rv2 = gain2.gain.setTargetAtTime(1.0, 2.0, 0.1); +ok(rv2 instanceof AudioParam, "setTargetAtTime returns an AudioParam."); +ok(rv2 == gain2.gain, "setTargetAtTime returns the right AudioParam."); + +var array = new Float32Array(10); +rv2 = gain2.gain.setValueCurveAtTime(array, 10, 11); +ok(rv2 instanceof AudioParam, "setValueCurveAtTime returns an AudioParam."); +ok(rv2 == gain2.gain, "setValueCurveAtTime returns the right AudioParam."); + +// We chain three automation methods, making a gain step. 
+var rv = gain.gain.setValueAtTime(0, frameToTime(0, RATE)) + .setValueAtTime(0.5, frameToTime(22000, RATE)) + .setValueAtTime(1, frameToTime(44000, RATE)); + +ok(rv instanceof AudioParam, "setValueAtTime returns an AudioParam."); +ok(rv == gain.gain, "setValueAtTime returns the right AudioParam."); + +oc.startRendering().then(function(rendered) { + console.log(rendered.getChannelData(0)); + is(rendered.getChannelData(0)[0], 0, + "The value of the first step is correct."); + is(rendered.getChannelData(0)[22050], 0.5, + "The value of the second step is correct"); + is(rendered.getChannelData(0)[44099], 1, + "The value of the third step is correct."); + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamExponentialRamp.html b/dom/media/webaudio/test/test_audioParamExponentialRamp.html new file mode 100644 index 000000000..e1b1c5142 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamExponentialRamp.html @@ -0,0 +1,54 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.exponentialRampToValue</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var V0 = 0.1; +var V1 = 0.9; +var T0 = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.setValueAtTime(V0, 0); + gain.gain.exponentialRampToValueAtTime(V1, 2048/context.sampleRate); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: 
function(context) { + var T1 = 2048 / context.sampleRate; + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + var t = i / context.sampleRate; + expectedBuffer.getChannelData(0)[i] = V0 * Math.pow(V1 / V0, (t - T0) / (T1 - T0)); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamGain.html b/dom/media/webaudio/test/test_audioParamGain.html new file mode 100644 index 000000000..b971becce --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamGain.html @@ -0,0 +1,61 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam with pre-gain </title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +var ctx = new AudioContext(); +var source = ctx.createOscillator(); +var lfo = ctx.createOscillator(); +var lfoIntensity = ctx.createGain(); +var effect = ctx.createGain(); +var sp = ctx.createScriptProcessor(2048, 1); + +source.frequency.value = 440; +lfo.frequency.value = 2; +// Very low gain, so the LFO should have very little influence +// on the source, its RMS value should be close to the nominal value +// for a sine wave. +lfoIntensity.gain.value = 0.0001; + +lfo.connect(lfoIntensity); +lfoIntensity.connect(effect.gain); +source.connect(effect); +effect.connect(sp); + +sp.onaudioprocess = function(e) { + var buffer = e.inputBuffer.getChannelData(0); + var rms = 0; + for (var i = 0; i < buffer.length; i++) { + rms += buffer[i] * buffer[i]; + } + + rms /= buffer.length; + rms = Math.sqrt(rms); + + // 1 / Math.sqrt(2) is the theoretical RMS value for a sine wave. 
+ ok(fuzzyCompare(rms, 1 / Math.sqrt(2)), + "Gain correctly applied to the AudioParam."); + + ctx = null; + sp.onaudioprocess = null; + lfo.stop(0); + source.stop(0); + + SimpleTest.finish(); +} + +lfo.start(0); +source.start(0); + +</script> +</pre> +</body> diff --git a/dom/media/webaudio/test/test_audioParamLinearRamp.html b/dom/media/webaudio/test/test_audioParamLinearRamp.html new file mode 100644 index 000000000..31f1d80d6 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamLinearRamp.html @@ -0,0 +1,54 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.linearRampToValue</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var V0 = 0.1; +var V1 = 0.9; +var T0 = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.setValueAtTime(V0, 0); + gain.gain.linearRampToValueAtTime(V1, 2048/context.sampleRate); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + var T1 = 2048 / context.sampleRate; + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + var t = i / context.sampleRate; + expectedBuffer.getChannelData(0)[i] = V0 + (V1 - V0) * ((t - T0) / (T1 - T0)); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamSetCurveAtTime.html 
b/dom/media/webaudio/test/test_audioParamSetCurveAtTime.html new file mode 100644 index 000000000..bcb655b52 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamSetCurveAtTime.html @@ -0,0 +1,54 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.linearRampToValue</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var T0 = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createConstantSource(); + + var gain = context.createGain(); + gain.gain.setValueCurveAtTime(this.curve, T0, this.duration); + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + this.duration = 1024 / context.sampleRate; + this.curve = new Float32Array([1.0, 0.5, 0.75, 0.25]); + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + var data = expectedBuffer.getChannelData(0); + var step = 1024 / 3; + for (var i = 0; i < 2048; ++i) { + if (i < step) { + data[i] = 1.0 - 0.5*i/step; + } else if (i < 2*step) { + data[i] = 0.5 + 0.25*(i - step)/step; + } else if (i < 3*step) { + data[i] = 0.75 - 0.5*(i - 2*step)/step; + } else { + data[i] = 0.25; + } + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamSetCurveAtTimeTwice.html b/dom/media/webaudio/test/test_audioParamSetCurveAtTimeTwice.html new file mode 100644 index 000000000..0f976380e --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamSetCurveAtTimeTwice.html @@ -0,0 +1,68 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.setValueCurveAtTime twice</title> + <script type="text/javascript" 
src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + + +function linearInterpolate(t0, v0, t1, v1, t) +{ + return v0 + (v1 - v0) * ((t - t0) / (t1 - t0)); +} + +var T0 = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var curve2 = new Float32Array(100); + for (var i = 0; i < 100; ++i) { + curve2[i] = Math.sin(220 * 6 * Math.PI * i / context.sampleRate); + } + + var source = context.createConstantSource(); + + var gain = context.createGain(); + gain.gain.setValueCurveAtTime(curve2, T0, this.duration/2); + //Set a different curve from the first one + gain.gain.setValueCurveAtTime(this.curve, T0, this.duration); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + this.duration = 1024 / context.sampleRate; + this.curve = new Float32Array(100); + for (var i = 0; i < 100; ++i) { + this.curve[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + step = 1024.0/99.0; + var current = Math.floor(i / step); + var next = current + 1; + if (next < this.curve.length) { + expectedBuffer.getChannelData(0)[i] = linearInterpolate(current*step, this.curve[current], next*step, this.curve[next], i); + } else { + expectedBuffer.getChannelData(0)[i] = this.curve[99]; + } + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamSetCurveAtTimeZeroDuration.html b/dom/media/webaudio/test/test_audioParamSetCurveAtTimeZeroDuration.html new file mode 100644 index 000000000..174c15c6f --- /dev/null +++ 
b/dom/media/webaudio/test/test_audioParamSetCurveAtTimeZeroDuration.html @@ -0,0 +1,57 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.linearRampToValue</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var T0 = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.setValueCurveAtTime(this.curve, this.T0, 0); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + this.T0 = 1024 / context.sampleRate; + this.curve = new Float32Array(100); + for (var i = 0; i < 100; ++i) { + this.curve[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 1024; ++i) { + expectedBuffer.getChannelData(0)[i] = 1; + } + for (var i = 1024; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = this.curve[99]; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamSetTargetAtTime.html b/dom/media/webaudio/test/test_audioParamSetTargetAtTime.html new file mode 100644 index 000000000..ccb35ca7b --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamSetTargetAtTime.html @@ -0,0 +1,55 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.setTargetAtTime</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + 
<script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var V0 = 0.9; +var V1 = 0.1; +var T0 = 0; +var TimeConstant = 10; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.value = V0; + gain.gain.setTargetAtTime(V1, T0, TimeConstant); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + var T1 = 2048 / context.sampleRate; + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + var t = i / context.sampleRate; + expectedBuffer.getChannelData(0)[i] = V1 + (V0 - V1) * Math.exp(-(t - T0) / TimeConstant); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html b/dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html new file mode 100644 index 000000000..bad12ca31 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamSetTargetAtTimeZeroTimeConstant.html @@ -0,0 +1,55 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.setTargetAtTime with zero time constant</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var V0 = 0.9; +var V1 = 0.1; +var T0 = 0; +var 
TimeConstant = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.value = V0; + gain.gain.setTargetAtTime(V1, T0, TimeConstant); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + var T1 = 2048 / context.sampleRate; + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + var t = i / context.sampleRate; + expectedBuffer.getChannelData(0)[i] = V1; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamSetValueAtTime.html b/dom/media/webaudio/test/test_audioParamSetValueAtTime.html new file mode 100644 index 000000000..1ab515935 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamSetValueAtTime.html @@ -0,0 +1,52 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam.linearRampToValue</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var V0 = 0.1; +var V1 = 0.9; +var T0 = 0; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.value = 0; + 
gain.gain.setValueAtTime(V0, 1024/context.sampleRate); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 1024; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = 0.1; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html b/dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html new file mode 100644 index 000000000..510beb3c7 --- /dev/null +++ b/dom/media/webaudio/test/test_audioParamTimelineDestinationOffset.html @@ -0,0 +1,45 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioParam timeline events scheduled after the destination stream has started playback</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.requestFlakyTimeout("This test needs to wait until the AudioDestinationNode's stream's timer starts."); + +var gTest = { + length: 16384, + numberOfChannels: 1, + createGraphAsync: function(context, callback) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + setTimeout(function() { + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + source.start(context.currentTime); + source.stop(context.currentTime + sourceBuffer.duration); + + var gain = context.createGain(); + gain.gain.setValueAtTime(0, context.currentTime); + gain.gain.setTargetAtTime(0, context.currentTime + sourceBuffer.duration, 1); + source.connect(gain); + + callback(gain); + }, 100); + }, +}; + +runTest(); + 
+</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_badConnect.html b/dom/media/webaudio/test/test_badConnect.html new file mode 100644 index 000000000..b0d7c8f0c --- /dev/null +++ b/dom/media/webaudio/test/test_badConnect.html @@ -0,0 +1,48 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether we can create an AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context1 = new AudioContext(); + var context2 = new AudioContext(); + + var destination1 = context1.destination; + var destination2 = context2.destination; + + isnot(destination1, destination2, "Destination nodes should not be the same"); + isnot(destination1.context, destination2.context, "Destination nodes should not have the same context"); + + var source1 = context1.createBufferSource(); + + expectException(function() { + source1.connect(destination1, 1); + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + source1.connect(destination1, 0, 1); + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + source1.connect(destination2); + }, DOMException.SYNTAX_ERR); + + source1.connect(destination1); + + expectException(function() { + source1.disconnect(1); + }, DOMException.INDEX_SIZE_ERR); + + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_biquadFilterNode.html b/dom/media/webaudio/test/test_biquadFilterNode.html new file mode 100644 index 000000000..078f89179 --- /dev/null +++ b/dom/media/webaudio/test/test_biquadFilterNode.html @@ -0,0 +1,86 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode</title> + <script 
type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +function near(a, b, msg) { + ok(Math.abs(a - b) < 1e-3, msg); +} + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var destination = context.destination; + + var source = context.createBufferSource(); + + var filter = context.createBiquadFilter(); + + source.buffer = buffer; + + source.connect(filter); + filter.connect(destination); + + // Verify default values + is(filter.type, "lowpass", "Correct default value for type"); + near(filter.frequency.defaultValue, 350, "Correct default value for filter frequency"); + near(filter.detune.defaultValue, 0, "Correct default value for filter detune"); + near(filter.Q.defaultValue, 1, "Correct default value for filter Q"); + near(filter.gain.defaultValue, 0, "Correct default value for filter gain"); + is(filter.channelCount, 2, "Biquad filter node has 2 input channels by default"); + is(filter.channelCountMode, "max", "Correct channelCountMode for the biquad filter node"); + is(filter.channelInterpretation, "speakers", "Correct channelCountInterpretation for the biquad filter node"); + + // Make sure that we can set all of the valid type values + var types = [ + "lowpass", + "highpass", + "bandpass", + "lowshelf", + "highshelf", + "peaking", + "notch", + "allpass", + ]; + for (var i = 0; i < types.length; ++i) { + filter.type = types[i]; + } + + // Make sure getFrequencyResponse handles invalid frequencies properly + var frequencies = new Float32Array([-1.0, context.sampleRate*0.5 - 1.0, 
context.sampleRate]); + var magResults = new Float32Array(3); + var phaseResults = new Float32Array(3); + filter.getFrequencyResponse(frequencies, magResults, phaseResults); + ok(isNaN(magResults[0]), "Invalid input frequency should give NaN magnitude response"); + ok(!isNaN(magResults[1]), "Valid input frequency should not give NaN magnitude response"); + ok(isNaN(magResults[2]), "Invalid input frequency should give NaN magnitude response"); + ok(isNaN(phaseResults[0]), "Invalid input frquency should give NaN phase response"); + ok(!isNaN(phaseResults[1]), "Valid input frquency should not give NaN phase response"); + ok(isNaN(phaseResults[2]), "Invalid input frquency should give NaN phase response"); + + source.start(0); + SimpleTest.executeSoon(function() { + source.stop(0); + source.disconnect(); + filter.disconnect(); + + SimpleTest.finish(); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_biquadFilterNodePassThrough.html b/dom/media/webaudio/test/test_biquadFilterNodePassThrough.html new file mode 100644 index 000000000..59fc8ab4f --- /dev/null +++ b/dom/media/webaudio/test/test_biquadFilterNodePassThrough.html @@ -0,0 +1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var filter = context.createBiquadFilter(); + + source.buffer = this.buffer; + + source.connect(filter); + + var filterWrapped = SpecialPowers.wrap(filter); + ok("passThrough" in filterWrapped, "BiquadFilterNode should support the passThrough API"); + 
filterWrapped.passThrough = true; + + source.start(0); + return filter; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_biquadFilterNodeWithGain.html b/dom/media/webaudio/test/test_biquadFilterNodeWithGain.html new file mode 100644 index 000000000..390f2cdb0 --- /dev/null +++ b/dom/media/webaudio/test/test_biquadFilterNodeWithGain.html @@ -0,0 +1,61 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test BiquadFilterNode after a GainNode and tail - Bugs 924286 and 924288</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +const signalLength = 2048; + +var gTest = { + length: signalLength, + numberOfChannels: 1, + createGraph: function(context) { + // Two oscillators scheduled sequentially + var signalDuration = signalLength / context.sampleRate; + var osc1 = context.createOscillator(); + osc1.type = "square"; + osc1.start(0); + osc1.stop(signalDuration / 2); + var osc2 = context.createOscillator(); + osc2.start(signalDuration / 2); + osc2.stop(signalDuration); + + // Comparing a biquad on each source with one on both sources checks that + // the biquad on the first source doesn't shut down early. 
+ var biquad1 = context.createBiquadFilter(); + osc1.connect(biquad1); + var biquad2 = context.createBiquadFilter(); + osc2.connect(biquad2); + + var gain = context.createGain(); + gain.gain.value = -1; + osc1.connect(gain); + osc2.connect(gain); + + var biquadWithGain = context.createBiquadFilter(); + gain.connect(biquadWithGain); + + // The output of biquadWithGain should be the inverse of the sum of the + // outputs of biquad1 and biquad2, so blend them together and expect + // silence. + var blend = context.createGain(); + biquad1.connect(blend); + biquad2.connect(blend); + biquadWithGain.connect(blend); + + return blend; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug1027864.html b/dom/media/webaudio/test/test_bug1027864.html new file mode 100644 index 000000000..0c115d1a0 --- /dev/null +++ b/dom/media/webaudio/test/test_bug1027864.html @@ -0,0 +1,74 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test bug 1027864</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +function observer(subject, topic, data) { + var id = parseInt(data); + var index = ids.indexOf(id); + if (index != -1) { + ok(true, "Dropping id " + id + " at index " + index); + ids.splice(index, 1); + if (ids.length == 0) { + SimpleTest.executeSoon(function() { + SimpleTest.finish(); + }); + } + } +} + +function id(node) { + return SpecialPowers.getPrivilegedProps(node, "id"); +} + +SpecialPowers.addObserver(observer, "webaudio-node-demise", false); + +SimpleTest.registerCleanupFunction(function() { + SpecialPowers.removeObserver(observer, "webaudio-node-demise"); +}); + +var ac = new AudioContext(); +var ids; + +(function() { + var delay = ac.createDelay(); + delay.delayTime.value = 0.03; + + var gain = 
ac.createGain(); + gain.gain.value = 0.6; + + delay.connect(gain); + gain.connect(delay); + + gain.connect(ac.destination); + + var source = ac.createOscillator(); + + source.connect(gain); + source.start(ac.currentTime); + source.stop(ac.currentTime + 0.1); + + ids = [ id(delay), id(gain), id(source) ]; +})(); + +setInterval(function() { + forceCC(); +}, 200); + +function forceCC() { + SpecialPowers.DOMWindowUtils.cycleCollect(); + SpecialPowers.DOMWindowUtils.garbageCollect(); +} + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug1056032.html b/dom/media/webaudio/test/test_bug1056032.html new file mode 100644 index 000000000..98fb159f7 --- /dev/null +++ b/dom/media/webaudio/test/test_bug1056032.html @@ -0,0 +1,35 @@ +<!DOCTYPE HTML> +<html> +<meta charset=utf-8> +<head> + <title>Test that we can decode an mp3 (bug 1056032)</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +var filename = "small-shot.mp3"; + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + var xhr = new XMLHttpRequest(); + xhr.open("GET", filename); + xhr.responseType = "arraybuffer"; + xhr.onload = function() { + var context = new AudioContext(); + context.decodeAudioData(xhr.response, function(b) { + ok(true, "We can decode an mp3 using decodeAudioData"); + SimpleTest.finish(); + }, function() { + ok(false, "We should be able to decode an mp3 using decodeAudioData but couldn't"); + SimpleTest.finish(); + }); + }; + xhr.send(null); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug1113634.html b/dom/media/webaudio/test/test_bug1113634.html new file mode 100644 index 000000000..8995589f3 --- /dev/null +++ b/dom/media/webaudio/test/test_bug1113634.html @@ -0,0 +1,54 @@ +<!DOCTYPE HTML> +<html> +<head> + 
<title>Test AudioParam.setTargetAtTime where the target time is the same as the time of a previous event</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var V0 = 0.9; +var V1 = 0.1; +var T0 = 0; +var TimeConstant = 0.1; + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + gain.gain.setValueAtTime(V0, T0); + gain.gain.setTargetAtTime(V1, T0, TimeConstant); + + source.connect(gain); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + var t = i / context.sampleRate; + expectedBuffer.getChannelData(0)[i] = V1 + (V0 - V1) * Math.exp(-(t - T0) / TimeConstant); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug1118372.html b/dom/media/webaudio/test/test_bug1118372.html new file mode 100644 index 000000000..ca3fc6b0d --- /dev/null +++ b/dom/media/webaudio/test/test_bug1118372.html @@ -0,0 +1,46 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test WaveShaperNode with no curve</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + 
+SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + var context = new OfflineAudioContext(1, 2048, 44100); + + var osc=context.createOscillator(); + var gain=context.createGain(); + var shaper=context.createWaveShaper(); + gain.gain.value=0.1; + shaper.curve=new Float32Array([-0.5,-0.5,1,1]); + + osc.connect(gain); + gain.connect(shaper); + shaper.connect(context.destination); + osc.start(0); + + context.startRendering().then(function(buffer) { + var samples = buffer.getChannelData(0); + // the signal should be scaled + var failures = 0; + for (var i = 0; i < 2048; ++i) { + if (samples[i] > 0.5) { + failures = failures + 1; + } + } + ok(failures == 0, "signal should have been rescaled by gain: found " + failures + " points too loud."); + SimpleTest.finish(); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug1255618.html b/dom/media/webaudio/test/test_bug1255618.html new file mode 100644 index 000000000..246a04438 --- /dev/null +++ b/dom/media/webaudio/test/test_bug1255618.html @@ -0,0 +1,41 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test sync XHR does not crash unlinked AudioContext</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<script> +SimpleTest.waitForExplicitFinish(); + +const filename = "test_bug1255618.html"; + +function collect_and_fetch() { + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + + var xhr = new XMLHttpRequest(); + xhr.open("GET", filename, false); + var ended = false; + xhr.onloadend = function() { ended = true; } + // Sync XHR will suspend timeouts, which involves any AudioContexts still + // registered with the window. 
+ // See https://bugzilla.mozilla.org/show_bug.cgi?id=1255618#c0 + xhr.send(null); + + ok(ended, "No crash during fetch"); + SimpleTest.finish(); +} + +var ac = new AudioContext(); + +ac.onstatechange = function () { + ac.onstatechange = null; + is(ac.state, "running", "statechange to running"); + ac = null; + SimpleTest.executeSoon(collect_and_fetch); +} + +</script> +</body> diff --git a/dom/media/webaudio/test/test_bug1267579.html b/dom/media/webaudio/test/test_bug1267579.html new file mode 100644 index 000000000..62eda14dc --- /dev/null +++ b/dom/media/webaudio/test/test_bug1267579.html @@ -0,0 +1,46 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test that PeriodicWave handles fundamental fequency of zero</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// This is the smallest value that the test framework will accept +const testLength = 256; + +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + runTest(); +}); + +var gTest = { + numberOfChannels: 1, + createGraph: function(context) { + var osc = context.createOscillator(); + osc.setPeriodicWave(context. 
+ createPeriodicWave(new Float32Array([0.0, 1.0]), + new Float32Array(2))); + osc.frequency.value = 0.0; + osc.start(); + return osc; + }, + createExpectedBuffers: function(context) { + var buffer = context.createBuffer(1, testLength, context.sampleRate); + + for (var i = 0; i < buffer.length; ++i) { + buffer.getChannelData(0)[i] = 1.0; + } + return buffer; + }, +}; + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug808374.html b/dom/media/webaudio/test/test_bug808374.html new file mode 100644 index 000000000..cc4e02f41 --- /dev/null +++ b/dom/media/webaudio/test/test_bug808374.html @@ -0,0 +1,22 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 808374</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +try { + var ctx = new AudioContext(); + ctx.createBuffer(0, 1, ctx.sampleRate); +} catch (e) { + ok(true, "The test should not crash during CC"); +} + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug827541.html b/dom/media/webaudio/test/test_bug827541.html new file mode 100644 index 000000000..9940c112e --- /dev/null +++ b/dom/media/webaudio/test/test_bug827541.html @@ -0,0 +1,22 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Tell the cycle collector about the audio contexts owned by nsGlobalWindow</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + var iframe = document.createElementNS("http://www.w3.org/1999/xhtml", "iframe"); + document.body.appendChild(iframe); + var frameWin = iframe.contentWindow; + new frameWin.AudioContext(); + document.body.removeChild(iframe); + new 
frameWin.AudioContext(); + + ok(true, "This test should not leak"); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug839753.html b/dom/media/webaudio/test/test_bug839753.html new file mode 100644 index 000000000..bbab10b25 --- /dev/null +++ b/dom/media/webaudio/test/test_bug839753.html @@ -0,0 +1,18 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 839753</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +(new AudioContext()).destination.expando = null; +ok(true, "The test should not trigger wrapper cache assertions"); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug845960.html b/dom/media/webaudio/test/test_bug845960.html new file mode 100644 index 000000000..4e37f91bf --- /dev/null +++ b/dom/media/webaudio/test/test_bug845960.html @@ -0,0 +1,18 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 845960</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +(new AudioContext()).decodeAudioData(new ArrayBuffer(0), function() {}); +ok(true, "Should not crash when the optional failure callback is not specified"); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug856771.html b/dom/media/webaudio/test/test_bug856771.html new file mode 100644 index 000000000..8a6e622c2 --- /dev/null +++ b/dom/media/webaudio/test/test_bug856771.html @@ -0,0 +1,26 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test for bug 856771</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" 
href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + + var source = context.createBufferSource(); + source.connect(context.destination); + ok(true, "Nothing should leak"); + + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug866570.html b/dom/media/webaudio/test/test_bug866570.html new file mode 100644 index 000000000..0a1feca61 --- /dev/null +++ b/dom/media/webaudio/test/test_bug866570.html @@ -0,0 +1,18 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 859600</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +(new AudioContext()).foo = null; +ok(true, "The test should not fatally assert"); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug866737.html b/dom/media/webaudio/test/test_bug866737.html new file mode 100644 index 000000000..40fcf83fd --- /dev/null +++ b/dom/media/webaudio/test/test_bug866737.html @@ -0,0 +1,36 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test for bug 866737</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var context = new AudioContext(); + +(function() { + var d = context.createDelay(); + var panner = context.createPanner(); + d.connect(panner); + var gain = context.createGain(); + panner.connect(gain); + gain.connect(context.destination); + gain.disconnect(0); +})(); + +SpecialPowers.forceGC(); +SpecialPowers.forceCC(); + +var gain = context.createGain(); 
+gain.connect(context.destination); +gain.disconnect(0); + +ok(true, "No crashes should happen!"); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug867089.html b/dom/media/webaudio/test/test_bug867089.html new file mode 100644 index 000000000..650676a44 --- /dev/null +++ b/dom/media/webaudio/test/test_bug867089.html @@ -0,0 +1,43 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 867089</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ctx = new AudioContext(); + + // Test invalid playbackRate values for AudioBufferSourceNode. + var source = ctx.createBufferSource(); + var buffer = ctx.createBuffer(2, 2048, 8000); + source.buffer = buffer; + source.playbackRate.value = 0.0; + source.connect(ctx.destination); + source.start(0); + + var source2 = ctx.createBufferSource(); + source2.buffer = buffer; + source2.playbackRate.value = -1.0; + source2.connect(ctx.destination); + source2.start(0); + + var source3 = ctx.createBufferSource(); + source3.buffer = buffer; + source3.playbackRate.value = 3000000.0; + source3.connect(ctx.destination); + source3.start(0); + ok(true, "We did not crash."); + SimpleTest.finish(); +}); + + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug867104.html b/dom/media/webaudio/test/test_bug867104.html new file mode 100644 index 000000000..82852ba51 --- /dev/null +++ b/dom/media/webaudio/test/test_bug867104.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 867104</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> 
+<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ctx = new AudioContext(); + var source = ctx.createBufferSource(); + var b0 = ctx.createBuffer(32,798,22050); + var b1 = ctx.createBuffer(32,28,22050); + var sp = ctx.createScriptProcessor(0, 2, 0); + source.buffer = b0; + source.connect(sp); + source.start(0); + source.buffer = b1; + sp.onaudioprocess = function() { + ok(true, "We did not crash."); + sp.onaudioprocess = null; + SimpleTest.finish(); + }; +}); + + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug867174.html b/dom/media/webaudio/test/test_bug867174.html new file mode 100644 index 000000000..ce88be1b3 --- /dev/null +++ b/dom/media/webaudio/test/test_bug867174.html @@ -0,0 +1,38 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 867174</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ctx = new AudioContext(); + + var source = ctx.createBufferSource(); + var buffer = ctx.createBuffer(2, 2048, 8000); + source.playbackRate.setTargetAtTime(0, 2, 3); + var sp = ctx.createScriptProcessor(); + source.connect(sp); + sp.connect(ctx.destination); + source.start(0); + + sp.onaudioprocess = function(e) { + // Now set the buffer + source.buffer = buffer; + + ok(true, "We did not crash."); + sp.onaudioprocess = null; + SimpleTest.finish(); + }; +}); + + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug867203.html b/dom/media/webaudio/test/test_bug867203.html new file mode 100644 index 000000000..0ca31263d --- /dev/null +++ b/dom/media/webaudio/test/test_bug867203.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 
867203</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ctx = new AudioContext(); + + var panner1 = ctx.createPanner(); + panner1.setVelocity(1, 1, 1); + ctx.listener.setVelocity(1, 1, 1); + (function() { + ctx.createBufferSource().connect(panner1); + })(); + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + ctx.createPanner(); + + ok(true, "We did not crash."); + SimpleTest.finish(); +}); + + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug875221.html b/dom/media/webaudio/test/test_bug875221.html new file mode 100644 index 000000000..16560ae75 --- /dev/null +++ b/dom/media/webaudio/test/test_bug875221.html @@ -0,0 +1,239 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 875221</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout("This test is generated by a fuzzer, so we leave these setTimeouts untouched."); + +try { o0 = document.createElement('audio'); } catch(e) { } +try { (document.body || document.documentElement).appendChild(o0); } catch(e) { } +try { o1 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o1.listener.dopplerFactor = 1; } catch(e) { } +try { o2 = o1.createScriptProcessor(); } catch(e) { } +try { o3 = o1.createChannelMerger(4); } catch(e) { } +try { o1.listener.dopplerFactor = 3; } catch(e) { } +try { o1.listener.setPosition(0, 134217728, 64) } catch(e) { } +try { 
o1.listener.dopplerFactor = 15; } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o4 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { } +try { o4.listener.speedOfSound = 2048; } catch(e) { } +try { o4.listener.setPosition(32768, 1, 1) } catch(e) { } +try { o5 = o1.createChannelSplitter(4); } catch(e) { } +try { o4.listener.setVelocity(4, 1, 0) } catch(e) { } +try { o4.startRendering(); } catch(e) { } +try { o4.startRendering(); } catch(e) { } +try { o4.listener.setPosition(64, 1, 0) } catch(e) { } +try { o1.listener.setOrientation(4194304, 15, 8388608, 15, 1, 1) } catch(e) { } +try { o1.listener.dopplerFactor = 256; } catch(e) { } +try { o6 = o4.createDelay(16); } catch(e) { } +try { o4.startRendering(); } catch(e) { } +try { o4.listener.setOrientation(0, 1, 0, 0, 31, 1073741824) } catch(e) { } +try { o4.listener.speedOfSound = 1; } catch(e) { } +try { o1.listener.speedOfSound = 0; } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o6.connect(o3, 1, 0) } catch(e) { } +try { o1.listener.setPosition(4294967296, 32, 1) } catch(e) { } +try { o1.listener.speedOfSound = 0; } catch(e) { } +try { o1.listener.speedOfSound = 0; } catch(e) { } +try { o1.listener.setVelocity(1, 256, 0) } catch(e) { } +try { o4.startRendering(); } catch(e) { } +try { o3.disconnect() } catch(e) { } +setTimeout("try { o4.startRendering(); } catch(e) { }",50) +try { o4.listener.setOrientation(0, 0, 2048, 128, 16384, 127) } catch(e) { } +try { o4.listener.setVelocity(0, 4, 1) } catch(e) { } +try { o7 = o4.createScriptProcessor(1024, 4, 1); } catch(e) { } +try { o8 = o4.createDynamicsCompressor(); } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +SpecialPowers.forceCC(); +SpecialPowers.forceGC(); +try { o4.listener.setOrientation(8192, 1, 1, 512, 0, 15) } catch(e) { } 
+setTimeout("try { o7.onaudioprocess = function() {}; } catch(e) { }",50) +try { o1.startRendering(); } catch(e) { } +try { o1.listener.speedOfSound = 1073741824; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o9 = o4.createScriptProcessor(1024, 1, 4); } catch(e) { } +try { o10 = o4.createAnalyser(); } catch(e) { } +try { o4.listener.speedOfSound = 0; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +try { o4.listener.setVelocity(524288, 1, 65536) } catch(e) { } +setTimeout("try { o2.connect(o9); } catch(e) { } setTimeout(done, 0);",1000) +try { o7.connect(o4); } catch(e) { } +try { o1.listener.setVelocity(1, 127, 31) } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +setTimeout("try { o5.disconnect() } catch(e) { }",100) +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o4.startRendering(); } catch(e) { } +setTimeout("try { o1.listener.dopplerFactor = 1; } catch(e) { }",100) +try { o5.disconnect() } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o10.disconnect() } catch(e) { } +try { o1.startRendering(); } catch(e) { } +try { o11 = o1.createGain(); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o4); } catch(e) { } +try { o4.listener.setOrientation(31, 0, 15, 0, 33554432, 1) } catch(e) { } +try { o4.listener.dopplerFactor = 1; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +setTimeout("try { o9.connect(o4); } catch(e) { }",50) +try { o2.connect(o9); } catch(e) { } +setTimeout("try { o9.connect(o1); } catch(e) { }",200) +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +try { o12 = o4.createDynamicsCompressor(); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +try { 
o9.onaudioprocess = function() {}; } catch(e) { } +try { o1.listener.speedOfSound = 262144; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o2.connect(o9); } catch(e) { } +setTimeout("try { o7.connect(o4); } catch(e) { }",50) +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o13 = o4.createGain(); } catch(e) { } +try { o4.listener.dopplerFactor = 31; } catch(e) { } +try { o11.gain.value = 268435456; } catch(e) { } +try { o1.listener.setOrientation(63, 3, 1, 63, 1, 2147483648) } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o4.listener.setVelocity(1, 0, 1) } catch(e) { } +try { o11.gain.value = 65536; } catch(e) { } +try { o2.connect(o9); } catch(e) { } +setTimeout("try { o7.connect(o4); } catch(e) { }",200) +try { o14 = o4.createDynamicsCompressor(); } catch(e) { } +setTimeout("try { o2.connect(o9); } catch(e) { }",50) +try { o7.connect(o1); } catch(e) { } +try { o15 = o1.createWaveShaper(); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +try { o16 = o1.createWaveShaper(); } catch(e) { } +try { o11.gain.value = 1; } catch(e) { } +try { o1.listener.speedOfSound = 16; } catch(e) { } +try { o4.listener.setVelocity(0, 127, 15) } catch(e) { } +try { o1.listener.setVelocity(0, 2048, 16777216) } catch(e) { } +try { o13.gain.value = 0; } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +try { o17 = document.createElement('audio'); } catch(e) { } +try { (document.body || document.documentElement).appendChild(o0); } catch(e) { } +try { o4.listener.setVelocity(3, 1, 256) } catch(e) { } +try { o11.gain.cancelScheduledValues(1) } catch(e) { } +try { o1.listener.dopplerFactor = 524288; } catch(e) { } +try { o9.onaudioprocess = function() {}; } catch(e) { } +setTimeout("try { o7.connect(o13, 
0, 0) } catch(e) { }",50) +try { o1.listener.speedOfSound = 0; } catch(e) { } +try { o10.disconnect() } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o9.connect(o4); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o4); } catch(e) { } +try { o1.listener.speedOfSound = 1; } catch(e) { } +try { o15.disconnect() } catch(e) { } +try { o11.gain.exponentialRampToValueAtTime(0, 15) } catch(e) { } +try { o15.curve = new Float32Array(15); } catch(e) { } +try { o4.listener.setVelocity(1, 1, 1) } catch(e) { } +try { o14.connect(o6, 0, 0) } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +setTimeout("try { o7.connect(o1); } catch(e) { }",100) +try { o4.listener.setVelocity(1, 7, 1) } catch(e) { } +try { o18 = document.createElement('audio'); } catch(e) { } +try { (document.body || document.documentElement).appendChild(o18); } catch(e) { } +try { o19 = o4.createGain(); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +try { o4.listener.dopplerFactor = 0; } catch(e) { } +try { o1.listener.setPosition(256, 16, 1) } catch(e) { } +setTimeout("try { o2.connect(o9); } catch(e) { }",50) +try { o7.connect(o1); } catch(e) { } +try { o4.listener.speedOfSound = 31; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +setTimeout("try { o9.connect(o4); } catch(e) { }",1000) +try { o11.gain.value = 127; } catch(e) { } +try { o7.connect(o7, 0, 0) } catch(e) { } +try { o4.listener.speedOfSound = 63; } catch(e) { } +try { o11.gain.value = 33554432; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o4); } catch(e) { } +try { o4.listener.speedOfSound = 16; } catch(e) { } +try { o4.listener.setVelocity(1048576, 0, 127) } catch(e) { } +try { o1.listener.dopplerFactor = 0; } catch(e) { } +try { o6.connect(o2, 0, 1) } catch(e) { } +try { 
o5.disconnect() } catch(e) { } +try { o3.disconnect() } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +try { o16.disconnect() } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +try { o9.disconnect() } catch(e) { } +try { o4.listener.speedOfSound = 1; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o11.gain.setValueCurveAtTime(new Float32Array(3), 2048, 3) } catch(e) { } +try { o13.gain.value = 8; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o4); } catch(e) { } +try { o4.listener.setOrientation(1, 2048, 1, 1, 0, 31) } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o1); } catch(e) { } +try { o1.listener.speedOfSound = 256; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o4); } catch(e) { } +try { o4.listener.setVelocity(1, 67108864, 128) } catch(e) { } +setTimeout("try { o1.listener.setVelocity(0, 1, 1) } catch(e) { }",100) +try { o2.connect(o9); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +setTimeout("try { o20 = o1.createBiquadFilter(); } catch(e) { }",200) +try { o13.gain.value = 4096; } catch(e) { } +try { o1.listener.dopplerFactor = 0; } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +try { o2.connect(o9); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +setTimeout("try { o2.connect(o9); } catch(e) { }",200) +try { o7.connect(o1); } catch(e) { } +try { o3.connect(o15, 1, 1) } catch(e) { } +try { o2.connect(o12, 0, 0) } catch(e) { } +try { o19.gain.exponentialRampToValueAtTime(1, 0) } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } + +function done() { + ok(true, "We did not crash."); + SimpleTest.finish(); +} + + 
+</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug875402.html b/dom/media/webaudio/test/test_bug875402.html new file mode 100644 index 000000000..2dc347fc1 --- /dev/null +++ b/dom/media/webaudio/test/test_bug875402.html @@ -0,0 +1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Crashtest for bug 875402</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +SimpleTest.requestFlakyTimeout("This test is generated by a fuzzer, so we leave these setTimeouts untouched."); + +try { o1 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { } +try { o2 = o1.createScriptProcessor(); } catch(e) { } +try { o4 = new OfflineAudioContext(1, 10, (new AudioContext()).sampleRate); } catch(e) { } +try { o5 = o1.createChannelSplitter(4); } catch(e) { } +try { o7 = o4.createScriptProcessor(1024, 4, 1); } catch(e) { } +SpecialPowers.forceCC(); +SpecialPowers.forceGC(); +try { o1.startRendering(); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o7.connect(o4); } catch(e) { } +try { o9 = o4.createScriptProcessor(1024, 1, 4); } catch(e) { } +try { o2.connect(o7); } catch(e) { } +try { o9.connect(o1); } catch(e) { } +setTimeout("try { o2.connect(o9); } catch(e) { } done();",1000) +try { o7.connect(o4); } catch(e) { } +setTimeout("try { o5.disconnect() } catch(e) { }",100) +try { o2.connect(o9); } catch(e) { } +try { o4.startRendering(); } catch(e) { } +try { o2.connect(o9); } catch(e) { } +setTimeout("try { o7.connect(o4); } catch(e) { }",50) +try { o13 = o4.createGain(); } catch(e) { } +setTimeout("try { o7.connect(o13, 0, 0) } catch(e) { }",50) + +function done() { + ok(true, "We did not crash."); + SimpleTest.finish(); +} + +</script> +</pre> +</body> +</html> diff --git 
a/dom/media/webaudio/test/test_bug894150.html b/dom/media/webaudio/test/test_bug894150.html new file mode 100644 index 000000000..08fd72413 --- /dev/null +++ b/dom/media/webaudio/test/test_bug894150.html @@ -0,0 +1,21 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether we can create an AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<script> + +var ac = new AudioContext(); +ac.createPanner(); +var listener = ac.listener; +SpecialPowers.forceGC(); +SpecialPowers.forceCC(); +listener.setOrientation(0, 0, -1, 0, 0, 0); + +ok(true, "No crashes should happen!"); + +</script> +</body> diff --git a/dom/media/webaudio/test/test_bug956489.html b/dom/media/webaudio/test/test_bug956489.html new file mode 100644 index 000000000..920889290 --- /dev/null +++ b/dom/media/webaudio/test/test_bug956489.html @@ -0,0 +1,56 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test when and currentTime are in the same coordinate system</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout("This test needs to wait a while for the AudioContext's timer to start."); +addLoadEvent(function() { + var freq = 330; + + var context = new AudioContext(); + + var buffer = context.createBuffer(1, context.sampleRate / freq, context.sampleRate); + for (var i = 0; i < buffer.length; ++i) { + buffer.getChannelData(0)[i] = Math.sin(2 * Math.PI * i / buffer.length); + } + + var source = context.createBufferSource(); + source.loop = true; + source.buffer = buffer; + + setTimeout(function () { + var finished = false; + + source.start(context.currentTime); + var processor = 
context.createScriptProcessor(256, 1, 1); + processor.onaudioprocess = function (e) { + if (finished) return; + var c = e.inputBuffer.getChannelData(0); + var result = true; + + for (var i = 0; i < buffer.length; ++i) { + if (Math.abs(c[i] - buffer.getChannelData(0)[i]) > 1e-9) { + result = false; + break; + } + } + finished = true; + ok(result, "when and currentTime are in same time coordinate system"); + SimpleTest.finish(); + } + processor.connect(context.destination); + source.connect(processor); + }, 500); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug964376.html b/dom/media/webaudio/test/test_bug964376.html new file mode 100644 index 000000000..1d9af1c1e --- /dev/null +++ b/dom/media/webaudio/test/test_bug964376.html @@ -0,0 +1,64 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test repeating audio is not distorted</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function gcd(a, b) { + if (b === 0) { + return a; + } + return gcd(b, a % b); +} + +var SAMPLE_PLACEMENT = 128; + +var gTest = { + length: 2048, + numberOfChannels: 1, + + createGraph: function(context) { + var freq = Math.round(context.sampleRate / SAMPLE_PLACEMENT); + var dur = context.sampleRate / gcd(freq, context.sampleRate); + var buffer = context.createBuffer(1, dur, context.sampleRate); + + for (var i = 0; i < context.sampleRate; ++i) { + buffer.getChannelData(0)[i] = Math.sin(freq * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + source.buffer = buffer; + source.loop = true; + source.playbackRate.setValueAtTime(0.5, SAMPLE_PLACEMENT / context.sampleRate); + source.start(0); + + return source; + }, + + createExpectedBuffers: function(context) { + 
var freq = Math.round(context.sampleRate / SAMPLE_PLACEMENT); + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + var c = expectedBuffer.getChannelData(0); + for (var i = 0; i < c.length; ++i) { + if (i < SAMPLE_PLACEMENT) { + c[i] = Math.sin(freq * 2 * Math.PI * i / context.sampleRate); + } else { + c[i] = Math.sin(freq / 2 * 2 * Math.PI * (i + SAMPLE_PLACEMENT) / context.sampleRate); + } + } + + return expectedBuffer; + }, +}; + +runTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug966247.html b/dom/media/webaudio/test/test_bug966247.html new file mode 100644 index 000000000..9224ac2d4 --- /dev/null +++ b/dom/media/webaudio/test/test_bug966247.html @@ -0,0 +1,46 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether an audio file played with a volume set to 0 plays silence</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<audio preload=none src="ting-48k-1ch.ogg" controls> </audio> +<script> + SimpleTest.waitForExplicitFinish(); + + var count = 20; + + function isSilent(b) { + for (var i = 0; i < b.length; b++) { + if (b[i] != 0.0) { + return false; + } + } + return true; + } + + var a = document.getElementsByTagName("audio")[0]; + a.volume = 0.0; + var ac = new AudioContext(); + var measn = ac.createMediaElementSource(a); + var sp = ac.createScriptProcessor(); + + sp.onaudioprocess = function(e) { + var inputBuffer = e.inputBuffer.getChannelData(0); + ok(isSilent(inputBuffer), "The volume is set to 0, so all the elements of the buffer are supposed to be equal to 0.0"); + } + // Connect the MediaElementAudioSourceNode to the ScriptProcessorNode to check + // the audio volume. 
+ measn.connect(sp); + a.play(); + + a.addEventListener("ended", function() { + sp.onaudioprocess = null; + SimpleTest.finish(); + }); + +</script> +</body> +</html> diff --git a/dom/media/webaudio/test/test_bug972678.html b/dom/media/webaudio/test/test_bug972678.html new file mode 100644 index 000000000..d0cb4e419 --- /dev/null +++ b/dom/media/webaudio/test/test_bug972678.html @@ -0,0 +1,62 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test buffers do not interfere when scheduled in sequence</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +var OFFSETS = [0.005, 0.01, 0.02, 0.03]; +var LENGTH = 128; + +var gTest = { + length: 128 * OFFSETS.length, + numberOfChannels: 1, + + createGraph: function(context) { + var gain = context.createGain(); + + // create a repeating sample + var repeatingSample = context.createBuffer(1, 2, context.sampleRate); + var c = repeatingSample.getChannelData(0); + for (var i = 0; i < repeatingSample.length; ++i) { + c[i] = i % 2 == 0 ? 1 : -1; + } + + OFFSETS.forEach(function (offset, offsetIdx) { + // Schedule a set of nodes to repeat the sample. + for (var i = 0; i < LENGTH; i += repeatingSample.length) { + var source = context.createBufferSource(); + source.buffer = repeatingSample; + source.connect(gain); + source.start((offsetIdx * LENGTH + i + offset) / context.sampleRate); + } + + buffer = context.createBuffer(1, LENGTH, context.sampleRate); + c = buffer.getChannelData(0); + for (var i = 0; i < buffer.length; ++i) { + c[i] = i % 2 == 0 ? 
-1 : 1; + } + + var source = context.createBufferSource(); + source.buffer = buffer; + source.connect(gain); + source.start((offsetIdx * LENGTH + offset) / context.sampleRate); + }); + + return gain; + }, + + createExpectedBuffers: function(context) { + return context.createBuffer(1, gTest.length, context.sampleRate); + }, +}; + +runTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_channelMergerNode.html b/dom/media/webaudio/test/test_channelMergerNode.html new file mode 100644 index 000000000..a76aaa2e8 --- /dev/null +++ b/dom/media/webaudio/test/test_channelMergerNode.html @@ -0,0 +1,57 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ChannelMergerNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 6, + createGraph: function(context) { + var buffers = []; + for (var j = 0; j < 6; ++j) { + var buffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate); + // Second channel is silent + } + buffers.push(buffer); + } + + var merger = context.createChannelMerger(); + is(merger.channelCount, 1, "merger node has 1 input channels"); + is(merger.channelCountMode, "explicit", "Correct channelCountMode for the merger node"); + is(merger.channelInterpretation, "speakers", "Correct channelCountInterpretation for the merger node"); + + for (var i = 0; i < 6; ++i) { + var source = context.createBufferSource(); + source.buffer = buffers[i]; + source.connect(merger, 0, i); + source.start(0); + } + + return merger; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = 
context.createBuffer(6, 2048, context.sampleRate); + for (var i = 0; i < 6; ++i) { + for (var j = 0; j < 2048; ++j) { + expectedBuffer.getChannelData(i)[j] = 0.5 * Math.sin(440 * 2 * (i + 1) * Math.PI * j / context.sampleRate); + } + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_channelMergerNodeWithVolume.html b/dom/media/webaudio/test/test_channelMergerNodeWithVolume.html new file mode 100644 index 000000000..22f0a39cb --- /dev/null +++ b/dom/media/webaudio/test/test_channelMergerNodeWithVolume.html @@ -0,0 +1,60 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ChannelMergerNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 6, + createGraph: function(context) { + var buffers = []; + for (var j = 0; j < 6; ++j) { + var buffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate); + // Second channel is silent + } + buffers.push(buffer); + } + + var merger = context.createChannelMerger(); + is(merger.channelCount, 1, "merger node has 1 input channels"); + is(merger.channelCountMode, "explicit", "Correct channelCountMode for the merger node"); + is(merger.channelInterpretation, "speakers", "Correct channelCountInterpretation for the merger node"); + + for (var i = 0; i < 6; ++i) { + var source = context.createBufferSource(); + source.buffer = buffers[i]; + var gain = context.createGain(); + gain.gain.value = 0.5; + source.connect(gain); + gain.connect(merger, 0, i); + source.start(0); + } + + return merger; + }, + createExpectedBuffers: 
function(context) { + var expectedBuffer = context.createBuffer(6, 2048, context.sampleRate); + for (var i = 0; i < 6; ++i) { + for (var j = 0; j < 2048; ++j) { + expectedBuffer.getChannelData(i)[j] = 0.5 * 0.5 * Math.sin(440 * 2 * (i + 1) * Math.PI * j / context.sampleRate); + } + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_channelSplitterNode.html b/dom/media/webaudio/test/test_channelSplitterNode.html new file mode 100644 index 000000000..30cb0028c --- /dev/null +++ b/dom/media/webaudio/test/test_channelSplitterNode.html @@ -0,0 +1,71 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ChannelSplitterNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// We do not use our generic graph test framework here because +// the splitter node is special in that it creates multiple +// output ports. 
+ +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(4, 2048, context.sampleRate); + for (var j = 0; j < 4; ++j) { + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(j)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate); + } + } + var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate); + + var destination = context.destination; + + var source = context.createBufferSource(); + + var splitter = context.createChannelSplitter(); + is(splitter.channelCount, 2, "splitter node has 2 input channels by default"); + is(splitter.channelCountMode, "max", "Correct channelCountMode for the splitter node"); + is(splitter.channelInterpretation, "speakers", "Correct channelCountInterpretation for the splitter node"); + + source.buffer = buffer; + source.connect(splitter); + + var channelsSeen = 0; + function createHandler(i) { + return function(e) { + is(e.inputBuffer.numberOfChannels, 1, "Correct input channel count"); + if (i < 4) { + compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(i)); + } else { + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0)); + } + e.target.onaudioprocess = null; + ++channelsSeen; + + if (channelsSeen == 6) { + SimpleTest.finish(); + } + }; + } + + for (var i = 0; i < 6; ++i) { + var sp = context.createScriptProcessor(2048, 1); + splitter.connect(sp, i); + sp.onaudioprocess = createHandler(i); + sp.connect(destination); + } + + source.start(0); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html b/dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html new file mode 100644 index 000000000..8e16271f3 --- /dev/null +++ b/dom/media/webaudio/test/test_channelSplitterNodeWithVolume.html @@ -0,0 +1,76 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ChannelSplitterNode</title> + <script type="text/javascript" 
src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// We do not use our generic graph test framework here because +// the splitter node is special in that it creates multiple +// output ports. + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(4, 2048, context.sampleRate); + var expectedBuffer = context.createBuffer(4, 2048, context.sampleRate); + for (var j = 0; j < 4; ++j) { + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(j)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate); + expectedBuffer.getChannelData(j)[i] = buffer.getChannelData(j)[i] / 2; + } + } + var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate); + + var destination = context.destination; + + var source = context.createBufferSource(); + + var splitter = context.createChannelSplitter(); + is(splitter.channelCount, 2, "splitter node has 2 input channels by default"); + is(splitter.channelCountMode, "max", "Correct channelCountMode for the splitter node"); + is(splitter.channelInterpretation, "speakers", "Correct channelCountInterpretation for the splitter node"); + + source.buffer = buffer; + var gain = context.createGain(); + gain.gain.value = 0.5; + source.connect(gain); + gain.connect(splitter); + + var channelsSeen = 0; + function createHandler(i) { + return function(e) { + is(e.inputBuffer.numberOfChannels, 1, "Correct input channel count"); + if (i < 4) { + compareBuffers(e.inputBuffer.getChannelData(0), expectedBuffer.getChannelData(i)); + } else { + compareBuffers(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0)); + } + e.target.onaudioprocess = null; + ++channelsSeen; + + if (channelsSeen == 6) { + SimpleTest.finish(); + } + }; + } 
+ + for (var i = 0; i < 6; ++i) { + var sp = context.createScriptProcessor(2048, 1); + splitter.connect(sp, i); + sp.onaudioprocess = createHandler(i); + sp.connect(destination); + } + + source.start(0); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_convolverNode.html b/dom/media/webaudio/test/test_convolverNode.html new file mode 100644 index 000000000..38b58bd9b --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNode.html @@ -0,0 +1,32 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the ConvolverNode interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var conv = context.createConvolver(); + + is(conv.channelCount, 2, "Convolver node has 2 input channels by default"); + is(conv.channelCountMode, "clamped-max", "Correct channelCountMode for the Convolver node"); + is(conv.channelInterpretation, "speakers", "Correct channelCountInterpretation for the Convolver node"); + + is(conv.buffer, null, "Default buffer value"); + conv.buffer = context.createBuffer(2, 1024, 22050); + is(conv.normalize, true, "Default normalize value"); + + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_convolverNodeChannelCount.html b/dom/media/webaudio/test/test_convolverNodeChannelCount.html new file mode 100644 index 000000000..d5b261c81 --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNodeChannelCount.html @@ -0,0 +1,61 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ConvolverNode channel count</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link 
rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +const signalLength = 2048; +const responseLength = 1000; +const outputLength = 2048; // < signalLength + responseLength to test bug 910171 + +var gTest = { + length: outputLength, + numberOfChannels: 2, + createGraph: function(context) { + var buffer = context.createBuffer(2, signalLength, context.sampleRate); + for (var i = 0; i < signalLength; ++i) { + var sample = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + // When mixed into a single channel, this produces silence + buffer.getChannelData(0)[i] = sample; + buffer.getChannelData(1)[i] = -sample; + } + + var response = context.createBuffer(2, responseLength, context.sampleRate); + for (var i = 0; i < responseLength; ++i) { + response.getChannelData(0)[i] = i / responseLength; + response.getChannelData(1)[i] = 1 - (i / responseLength); + } + + var convolver = context.createConvolver(); + convolver.buffer = response; + convolver.channelCount = 1; + + expectException(function() { convolver.channelCount = 3; }, + DOMException.NOT_SUPPORTED_ERR); + convolver.channelCountMode = "explicit"; + expectException(function() { convolver.channelCountMode = "max"; }, + DOMException.NOT_SUPPORTED_ERR); + convolver.channelInterpretation = "discrete"; + convolver.channelInterpretation = "speakers"; + + var source = context.createBufferSource(); + source.buffer = buffer; + source.connect(convolver); + source.start(0); + + return convolver; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_convolverNodeDelay.html b/dom/media/webaudio/test/test_convolverNodeDelay.html new file mode 100644 index 000000000..2e8caf802 --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNodeDelay.html @@ -0,0 +1,72 @@ +<!DOCTYPE html> +<title>Test convolution to delay a 
triangle pulse</title> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script> +const sampleRate = 48000; +const LENGTH = 12800; +// tolerate 16-bit math. +const EPSILON = 1.0 / Math.pow(2, 15); + +// Triangle pulse +var sourceBuffer = new OfflineAudioContext(1, 1, sampleRate). + createBuffer(1, 2 * 128, sampleRate); +var channelData = sourceBuffer.getChannelData(0); +for (var i = 0; i < 128; ++i) { + channelData[i] = i/128; + channelData[128 + i] = 1.0 - i/128; +} + +function test_delay_index(delayIndex) { + + var context = new OfflineAudioContext(2, LENGTH, sampleRate); + + var merger = context.createChannelMerger(2); + merger.connect(context.destination); + + var impulse = context.createBuffer(1, delayIndex + 1, sampleRate); + impulse.getChannelData(0)[delayIndex] = 1.0; + var convolver = context.createConvolver(); + convolver.normalize = false; + convolver.buffer = impulse; + convolver.connect(merger, 0, 0); + + var delayTime = delayIndex/sampleRate; + var delay = context.createDelay(delayTime || 1/sampleRate); + delay.delayTime.value = delayTime; + delay.connect(merger, 0, 1); + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + source.connect(convolver); + source.connect(delay); + source.start(0); + + return context.startRendering(). + then((buffer) => { + var convolverOutput = buffer.getChannelData(0); + var delayOutput = buffer.getChannelData(1); + var maxDiff = 0.0; + var maxIndex = 0; + for (var i = 0; i < buffer.length; ++i) { + var diff = Math.abs(convolverOutput[i] - delayOutput[i]); + if (diff > maxDiff) { + maxDiff = diff; + maxIndex = i; + } + } + // The convolver should produce similar output to the delay. + assert_approx_equals(convolverOutput[maxIndex], delayOutput[maxIndex], + EPSILON, "output at " + maxIndex); + }); +} + +// The 5/4 ratio provides sampling across a range of delays and offsets within +// blocks. 
+for (var delayIndex = 0; + delayIndex < LENGTH; + delayIndex = Math.floor((5 * (delayIndex + 1)) / 4)) { + promise_test(test_delay_index.bind(null, delayIndex), + "Delay " + delayIndex); +} +</script> diff --git a/dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html b/dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html new file mode 100644 index 000000000..1cfb51ce8 --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNodeFiniteInfluence.html @@ -0,0 +1,44 @@ +<!DOCTYPE html> +<title>Test convolution effect has finite duration</title> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script> +promise_test(function() { + + const responseLength = 256; + // Accept an influence period of twice the responseLength to accept FFT + // implementations. + const tolerancePeriod = 2 * responseLength; + const totalSize = tolerancePeriod + responseLength; + + var context = new OfflineAudioContext(1, totalSize, 48000); + + var responseBuffer = + context.createBuffer(1, responseLength, context.sampleRate); + var responseChannelData = responseBuffer.getChannelData(0); + responseChannelData[0] = 1; + responseChannelData[responseLength - 1] = 1; + var convolver = context.createConvolver(); + convolver.buffer = responseBuffer; + convolver.connect(context.destination); + + var sourceBuffer = context.createBuffer(1, totalSize, context.sampleRate); + sourceBuffer.getChannelData(0)[0] = NaN; + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + source.connect(convolver); + source.start(); + + return context.startRendering(). + then((buffer) => { + var convolverOutput = buffer.getChannelData(0); + // There should be no non-zeros after the tolerance period. 
+ var testIndex = tolerancePeriod; + for (; + testIndex < buffer.length - 1 && convolverOutput[testIndex] == 0; + ++testIndex) { + } + assert_equals(convolverOutput[testIndex], 0, "output at " + testIndex); + }); +}); +</script> diff --git a/dom/media/webaudio/test/test_convolverNodePassThrough.html b/dom/media/webaudio/test/test_convolverNodePassThrough.html new file mode 100644 index 000000000..d5f9ef8ab --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNodePassThrough.html @@ -0,0 +1,48 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ConvolverNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var convolver = context.createConvolver(); + + source.buffer = this.buffer; + convolver.buffer = this.buffer; + + source.connect(convolver); + + var convolverWrapped = SpecialPowers.wrap(convolver); + ok("passThrough" in convolverWrapped, "ConvolverNode should support the passThrough API"); + convolverWrapped.passThrough = true; + + source.start(0); + return convolver; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_convolverNodeWithGain.html b/dom/media/webaudio/test/test_convolverNodeWithGain.html new file mode 100644 index 000000000..7bbe24089 --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNodeWithGain.html @@ 
-0,0 +1,62 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ConvolverNode after a GainNode - Bug 891254 </title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +const signalLength = 2048; +const responseLength = 100; +const outputLength = 4096; // > signalLength + responseLength + +var gTest = { + length: outputLength, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, signalLength, context.sampleRate); + for (var i = 0; i < signalLength; ++i) { + buffer.getChannelData(0)[i] = Math.sin(2 * Math.PI * i / signalLength); + } + + var source = context.createBufferSource(); + source.buffer = buffer; + source.start(0); + + var response = context.createBuffer(1, responseLength, context.sampleRate); + for (var i = 0; i < responseLength; ++i) { + response.getChannelData(0)[i] = i / responseLength; + } + + var gain = context.createGain(); + gain.gain.value = -1; + source.connect(gain); + + var convolver1 = context.createConvolver(); + convolver1.buffer = response; + gain.connect(convolver1); + + var convolver2 = context.createConvolver(); + convolver2.buffer = response; + source.connect(convolver2); + + // The output of convolver1 should be the inverse of convolver2, so blend + // them together and expect silence. 
+ var blend = context.createGain(); + convolver1.connect(blend); + convolver2.connect(blend); + + return blend; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_convolverNode_mono_mono.html b/dom/media/webaudio/test/test_convolverNode_mono_mono.html new file mode 100644 index 000000000..f7da2b020 --- /dev/null +++ b/dom/media/webaudio/test/test_convolverNode_mono_mono.html @@ -0,0 +1,73 @@ +<!DOCTYPE html> + +<html> +<head> +<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> +<script type="text/javascript" src="webaudio.js"></script> +<script type="text/javascript" src="layouttest-glue.js"></script> +<script type="text/javascript" src="blink/audio-testing.js"></script> +<script type="text/javascript" src="blink/convolution-testing.js"></script> +<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> + +<body> + +<div id="description"></div> +<div id="console"></div> + +<script> +description("Tests ConvolverNode processing a mono channel with mono impulse response."); +SimpleTest.waitForExplicitFinish(); + +// To test the convolver, we convolve two square pulses together to +// produce a triangular pulse. To verify the result is correct we +// check several parts of the result. First, we make sure the initial +// part of the result is zero (due to the latency in the convolver). +// Next, the triangular pulse should match the theoretical result to +// within some roundoff. After the triangular pulse, the result +// should be exactly zero, but round-off prevents that. We make sure +// the part after the pulse is sufficiently close to zero. Finally, +// the result should be exactly zero because the inputs are exactly +// zero. +function runTest() { + if (window.testRunner) { + testRunner.dumpAsText(); + testRunner.waitUntilDone(); + } + + window.jsTestIsAsync = true; + + // Create offline audio context. 
+ var context = new OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + + var squarePulse = createSquarePulseBuffer(context, pulseLengthFrames); + var trianglePulse = createTrianglePulseBuffer(context, 2 * pulseLengthFrames); + + var bufferSource = context.createBufferSource(); + bufferSource.buffer = squarePulse; + + var convolver = context.createConvolver(); + convolver.normalize = false; + convolver.buffer = squarePulse; + + bufferSource.connect(convolver); + convolver.connect(context.destination); + + bufferSource.start(0); + + context.oncomplete = checkConvolvedResult(trianglePulse); + context.startRendering(); +} + +function finishJSTest() { + SimpleTest.finish(); +} + +runTest(); +successfullyParsed = true; + +</script> + +<script src="../fast/js/resources/js-test-post.js"></script> +</body> +</html> diff --git a/dom/media/webaudio/test/test_currentTime.html b/dom/media/webaudio/test/test_currentTime.html new file mode 100644 index 000000000..bb015e5e2 --- /dev/null +++ b/dom/media/webaudio/test/test_currentTime.html @@ -0,0 +1,26 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioContext.currentTime</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout("This test needs to wait a while for the AudioContext's timer to start."); +addLoadEvent(function() { + var ac = new AudioContext(); + is(ac.currentTime, 0, "AudioContext.currentTime should be 0 initially"); + setTimeout(function() { + ok(ac.currentTime > 0, "AudioContext.currentTime should have increased by now"); + SimpleTest.finish(); + }, 1000); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_decodeAudioDataPromise.html b/dom/media/webaudio/test/test_decodeAudioDataPromise.html new 
file mode 100644 index 000000000..d07f55936 --- /dev/null +++ b/dom/media/webaudio/test/test_decodeAudioDataPromise.html @@ -0,0 +1,62 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the decodeAudioData API with Promise</title> + + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + <script src="webaudio.js"></script> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + +var finished = 0; + +function finish() { + if (++finished == 2) { + SimpleTest.finish(); + } +} + +var ac = new AudioContext(); +// Test that a the promise is rejected with an invalid source buffer. +expectNoException(function() { + var p = ac.decodeAudioData(" "); + ok(p instanceof Promise, "AudioContext.decodeAudioData should return a Promise"); + p.then(function(data) { + ok(false, "Promise should not resolve with an invalid source buffer."); + finish(); + }).catch(function(e) { + ok(true, "Promise should be rejected with an invalid source buffer."); + ok(e.name == "TypeError", "The error should be TypeError"); + finish(); + }) +}); + +// Test that a the promise is resolved with a valid source buffer. 
+var xhr = new XMLHttpRequest(); +xhr.open("GET", "ting-44.1k-1ch.ogg", true); +xhr.responseType = "arraybuffer"; +xhr.onload = function() { + var p = ac.decodeAudioData(xhr.response); + ok(p instanceof Promise, "AudioContext.decodeAudioData should return a Promise"); + p.then(function(data) { + ok(data instanceof AudioBuffer, "Promise should resolve, passing an AudioBuffer"); + ok(true, "Promise should resolve with a valid source buffer."); + finish(); + }).catch(function() { + ok(false, "Promise should not be rejected with a valid source buffer."); + finish(); + }); +}; +xhr.send(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_decodeMultichannel.html b/dom/media/webaudio/test/test_decodeMultichannel.html new file mode 100644 index 000000000..0fb2f5b3c --- /dev/null +++ b/dom/media/webaudio/test/test_decodeMultichannel.html @@ -0,0 +1,58 @@ +<!DOCTYPE HTML> +<html> +<meta charset=utf-8> +<head> + <title>Test that we can decode 4 channel wave file in webaudio, but not in <audio></title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +var filename = "audio-quad.wav"; + +SimpleTest.waitForExplicitFinish(); + +function finishTest(a) { + if (a) { + a = null; + SimpleTest.finish(); + } +} + +function decodeUsingAudioElement() { + var a = new Audio(); + a.addEventListener("error", function() { + ok(false, "Error loading metadata"); + finishTest(a); + }); + a.addEventListener("loadedmetadata", function() { + ok(true, "Metadata Loaded"); + finishTest(a); + }); + + a.src = filename; + a.load(); +} + +addLoadEvent(function() { + var xhr = new XMLHttpRequest(); + xhr.open("GET", filename); + xhr.responseType = "arraybuffer"; + xhr.onload = function() { + var context = new AudioContext(); + context.decodeAudioData(xhr.response, function(b) { + 
ok(true, "Decoding of a wave file with four channels succeded."); + is(b.numberOfChannels, 4, "The AudioBuffer should have 4 channels."); + decodeUsingAudioElement(); + }, function() { + ok(false, "Decoding of a wave file with four channels failed."); + decodeUsingAudioElement(); + }); + }; + xhr.send(null); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_decodeOpusTail.html b/dom/media/webaudio/test/test_decodeOpusTail.html new file mode 100644 index 000000000..b5b53f685 --- /dev/null +++ b/dom/media/webaudio/test/test_decodeOpusTail.html @@ -0,0 +1,28 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Regression test to check that opus files don't have a tail at the end.</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +// This gets a 1 second Opus file and decodes it to a buffer. The opus file is +// decoded at 48kHz, and the OfflineAudioContext is also at 48kHz, no resampling +// is taking place. 
+fetch('sweep-300-330-1sec.opus') +.then(function(response) { return response.arrayBuffer(); }) +.then(function(buffer) { + var off = new OfflineAudioContext(1, 128, 48000); + off.decodeAudioData(buffer, function(decoded) { + var pcm = decoded.getChannelData(0); + is(pcm.length, 48000, "The length of the decoded file is correct."); + SimpleTest.finish(); + }); +}); + +</script> diff --git a/dom/media/webaudio/test/test_delayNode.html b/dom/media/webaudio/test/test_delayNode.html new file mode 100644 index 000000000..a3e314ef7 --- /dev/null +++ b/dom/media/webaudio/test/test_delayNode.html @@ -0,0 +1,74 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DelayNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + + var delay = context.createDelay(); + + source.buffer = buffer; + + source.connect(delay); + + ok(delay.delayTime, "The audioparam member must exist"); + is(delay.delayTime.value, 0, "Correct initial value"); + is(delay.delayTime.defaultValue, 0, "Correct default value"); + delay.delayTime.value = 0.5; + is(delay.delayTime.value, 0.5, "Correct initial value"); + is(delay.delayTime.defaultValue, 0, "Correct default value"); + is(delay.channelCount, 2, "delay node has 2 input channels by default"); + is(delay.channelCountMode, "max", "Correct channelCountMode for the delay node"); + is(delay.channelInterpretation, "speakers", "Correct channelCountInterpretation for the delay 
node"); + + expectException(function() { + context.createDelay(0); + }, DOMException.NOT_SUPPORTED_ERR); + expectException(function() { + context.createDelay(180); + }, DOMException.NOT_SUPPORTED_ERR); + expectTypeError(function() { + context.createDelay(NaN); + }, DOMException.NOT_SUPPORTED_ERR); + expectException(function() { + context.createDelay(-1); + }, DOMException.NOT_SUPPORTED_ERR); + context.createDelay(1); // should not throw + + // Delay the source stream by 2048 frames + delay.delayTime.value = 2048 / context.sampleRate; + + source.start(0); + return delay; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048 * 2, context.sampleRate); + for (var i = 2048; i < 2048 * 2; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i - 2048) / context.sampleRate); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeAtMax.html b/dom/media/webaudio/test/test_delayNodeAtMax.html new file mode 100644 index 000000000..6c7dde3d1 --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeAtMax.html @@ -0,0 +1,53 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DelayNode with maxDelayTime delay - bug 890528</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +const signalLength = 2048; +const delayLength = 1000; // Not on a block boundary +const outputLength = 4096 // > signalLength + 2 * delayLength; + +function applySignal(buffer, offset) { + for (var i = 0; i < signalLength; ++i) { + buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength); + } +} + +var gTest = { + numberOfChannels: 1, + createGraph: function(context) { + var buffer 
= context.createBuffer(1, signalLength, context.sampleRate); + applySignal(buffer, 0); + + var source = context.createBufferSource(); + source.buffer = buffer; + + const delayTime = delayLength / context.sampleRate; + var delay = context.createDelay(delayTime); + delay.delayTime.value = delayTime; + + source.connect(delay); + + source.start(0); + return delay; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, outputLength, context.sampleRate); + applySignal(expectedBuffer, delayLength); + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeChannelChanges.html b/dom/media/webaudio/test/test_delayNodeChannelChanges.html new file mode 100644 index 000000000..229bfd069 --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeChannelChanges.html @@ -0,0 +1,98 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>test DelayNode channel count changes</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestCompleteLog(); + +const bufferSize = 4096; + +var ctx; +var testDelay; +var stereoDelay; +var invertor; + +function compareOutputs(callback) { + var processor = ctx.createScriptProcessor(bufferSize, 2, 0); + testDelay.connect(processor); + invertor.connect(processor); + processor.onaudioprocess = + function(e) { + compareBuffers(e.inputBuffer, + ctx.createBuffer(2, bufferSize, ctx.sampleRate)); + e.target.onaudioprocess = null; + callback(); + } +} + +function startTest() { + // And a two-channel signal + var merger = ctx.createChannelMerger(); + merger.connect(testDelay); + merger.connect(stereoDelay); + var oscL = ctx.createOscillator(); + 
oscL.connect(merger, 0, 0); + oscL.start(0); + var oscR = ctx.createOscillator(); + oscR.type = "sawtooth"; + oscR.connect(merger, 0, 1); + oscR.start(0); + + compareOutputs( + function () { + // Disconnect the two-channel signal and test again + merger.disconnect(); + compareOutputs(SimpleTest.finish); + }); +} + +function prepareTest() { + ctx = new AudioContext(); + + // The output of a test delay node with mono and stereo input will be + // compared with that of separate mono and stereo delay nodes. + const delayTime = 0.3 * bufferSize / ctx.sampleRate; + testDelay = ctx.createDelay(delayTime); + testDelay.delayTime.value = delayTime; + monoDelay = ctx.createDelay(delayTime); + monoDelay.delayTime.value = delayTime; + stereoDelay = ctx.createDelay(delayTime); + stereoDelay.delayTime.value = delayTime; + + // Create a one-channel signal and connect to the delay nodes + var monoOsc = ctx.createOscillator(); + monoOsc.frequency.value = 110; + monoOsc.connect(testDelay); + monoOsc.connect(monoDelay); + monoOsc.start(0); + + // Invert the expected so that mixing with the test will find the difference. + invertor = ctx.createGain(); + invertor.gain.value = -1.0; + monoDelay.connect(invertor); + stereoDelay.connect(invertor); + + // Start the test after the delay nodes have begun processing. 
+ var processor = ctx.createScriptProcessor(bufferSize, 1, 0); + processor.connect(ctx.destination); + + processor.onaudioprocess = + function(e) { + e.target.onaudioprocess = null; + processor.disconnect(); + startTest(); + }; +} +prepareTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeCycles.html b/dom/media/webaudio/test/test_delayNodeCycles.html new file mode 100644 index 000000000..f5f2e6786 --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeCycles.html @@ -0,0 +1,157 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the support of cycles.</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +const sampleRate = 48000; +const inputLength = 2048; + +addLoadEvent(function() { + function addSine(b) { + for (var i = 0; i < b.length; i++) { + b[i] += Math.sin(440 * 2 * Math.PI * i / sampleRate); + } + } + + function getSineBuffer(ctx) { + var buffer = ctx.createBuffer(1, inputLength, ctx.sampleRate); + addSine(buffer.getChannelData(0)); + return buffer; + } + + function createAndPlayWithCycleAndDelayNode(ctx, delayFrames) { + var source = ctx.createBufferSource(); + source.buffer = getSineBuffer(ctx); + + var gain = ctx.createGain(); + var delay = ctx.createDelay(); + delay.delayTime.value = delayFrames/ctx.sampleRate; + + source.connect(gain); + gain.connect(delay); + delay.connect(ctx.destination); + // cycle + delay.connect(gain); + + source.start(0); + } + + function createAndPlayWithCycleAndNoDelayNode(ctx) { + var source = ctx.createBufferSource(); + source.loop = true; + source.buffer = getSineBuffer(ctx); + + var gain = ctx.createGain(); + var gain2 = 
ctx.createGain(); + + source.connect(gain); + gain.connect(gain2); + // cycle + gain2.connect(gain); + gain2.connect(ctx.destination); + + source.start(0); + } + + function createAndPlayWithCycleAndNoDelayNodeInCycle(ctx) { + var source = ctx.createBufferSource(); + source.loop = true; + source.buffer = getSineBuffer(ctx); + + var delay = ctx.createDelay(); + var gain = ctx.createGain(); + var gain2 = ctx.createGain(); + + // Their is a cycle, a delay, but the delay is not in the cycle. + source.connect(delay); + delay.connect(gain); + gain.connect(gain2); + // cycle + gain2.connect(gain); + gain2.connect(ctx.destination); + + source.start(0); + } + + var remainingTests = 0; + function finish() { + if (--remainingTests == 0) { + SimpleTest.finish(); + } + } + + function getOfflineContext(oncomplete) { + var ctx = new OfflineAudioContext(1, sampleRate, sampleRate); + ctx.oncomplete = oncomplete; + return ctx; + } + + function checkSilentBuffer(e) { + var buffer = e.renderedBuffer.getChannelData(0); + for (var i = 0; i < buffer.length; i++) { + if (buffer[i] != 0.0) { + ok(false, "buffer should be silent."); + finish(); + return; + } + } + ok(true, "buffer should be silent."); + finish(); + } + + function checkNoisyBuffer(e, aDelayFrames) { + delayFrames = Math.max(128, aDelayFrames); + + var expected = new Float32Array(e.renderedBuffer.length); + for (var i = delayFrames; i < expected.length; i += delayFrames) { + addSine(expected.subarray(i, i + inputLength)); + } + + compareChannels(e.renderedBuffer.getChannelData(0), expected); + finish(); + } + + function expectSilentOutput(f) { + remainingTests++; + var ctx = getOfflineContext(checkSilentBuffer); + f(ctx); + ctx.startRendering(); + } + + function expectNoisyOutput(delayFrames) { + remainingTests++; + var ctx = getOfflineContext(); + ctx.oncomplete = function(e) { checkNoisyBuffer(e, delayFrames); }; + createAndPlayWithCycleAndDelayNode(ctx, delayFrames); + ctx.startRendering(); + } + + // This is trying to make 
a graph with a cycle and no DelayNode in the graph. + // The cycle subgraph should be muted, in this graph the output should be silent. + expectSilentOutput(createAndPlayWithCycleAndNoDelayNode); + // This is trying to make a graph with a cycle and a DelayNode in the graph, but + // not part of the cycle. + // The cycle subgraph should be muted, in this graph the output should be silent. + expectSilentOutput(createAndPlayWithCycleAndNoDelayNodeInCycle); + // Those are making legal graphs, with at least one DelayNode in the cycle. + // There should be some non-silent output. + expectNoisyOutput(sampleRate/4); + // DelayNode.delayTime will be clamped to 128/ctx.sampleRate. + // There should be some non-silent output. + expectNoisyOutput(0); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodePassThrough.html b/dom/media/webaudio/test/test_delayNodePassThrough.html new file mode 100644 index 000000000..4945ee95c --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodePassThrough.html @@ -0,0 +1,53 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DelayNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var delay = context.createDelay(); + + source.buffer = this.buffer; + + source.connect(delay); + + delay.delayTime.value = 0.5; + + // Delay the source stream by 2048 frames + delay.delayTime.value = 2048 / context.sampleRate; + + var delayWrapped = SpecialPowers.wrap(delay); + ok("passThrough" in delayWrapped, "DelayNode should support the passThrough API"); + delayWrapped.passThrough = true; + + source.start(0); + return delay; 
+ }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + var silence = context.createBuffer(1, 2048, context.sampleRate); + + return [this.buffer, silence]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html b/dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html new file mode 100644 index 000000000..b9cee458d --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeSmallMaxDelay.html @@ -0,0 +1,43 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DelayNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var delay = context.createDelay(0.02); + + source.buffer = this.buffer; + + source.connect(delay); + + source.start(0); + return delay; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + this.buffer = expectedBuffer; + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeTailIncrease.html b/dom/media/webaudio/test/test_delayNodeTailIncrease.html new file mode 100644 index 000000000..751602824 --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeTailIncrease.html @@ -0,0 +1,71 @@ +<!DOCTYPE HTML> +<html> 
+<head> + <title>Test increasing delay of DelayNode after input finishes</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +const signalLength = 100; +const bufferSize = 1024; +// Delay should be long enough to allow CC to run +const delayBufferCount = 50; +const delayLength = delayBufferCount * bufferSize + 700; + +var count = 0; + +function applySignal(buffer, offset) { + for (var i = 0; i < signalLength; ++i) { + buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength); + } +} + +function onAudioProcess(e) { + switch(count) { + case 5: + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + break; + case delayBufferCount: + var offset = delayLength - count * bufferSize; + var ctx = e.target.context; + var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate); + applySignal(expected, offset); + compareBuffers(e.inputBuffer, expected); + SimpleTest.finish(); + } + count++; +} + +function startTest() { + var ctx = new AudioContext(); + var processor = ctx.createScriptProcessor(bufferSize, 1, 0); + processor.onaudioprocess = onAudioProcess; + + // Switch on delay at a time in the future. 
+ var delayDuration = delayLength / ctx.sampleRate; + var delayStartTime = (delayLength - bufferSize) / ctx.sampleRate; + var delay = ctx.createDelay(delayDuration); + delay.delayTime.setValueAtTime(delayDuration, delayStartTime); + delay.connect(processor); + + // Short signal that finishes before switching to long delay + var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate); + applySignal(buffer, 0); + var source = ctx.createBufferSource(); + source.buffer = buffer; + source.start(); + source.connect(delay); +}; + +startTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html b/dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html new file mode 100644 index 000000000..fa431d61b --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeTailWithDisconnect.html @@ -0,0 +1,95 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test tail time lifetime of DelayNode after input is disconnected</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// Web Audio doesn't provide a means to precisely time disconnect()s but we +// can test that the output of delay nodes matches the output from their +// sources before they are disconnected. 
+ +SimpleTest.waitForExplicitFinish(); + +const signalLength = 128; +const bufferSize = 4096; +const sourceCount = bufferSize / signalLength; +// Delay should be long enough to allow CC to run +var delayBufferCount = 20; +const delayLength = delayBufferCount * bufferSize; + +var sourceOutput = new Float32Array(bufferSize); +var delayOutputCount = 0; +var sources = []; + +function onDelayOutput(e) { + if (delayOutputCount < delayBufferCount) { + delayOutputCount++; + return; + } + + compareChannels(e.inputBuffer.getChannelData(0), sourceOutput); + e.target.onaudioprocess = null; + SimpleTest.finish(); +} + +function onSourceOutput(e) { + // Record the first buffer + e.inputBuffer.copyFromChannel(sourceOutput, 0); + e.target.onaudioprocess = null; +} + +function disconnectSources() { + for (var i = 0; i < sourceCount; ++i) { + sources[i].disconnect(); + } + + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); +} + +function startTest() { + var ctx = new AudioContext(); + + var sourceProcessor = ctx.createScriptProcessor(bufferSize, 1, 0); + sourceProcessor.onaudioprocess = onSourceOutput; + // Keep audioprocess events going after source disconnect. 
+ sourceProcessor.connect(ctx.destination); + + var delayProcessor = ctx.createScriptProcessor(bufferSize, 1, 0); + delayProcessor.onaudioprocess = onDelayOutput; + + var delayDuration = delayLength / ctx.sampleRate; + for (var i = 0; i < sourceCount; ++i) { + var delay = ctx.createDelay(delayDuration); + delay.delayTime.value = delayDuration; + delay.connect(delayProcessor); + + var source = ctx.createOscillator(); + source.frequency.value = 440 + 10 * i + source.start(i * signalLength / ctx.sampleRate); + source.stop((i + 1) * signalLength / ctx.sampleRate); + source.connect(delay); + source.connect(sourceProcessor); + + sources[i] = source; + } + + // Assuming the above Web Audio operations have already scheduled an event + // to run in stable state and start the graph thread, schedule a subsequent + // event to disconnect the sources, which will remove main thread connection + // references before it knows the graph thread has started using the source + // streams. + SimpleTest.executeSoon(disconnectSources); +}; + +startTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeTailWithGain.html b/dom/media/webaudio/test/test_delayNodeTailWithGain.html new file mode 100644 index 000000000..6994a7f9d --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeTailWithGain.html @@ -0,0 +1,72 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test tail time lifetime of DelayNode indirectly connected to source</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +const signalLength = 130; +const bufferSize = 1024; +// Delay should be long enough to allow CC to run +const delayBufferCount = 50; +const delayLength = delayBufferCount * 
bufferSize + 700; + +var count = 0; + +function applySignal(buffer, offset) { + for (var i = 0; i < signalLength; ++i) { + buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength); + } +} + +function onAudioProcess(e) { + switch(count) { + case 5: + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + break; + case delayBufferCount: + var offset = delayLength - count * bufferSize; + var ctx = e.target.context; + var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate); + applySignal(expected, offset); + compareBuffers(e.inputBuffer, expected); + SimpleTest.finish(); + } + count++; +} + +function startTest() { + var ctx = new AudioContext(); + var processor = ctx.createScriptProcessor(bufferSize, 1, 0); + processor.onaudioprocess = onAudioProcess; + + var delayDuration = delayLength / ctx.sampleRate; + var delay = ctx.createDelay(delayDuration); + delay.delayTime.value = delayDuration; + delay.connect(processor); + + var gain = ctx.createGain(); + gain.connect(delay); + + // Short signal that finishes before garbage collection + var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate); + applySignal(buffer, 0); + var source = ctx.createBufferSource(); + source.buffer = buffer; + source.start(); + source.connect(gain); +}; + +startTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeTailWithReconnect.html b/dom/media/webaudio/test/test_delayNodeTailWithReconnect.html new file mode 100644 index 000000000..6c1cda580 --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeTailWithReconnect.html @@ -0,0 +1,136 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test tail time lifetime of DelayNode after input finishes and new input added</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> 
+<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +// The buffer source will start on a block boundary, so keeping the signal +// within one block ensures that it will not cross AudioProcessingEvent buffer +// boundaries. +const signalLength = 128; +const bufferSize = 1024; +// Delay should be long enough to allow CC to run +var delayBufferCount = 50; +var delayBufferOffset; +const delayLength = delayBufferCount * bufferSize; + +var phase = "initial"; +var sourceCount = 0; +var delayCount = 0; +var oscillator; +var delay; +var source; + +function applySignal(buffer, offset) { + for (var i = 0; i < signalLength; ++i) { + buffer.getChannelData(0)[offset + i] = Math.cos(Math.PI * i / signalLength); + } +} + +function bufferIsSilent(buffer, out) { + for (var i = 0; i < buffer.length; ++i) { + if (buffer.getChannelData(0)[i] != 0) { + if (out) { + out.soundOffset = i; + } + return false; + } + } + return true; +} + +function onDelayOutput(e) { + switch(phase) { + + case "initial": + // Wait for oscillator sound to exit delay + if (bufferIsSilent(e.inputBuffer)) + break; + + phase = "played oscillator"; + break; + + case "played oscillator": + // First tail time has expired. Start second source and remove references + // to the delay and connected second source. 
+ oscillator.disconnect(); + source.connect(delay); + source.start(); + source = null; + delay = null; + phase = "started second source"; + break; + + case "second tail time": + if (delayCount == delayBufferCount) { + var ctx = e.target.context; + var expected = ctx.createBuffer(1, bufferSize, ctx.sampleRate); + applySignal(expected, delayBufferOffset); + compareBuffers(e.inputBuffer, expected); + e.target.onaudioprocess = null; + SimpleTest.finish(); + } + } + + delayCount++; +} + +function onSourceOutput(e) { + switch(phase) { + case "started second source": + var out = {}; + if (!bufferIsSilent(e.inputBuffer, out)) { + delayBufferCount += sourceCount; + delayBufferOffset = out.soundOffset; + phase = "played second source"; + } + break; + case "played second source": + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + phase = "second tail time"; + e.target.onaudioprocess = null; + } + + sourceCount++; +} + +function startTest() { + var ctx = new AudioContext(); + var delayDuration = delayLength / ctx.sampleRate; + delay = ctx.createDelay(delayDuration); + delay.delayTime.value = delayDuration; + var processor1 = ctx.createScriptProcessor(bufferSize, 1, 0); + delay.connect(processor1); + processor1.onaudioprocess = onDelayOutput; + + // Signal to trigger initial tail time reference + oscillator = ctx.createOscillator(); + oscillator.start(0); + oscillator.stop(100/ctx.sampleRate); + oscillator.connect(delay); + + // Short signal, not started yet, with a ScriptProcessor to detect when it + // starts. It should finish before garbage collection. 
+ var buffer = ctx.createBuffer(1, signalLength, ctx.sampleRate); + applySignal(buffer, 0); + source = ctx.createBufferSource(); + source.buffer = buffer; + var processor2 = ctx.createScriptProcessor(bufferSize, 1, 0); + source.connect(processor2); + processor2.onaudioprocess = onSourceOutput; +}; + +startTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_delayNodeWithGain.html b/dom/media/webaudio/test/test_delayNodeWithGain.html new file mode 100644 index 000000000..768bea77c --- /dev/null +++ b/dom/media/webaudio/test/test_delayNodeWithGain.html @@ -0,0 +1,54 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DelayNode with a GainNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + + var delay = context.createDelay(); + + source.buffer = buffer; + + var gain = context.createGain(); + gain.gain.value = 0.5; + + source.connect(gain); + gain.connect(delay); + + // Delay the source stream by 2048 frames + delay.delayTime.value = 2048 / context.sampleRate; + + source.start(0); + return delay; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048 * 2, context.sampleRate); + for (var i = 2048; i < 2048 * 2; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i - 2048) / context.sampleRate) / 2; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> 
+</html> diff --git a/dom/media/webaudio/test/test_disconnectAll.html b/dom/media/webaudio/test/test_disconnectAll.html new file mode 100644 index 000000000..9d3af066e --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectAll.html @@ -0,0 +1,51 @@ +<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 256, context.sampleRate); + var data = sourceBuffer.getChannelData(0); + for (var j = 0; j < data.length; j++) { + data[j] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain1 = context.createGain(); + var gain2 = context.createGain(); + var gain3 = context.createGain(); + var merger = context.createChannelMerger(3); + + source.connect(gain1); + source.connect(gain2); + source.connect(gain3); + gain1.connect(merger); + gain2.connect(merger); + gain3.connect(merger); + source.start(); + + source.disconnect(); + + return merger; + } + }; + + runTest(); + </script> + </pre> + </body> +</html>
\ No newline at end of file diff --git a/dom/media/webaudio/test/test_disconnectAudioParam.html b/dom/media/webaudio/test/test_disconnectAudioParam.html new file mode 100644 index 000000000..1f4e79c56 --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectAudioParam.html @@ -0,0 +1,58 @@ +<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioParam</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 256, context.sampleRate); + var data = sourceBuffer.getChannelData(0); + for (var j = 0; j < data.length; j++) { + data[j] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var half = context.createGain(); + var gain1 = context.createGain(); + var gain2 = context.createGain(); + + half.gain.value = 0.5; + + source.connect(gain1); + gain1.connect(gain2); + source.connect(half); + + half.connect(gain1.gain); + half.connect(gain2.gain); + + half.disconnect(gain2.gain); + + source.start(); + + return gain2; + }, + createExpectedBuffers: function(context) { + expectedBuffer = context.createBuffer(1, 256, context.sampleRate); + for (var i = 0; i < 256; ++i) { + expectedBuffer.getChannelData(0)[i] = 1.5; + } + + return expectedBuffer; + } + }; + + runTest(); + </script> + </pre> + </body> +</html> diff --git a/dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html b/dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html new file mode 100644 index 000000000..a08ffa53b --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectAudioParamFromOutput.html @@ -0,0 +1,67 @@ +<!DOCTYPE HTML> +<html> + 
<head> + <title>Test whether we can disconnect an AudioParam</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 2, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(2, 256, context.sampleRate); + for (var i = 1; i <= 2; i++) { + var data = sourceBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = i; + } + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var half = context.createGain(); + var gain1 = context.createGain(); + var gain2 = context.createGain(); + var splitter = context.createChannelSplitter(2); + + half.gain.value = 0.5; + + source.connect(gain1); + gain1.connect(gain2); + source.connect(half); + half.connect(splitter); + splitter.connect(gain1.gain, 0); + splitter.connect(gain2.gain, 1); + + splitter.disconnect(gain2.gain, 1); + + source.start(); + + return gain2; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(2, 256, context.sampleRate); + for (var i = 1; i <= 2; i++) { + var data = expectedBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = (i == 1) ? 
1.5 : 3.0; + } + } + + return expectedBuffer; + } + }; + + runTest(); + </script> + </pre> + </body> +</html> diff --git a/dom/media/webaudio/test/test_disconnectExceptions.html b/dom/media/webaudio/test/test_disconnectExceptions.html new file mode 100644 index 000000000..ceba972c9 --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectExceptions.html @@ -0,0 +1,75 @@ +<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var ctx = new AudioContext(); + var sourceBuffer = ctx.createBuffer(2, 256, ctx.sampleRate); + for (var i = 1; i <= 2; i++) { + var data = sourceBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = i; + } + } + + var source = ctx.createBufferSource(); + source.buffer = sourceBuffer; + + var gain1 = ctx.createGain(); + var splitter = ctx.createChannelSplitter(2); + var merger = ctx.createChannelMerger(2); + var gain2 = ctx.createGain(); + var gain3 = ctx.createGain(); + + gain1.connect(splitter); + splitter.connect(gain2, 0); + splitter.connect(gain3, 1); + splitter.connect(merger, 0, 0); + splitter.connect(merger, 1, 1); + gain2.connect(gain3); + gain3.connect(ctx.destination); + merger.connect(ctx.destination); + + expectException(function() { + splitter.disconnect(2); + }, DOMException.INDEX_SIZE_ERR); + + expectNoException(function() { + splitter.disconnect(1); + splitter.disconnect(1); + }); + + expectException(function() { + gain1.disconnect(gain2); + }, DOMException.INVALID_ACCESS_ERR); + + expectException(function() { + gain1.disconnect(gain3); + ok(false, 'Should get InvalidAccessError exception'); + }, DOMException.INVALID_ACCESS_ERR); + + 
expectException(function() { + splitter.disconnect(gain2, 2); + }, DOMException.INDEX_SIZE_ERR); + + expectException(function() { + splitter.disconnect(gain1, 0); + }, DOMException.INVALID_ACCESS_ERR); + + expectException(function() { + splitter.disconnect(gain3, 0, 0); + }, DOMException.INVALID_ACCESS_ERR); + + expectException(function() { + splitter.disconnect(merger, 3, 0); + }, DOMException.INDEX_SIZE_ERR); + </script> + </pre> + </body> +</html> diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNode.html b/dom/media/webaudio/test/test_disconnectFromAudioNode.html new file mode 100644 index 000000000..931195146 --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectFromAudioNode.html @@ -0,0 +1,55 @@ +<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 256, context.sampleRate); + var data = sourceBuffer.getChannelData(0); + for (var j = 0; j < data.length; j++) { + data[j] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain1 = context.createGain(); + var gain2 = context.createGain(); + var gain3 = context.createGain(); + + source.connect(gain1); + source.connect(gain2); + + gain1.connect(gain3); + gain2.connect(gain3); + + source.start(); + + source.disconnect(gain2); + + return gain3; + }, + createExpectedBuffers: function(context) { + expectedBuffer = context.createBuffer(1, 256, context.sampleRate); + for (var i = 0; i < 256; ++i) { + expectedBuffer.getChannelData(0)[i] = 1.0; + } + + return expectedBuffer; + } + }; + + 
runTest(); + </script> + </pre> + </body> +</html> diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html new file mode 100644 index 000000000..5c4e3ee5d --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutput.html @@ -0,0 +1,59 @@ +<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 2, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(2, 256, context.sampleRate); + for (var i = 1; i <= 2; i++) { + var data = sourceBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = i; + } + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var splitter = context.createChannelSplitter(2); + var gain1 = context.createGain(); + var gain2 = context.createGain(); + var merger = context.createChannelMerger(2); + + source.connect(splitter); + splitter.connect(gain1, 0); + splitter.connect(gain2, 0); + splitter.connect(gain2, 1); + gain1.connect(merger, 0, 1); + gain2.connect(merger, 0, 1); + source.start(); + + splitter.disconnect(gain2, 0); + + return merger; + }, + createExpectedBuffers: function(context) { + expectedBuffer = context.createBuffer(2, 256, context.sampleRate); + for (var i = 0; i < 256; ++i) { + expectedBuffer.getChannelData(0)[i] = 0; + expectedBuffer.getChannelData(1)[i] = 3; + } + + return expectedBuffer; + } + }; + + runTest(); + </script> + </pre> + </body> +</html> diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html 
b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html new file mode 100644 index 000000000..6526cf65b --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectFromAudioNodeAndOutputAndInput.html @@ -0,0 +1,57 @@ +<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 3, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(3, 256, context.sampleRate); + for (var i = 1; i <= 3; i++) { + var data = sourceBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = i; + } + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var splitter = context.createChannelSplitter(3); + var merger = context.createChannelMerger(3); + + source.connect(splitter); + splitter.connect(merger, 0, 0); + splitter.connect(merger, 1, 1); + splitter.connect(merger, 2, 2); + source.start(); + + splitter.disconnect(merger, 2, 2); + + return merger; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(3, 256, context.sampleRate); + for (var i = 1; i <= 3; i++) { + var data = expectedBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = (i == 3) ? 0 : i; + } + } + + return expectedBuffer; + } + }; + + runTest(); + </script> + </pre> + </body> +</html>
\ No newline at end of file diff --git a/dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html b/dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html new file mode 100644 index 000000000..746b7ba93 --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectFromAudioNodeMultipleConnection.html @@ -0,0 +1,56 @@ +<!DOCTYPE HTML> +<html> + <head> + <title> + Test whether we can disconnect all outbound connection of an AudioNode + </title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 2, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 256, context.sampleRate); + var data = sourceBuffer.getChannelData(0); + for (var j = 0; j < data.length; j++) { + data[j] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var merger = context.createChannelMerger(2); + var gain = context.createGain(); + + source.connect(merger, 0, 0); + source.connect(gain); + source.connect(merger, 0, 1); + + source.disconnect(merger); + + source.start(); + + return merger; + }, + createExpectedBuffers: function(context) { + expectedBuffer = context.createBuffer(2, 256, context.sampleRate); + for (var channel = 0; channel < 2; channel++) { + for (var i = 0; i < 256; ++i) { + expectedBuffer.getChannelData(0)[i] = 0; + } + } + + return expectedBuffer; + } + }; + + runTest(); + </script> + </pre> + </body> +</html> diff --git a/dom/media/webaudio/test/test_disconnectFromOutput.html b/dom/media/webaudio/test/test_disconnectFromOutput.html new file mode 100644 index 000000000..8a6daf5c7 --- /dev/null +++ b/dom/media/webaudio/test/test_disconnectFromOutput.html @@ -0,0 +1,54 @@ 
+<!DOCTYPE HTML> +<html> + <head> + <title>Test whether we can disconnect an AudioNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> + </head> + <body> + <pre id="test"> + <script class="testbody" type="text/javascript"> + var gTest = { + length: 256, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(3, 256, context.sampleRate); + for (var i = 1; i <= 3; i++) { + var data = sourceBuffer.getChannelData(i-1); + for (var j = 0; j < data.length; j++) { + data[j] = i; + } + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var splitter = context.createChannelSplitter(3); + var sum = context.createGain(); + + source.connect(splitter); + splitter.connect(sum, 0); + splitter.connect(sum, 1); + splitter.connect(sum, 2); + source.start(); + + splitter.disconnect(1); + + return sum; + }, + createExpectedBuffers: function(context) { + expectedBuffer = context.createBuffer(1, 256, context.sampleRate); + for (var i = 0; i < 256; ++i) { + expectedBuffer.getChannelData(0)[i] = 4; + } + + return expectedBuffer; + }, + }; + + runTest(); + </script> + </pre> + </body> +</html>
\ No newline at end of file diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNode.html b/dom/media/webaudio/test/test_dynamicsCompressorNode.html new file mode 100644 index 000000000..052b27671 --- /dev/null +++ b/dom/media/webaudio/test/test_dynamicsCompressorNode.html @@ -0,0 +1,70 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DynamicsCompressorNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function near(a, b, msg) { + ok(Math.abs(a - b) < 1e-4, msg); +} + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + + var osc = context.createOscillator(); + var sp = context.createScriptProcessor(); + + var compressor = context.createDynamicsCompressor(); + + osc.connect(compressor); + osc.connect(sp); + compressor.connect(context.destination); + + is(compressor.channelCount, 2, "compressor node has 2 input channels by default"); + is(compressor.channelCountMode, "explicit", "Correct channelCountMode for the compressor node"); + is(compressor.channelInterpretation, "speakers", "Correct channelCountInterpretation for the compressor node"); + + // Verify default values + with (compressor) { + ok(threshold instanceof AudioParam, "treshold is an AudioParam"); + near(threshold.defaultValue, -24, "Correct default value for threshold"); + ok(knee instanceof AudioParam, "knee is an AudioParam"); + near(knee.defaultValue, 30, "Correct default value for knee"); + ok(ratio instanceof AudioParam, "knee is an AudioParam"); + near(ratio.defaultValue, 12, "Correct default value for ratio"); + is(typeof reduction, "number", "reduction is a number"); + near(reduction, 0, "Correct default value for reduction"); + ok(attack instanceof AudioParam, "attack is an AudioParam"); + near(attack.defaultValue, 0.003, "Correct 
default value for attack"); + ok(release instanceof AudioParam, "release is an AudioParam"); + near(release.defaultValue, 0.25, "Correct default value for release"); + } + + compressor.threshold.value = -80; + + osc.start(); + var iteration = 0; + sp.onaudioprocess = function(e) { + if (iteration > 10) { + ok(compressor.reduction < 0, + "Feeding a full-scale sine to a compressor should result in an db" + + "reduction."); + sp.onaudioprocess = null; + osc.stop(0); + + SimpleTest.finish(); + } + iteration++; + } +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html b/dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html new file mode 100644 index 000000000..1be838a4e --- /dev/null +++ b/dom/media/webaudio/test/test_dynamicsCompressorNodePassThrough.html @@ -0,0 +1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test DynamicsCompressorNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var compressor = context.createDynamicsCompressor(); + + source.buffer = this.buffer; + + source.connect(compressor); + + var compressorWrapped = SpecialPowers.wrap(compressor); + ok("passThrough" in compressorWrapped, "DynamicsCompressorNode should support the passThrough API"); + compressorWrapped.passThrough = true; + + source.start(0); + return compressor; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / 
context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html b/dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html new file mode 100644 index 000000000..2e4b38ea5 --- /dev/null +++ b/dom/media/webaudio/test/test_dynamicsCompressorNodeWithGain.html @@ -0,0 +1,51 @@ +<!DOCTYPE HTML> +<html> +<head> +<meta charset="utf-8"> + <title>Test DynamicsCompressor with Gain</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +addLoadEvent(function() { + var samplerate = 44100; + var context = new OfflineAudioContext(1, samplerate/100, samplerate); + + var osc = context.createOscillator(); + osc.frequency.value = 2400; + + var gain = context.createGain(); + gain.gain.value = 1.5; + + // These numbers are borrowed from the example code on MDN + // https://developer.mozilla.org/en-US/docs/Web/API/DynamicsCompressorNode + var compressor = context.createDynamicsCompressor(); + compressor.threshold.value = -50; + compressor.knee.value = 40; + compressor.ratio.value = 12; + compressor.reduction.value = -20; + compressor.attack.value = 0; + compressor.release.value = 0.25; + + osc.connect(gain); + gain.connect(compressor); + compressor.connect(context.destination); + osc.start(); + + context.startRendering().then(buffer => { + var peak = Math.max(...buffer.getChannelData(0)); + console.log(peak); + // These values are experimentally determined. Without dynamics compression + // the peak should be just under 1.5. We also check for a minimum value + // to make sure we are not getting all zeros. 
+ ok(peak >= 0.2 && peak < 1.0, "Peak value should be greater than 0.25 and less than 1.0"); + SimpleTest.finish(); + }); +}); +</script> +<pre> +</pre> +</body> diff --git a/dom/media/webaudio/test/test_gainNode.html b/dom/media/webaudio/test/test_gainNode.html new file mode 100644 index 000000000..41b19fda0 --- /dev/null +++ b/dom/media/webaudio/test/test_gainNode.html @@ -0,0 +1,57 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test GainNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + + var gain = context.createGain(); + + source.buffer = buffer; + + source.connect(gain); + + ok(gain.gain, "The audioparam member must exist"); + is(gain.gain.value, 1.0, "Correct initial value"); + is(gain.gain.defaultValue, 1.0, "Correct default value"); + gain.gain.value = 0.5; + is(gain.gain.value, 0.5, "Correct initial value"); + is(gain.gain.defaultValue, 1.0, "Correct default value"); + is(gain.channelCount, 2, "gain node has 2 input channels by default"); + is(gain.channelCountMode, "max", "Correct channelCountMode for the gain node"); + is(gain.channelInterpretation, "speakers", "Correct channelCountInterpretation for the gain node"); + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * 
Math.PI * i / context.sampleRate) / 2; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_gainNodeInLoop.html b/dom/media/webaudio/test/test_gainNodeInLoop.html new file mode 100644 index 000000000..6b32cbcfa --- /dev/null +++ b/dom/media/webaudio/test/test_gainNodeInLoop.html @@ -0,0 +1,48 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test GainNode in presence of loops</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + source.loop = true; + source.start(0); + source.stop(sourceBuffer.duration * 2); + + var gain = context.createGain(); + // Adjust the gain in a way that we don't just end up modifying AudioChunk::mVolume + gain.gain.setValueAtTime(0.5, 0); + source.connect(gain); + return gain; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate); + for (var i = 0; i < 4096; ++i) { + expectedBuffer.getChannelData(0)[i] = 0.5; + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_gainNodePassThrough.html b/dom/media/webaudio/test/test_gainNodePassThrough.html new file mode 100644 index 000000000..2a7cd6bf4 --- /dev/null +++ b/dom/media/webaudio/test/test_gainNodePassThrough.html @@ -0,0 +1,49 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test GainNode with 
passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var gain = context.createGain(); + + source.buffer = this.buffer; + + source.connect(gain); + + gain.gain.value = 0.5; + + var gainWrapped = SpecialPowers.wrap(gain); + ok("passThrough" in gainWrapped, "GainNode should support the passThrough API"); + gainWrapped.passThrough = true; + + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_iirFilterNodePassThrough.html b/dom/media/webaudio/test/test_iirFilterNodePassThrough.html new file mode 100644 index 000000000..7773a5b82 --- /dev/null +++ b/dom/media/webaudio/test/test_iirFilterNodePassThrough.html @@ -0,0 +1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test IIRFilterNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var filter = context.createIIRFilter([0.5, 0.5], [1.0]); + + 
source.buffer = this.buffer; + + source.connect(filter); + + var filterWrapped = SpecialPowers.wrap(filter); + ok("passThrough" in filterWrapped, "BiquadFilterNode should support the passThrough API"); + filterWrapped.passThrough = true; + + source.start(0); + return filter; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_maxChannelCount.html b/dom/media/webaudio/test/test_maxChannelCount.html new file mode 100644 index 000000000..319e2bf1e --- /dev/null +++ b/dom/media/webaudio/test/test_maxChannelCount.html @@ -0,0 +1,38 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the AudioContext.destination interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// Work around bug 911777 +SpecialPowers.forceGC(); +SpecialPowers.forceCC(); + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ac = new AudioContext(); + ok(ac.destination.maxChannelCount > 0, "We can query the maximum number of channels"); + + var oac = new OfflineAudioContext(2, 1024, 48000); + ok(oac.destination.maxChannelCount, 2, "This OfflineAudioContext should have 2 max channels."); + + oac = new OfflineAudioContext(6, 1024, 48000); + ok(oac.destination.maxChannelCount, 6, "This OfflineAudioContext should have 6 max channels."); + + expectException(function() { + oac.destination.channelCount = oac.destination.channelCount + 1; + }, DOMException.INDEX_SIZE_ERR); + + SimpleTest.finish(); +}); + 
+</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaDecoding.html b/dom/media/webaudio/test/test_mediaDecoding.html new file mode 100644 index 000000000..07e18162b --- /dev/null +++ b/dom/media/webaudio/test/test_mediaDecoding.html @@ -0,0 +1,367 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the decodeAudioData API and Resampling</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script type="text/javascript"> + +// These routines have been copied verbatim from WebKit, and are used in order +// to convert a memory buffer into a wave buffer. +function writeString(s, a, offset) { + for (var i = 0; i < s.length; ++i) { + a[offset + i] = s.charCodeAt(i); + } +} + +function writeInt16(n, a, offset) { + n = Math.floor(n); + + var b1 = n & 255; + var b2 = (n >> 8) & 255; + + a[offset + 0] = b1; + a[offset + 1] = b2; +} + +function writeInt32(n, a, offset) { + n = Math.floor(n); + var b1 = n & 255; + var b2 = (n >> 8) & 255; + var b3 = (n >> 16) & 255; + var b4 = (n >> 24) & 255; + + a[offset + 0] = b1; + a[offset + 1] = b2; + a[offset + 2] = b3; + a[offset + 3] = b4; +} + +function writeAudioBuffer(audioBuffer, a, offset) { + var n = audioBuffer.length; + var channels = audioBuffer.numberOfChannels; + + for (var i = 0; i < n; ++i) { + for (var k = 0; k < channels; ++k) { + var buffer = audioBuffer.getChannelData(k); + var sample = buffer[i] * 32768.0; + + // Clip samples to the limitations of 16-bit. + // If we don't do this then we'll get nasty wrap-around distortion. 
+ if (sample < -32768) + sample = -32768; + if (sample > 32767) + sample = 32767; + + writeInt16(sample, a, offset); + offset += 2; + } + } +} + +function createWaveFileData(audioBuffer) { + var frameLength = audioBuffer.length; + var numberOfChannels = audioBuffer.numberOfChannels; + var sampleRate = audioBuffer.sampleRate; + var bitsPerSample = 16; + var byteRate = sampleRate * numberOfChannels * bitsPerSample/8; + var blockAlign = numberOfChannels * bitsPerSample/8; + var wavDataByteLength = frameLength * numberOfChannels * 2; // 16-bit audio + var headerByteLength = 44; + var totalLength = headerByteLength + wavDataByteLength; + + var waveFileData = new Uint8Array(totalLength); + + var subChunk1Size = 16; // for linear PCM + var subChunk2Size = wavDataByteLength; + var chunkSize = 4 + (8 + subChunk1Size) + (8 + subChunk2Size); + + writeString("RIFF", waveFileData, 0); + writeInt32(chunkSize, waveFileData, 4); + writeString("WAVE", waveFileData, 8); + writeString("fmt ", waveFileData, 12); + + writeInt32(subChunk1Size, waveFileData, 16); // SubChunk1Size (4) + writeInt16(1, waveFileData, 20); // AudioFormat (2) + writeInt16(numberOfChannels, waveFileData, 22); // NumChannels (2) + writeInt32(sampleRate, waveFileData, 24); // SampleRate (4) + writeInt32(byteRate, waveFileData, 28); // ByteRate (4) + writeInt16(blockAlign, waveFileData, 32); // BlockAlign (2) + writeInt32(bitsPerSample, waveFileData, 34); // BitsPerSample (4) + + writeString("data", waveFileData, 36); + writeInt32(subChunk2Size, waveFileData, 40); // SubChunk2Size (4) + + // Write actual audio data starting at offset 44. + writeAudioBuffer(audioBuffer, waveFileData, 44); + + return waveFileData; +} + +</script> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +// fuzzTolerance and fuzzToleranceMobile are used to determine fuzziness +// thresholds. 
They're needed to make sure that we can deal with neglibible +// differences in the binary buffer caused as a result of resampling the +// audio. fuzzToleranceMobile is typically larger on mobile platforms since +// we do fixed-point resampling as opposed to floating-point resampling on +// those platforms. +var files = [ + // An ogg file, 44.1khz, mono + { + url: "ting-44.1k-1ch.ogg", + valid: true, + expectedUrl: "ting-44.1k-1ch.wav", + numberOfChannels: 1, + frames: 30592, + sampleRate: 44100, + duration: 0.693, + fuzzTolerance: 5, + fuzzToleranceMobile: 1284 + }, + // An ogg file, 44.1khz, stereo + { + url: "ting-44.1k-2ch.ogg", + valid: true, + expectedUrl: "ting-44.1k-2ch.wav", + numberOfChannels: 2, + frames: 30592, + sampleRate: 44100, + duration: 0.693, + fuzzTolerance: 6, + fuzzToleranceMobile: 2544 + }, + // An ogg file, 48khz, mono + { + url: "ting-48k-1ch.ogg", + valid: true, + expectedUrl: "ting-48k-1ch.wav", + numberOfChannels: 1, + frames: 33297, + sampleRate: 48000, + duration: 0.693, + fuzzTolerance: 5, + fuzzToleranceMobile: 1388 + }, + // An ogg file, 48khz, stereo + { + url: "ting-48k-2ch.ogg", + valid: true, + expectedUrl: "ting-48k-2ch.wav", + numberOfChannels: 2, + frames: 33297, + sampleRate: 48000, + duration: 0.693, + fuzzTolerance: 14, + fuzzToleranceMobile: 2752 + }, + // Make sure decoding a wave file results in the same buffer (for both the + // resampling and non-resampling cases) + { + url: "ting-44.1k-1ch.wav", + valid: true, + expectedUrl: "ting-44.1k-1ch.wav", + numberOfChannels: 1, + frames: 30592, + sampleRate: 44100, + duration: 0.693, + fuzzTolerance: 0, + fuzzToleranceMobile: 0 + }, + { + url: "ting-48k-1ch.wav", + valid: true, + expectedUrl: "ting-48k-1ch.wav", + numberOfChannels: 1, + frames: 33297, + sampleRate: 48000, + duration: 0.693, + fuzzTolerance: 0, + fuzzToleranceMobile: 0 + }, + // // A wave file + // //{ url: "24bit-44khz.wav", valid: true, expectedUrl: "24bit-44khz-expected.wav" }, + // A non-audio file + { 
url: "invalid.txt", valid: false, sampleRate: 44100 }, + // A webm file with no audio + { url: "noaudio.webm", valid: false, sampleRate: 48000 }, + // A video ogg file with audio + { + url: "audio.ogv", + valid: true, + expectedUrl: "audio-expected.wav", + numberOfChannels: 2, + sampleRate: 44100, + frames: 47680, + duration: 1.0807, + fuzzTolerance: 106, + fuzzToleranceMobile: 3482 + } +]; + +// Returns true if the memory buffers are less different that |fuzz| bytes +function fuzzyMemcmp(buf1, buf2, fuzz) { + var result = true; + var difference = 0; + is(buf1.length, buf2.length, "same length"); + for (var i = 0; i < buf1.length; ++i) { + if (Math.abs(buf1[i] - buf2[i])) { + ++difference; + } + } + if (difference > fuzz) { + ok(false, "Expected at most " + fuzz + " bytes difference, found " + difference + " bytes"); + } + return difference <= fuzz; +} + +function getFuzzTolerance(test) { + var kIsMobile = + navigator.userAgent.indexOf("Mobile") != -1 || // b2g + navigator.userAgent.indexOf("Android") != -1; // android + return kIsMobile ? 
test.fuzzToleranceMobile : test.fuzzTolerance; +} + +function bufferIsSilent(buffer) { + for (var i = 0; i < buffer.length; ++i) { + if (buffer.getChannelData(0)[i] != 0) { + return false; + } + } + return true; +} + +function checkAudioBuffer(buffer, test) { + if (buffer.numberOfChannels != test.numberOfChannels) { + is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels"); + return; + } + ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration"); + if (Math.abs(buffer.duration - test.duration) >= 1e-3) { + ok(false, "got: " + buffer.duration + ", expected: " + test.duration); + } + is(buffer.sampleRate, test.sampleRate, "Correct sample rate"); + is(buffer.length, test.frames, "Correct length"); + + var wave = createWaveFileData(buffer); + ok(fuzzyMemcmp(wave, test.expectedWaveData, getFuzzTolerance(test)), "Received expected decoded data"); +} + +function checkResampledBuffer(buffer, test, callback) { + if (buffer.numberOfChannels != test.numberOfChannels) { + is(buffer.numberOfChannels, test.numberOfChannels, "Correct number of channels"); + return; + } + ok(Math.abs(buffer.duration - test.duration) < 1e-3, "Correct duration"); + if (Math.abs(buffer.duration - test.duration) >= 1e-3) { + ok(false, "got: " + buffer.duration + ", expected: " + test.duration); + } + // Take into account the resampling when checking the size + var expectedLength = test.frames * buffer.sampleRate / test.sampleRate; + ok(Math.abs(buffer.length - expectedLength) < 1.0, "Correct length", "got " + buffer.length + ", expected about " + expectedLength); + + // Playback the buffer in the original context, to resample back to the + // original rate and compare with the decoded buffer without resampling. 
+ cx = test.nativeContext; + var expected = cx.createBufferSource(); + expected.buffer = test.expectedBuffer; + expected.start(); + var inverse = cx.createGain(); + inverse.gain.value = -1; + expected.connect(inverse); + inverse.connect(cx.destination); + var resampled = cx.createBufferSource(); + resampled.buffer = buffer; + resampled.start(); + // This stop should do nothing, but it tests for bug 937475 + resampled.stop(test.frames / cx.sampleRate); + resampled.connect(cx.destination); + cx.oncomplete = function(e) { + ok(!bufferIsSilent(e.renderedBuffer), "Expect buffer not silent"); + // Resampling will lose the highest frequency components, so we should + // pass the difference through a low pass filter. However, either the + // input files don't have significant high frequency components or the + // tolerance in compareBuffers() is too high to detect them. + compareBuffers(e.renderedBuffer, + cx.createBuffer(test.numberOfChannels, + test.frames, test.sampleRate)); + callback(); + } + cx.startRendering(); +} + +function runResampling(test, response, callback) { + var sampleRate = test.sampleRate == 44100 ? 48000 : 44100; + var cx = new OfflineAudioContext(1, 1, sampleRate); + cx.decodeAudioData(response, function onSuccess(asyncResult) { + is(asyncResult.sampleRate, sampleRate, "Correct sample rate"); + + checkResampledBuffer(asyncResult, test, callback); + }, function onFailure() { + ok(false, "Expected successful decode with resample"); + callback(); + }); +} + +function runTest(test, response, callback) { + // We need to copy the array here, because decodeAudioData will detach the + // array's buffer. 
+ var compressedAudio = response.slice(0); + var expectCallback = false; + var cx = new OfflineAudioContext(test.numberOfChannels || 1, + test.frames || 1, test.sampleRate); + cx.decodeAudioData(response, function onSuccess(asyncResult) { + ok(expectCallback, "Success callback should fire asynchronously"); + ok(test.valid, "Did expect success for test " + test.url); + + checkAudioBuffer(asyncResult, test); + + test.expectedBuffer = asyncResult; + test.nativeContext = cx; + runResampling(test, compressedAudio, callback); + }, function onFailure() { + ok(expectCallback, "Failure callback should fire asynchronously"); + ok(!test.valid, "Did expect failure for test " + test.url); + callback(); + }); + expectCallback = true; +} + +function loadTest(test, callback) { + var xhr = new XMLHttpRequest(); + xhr.open("GET", test.url, true); + xhr.responseType = "arraybuffer"; + xhr.onload = function() { + var getExpected = new XMLHttpRequest(); + getExpected.open("GET", test.expectedUrl, true); + getExpected.responseType = "arraybuffer"; + getExpected.onload = function() { + test.expectedWaveData = new Uint8Array(getExpected.response); + runTest(test, xhr.response, callback); + }; + getExpected.send(); + }; + xhr.send(); +} + +function loadNextTest() { + if (files.length) { + loadTest(files.shift(), loadNextTest); + } else { + SimpleTest.finish(); + } +} + +loadNextTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html new file mode 100644 index 000000000..3e196735f --- /dev/null +++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNode.html @@ -0,0 +1,74 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaElementAudioSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre 
id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +function test() { + var audio = new Audio("small-shot.ogg"); + var context = new AudioContext(); + var expectedMinNonzeroSampleCount; + var expectedMaxNonzeroSampleCount; + var nonzeroSampleCount = 0; + var complete = false; + var iterationCount = 0; + + // This test ensures we receive at least expectedSampleCount nonzero samples + function processSamples(e) { + if (complete) { + return; + } + + if (iterationCount == 0) { + // Don't start playing the audio until the AudioContext stuff is connected + // and running. + audio.play(); + } + ++iterationCount; + + var buf = e.inputBuffer.getChannelData(0); + var nonzeroSamplesThisBuffer = 0; + for (var i = 0; i < buf.length; ++i) { + if (buf[i] != 0) { + ++nonzeroSamplesThisBuffer; + } + } + nonzeroSampleCount += nonzeroSamplesThisBuffer; + is(e.inputBuffer.numberOfChannels, 1, + "Checking data channel count (nonzeroSamplesThisBuffer=" + + nonzeroSamplesThisBuffer + ")"); + ok(nonzeroSampleCount <= expectedMaxNonzeroSampleCount, + "Too many nonzero samples (got " + nonzeroSampleCount + ", expected max " + expectedMaxNonzeroSampleCount + ")"); + if (nonzeroSampleCount >= expectedMinNonzeroSampleCount && + nonzeroSamplesThisBuffer == 0) { + ok(true, + "Check received enough nonzero samples (got " + nonzeroSampleCount + ", expected min " + expectedMinNonzeroSampleCount + ")"); + SimpleTest.finish(); + complete = true; + } + } + + audio.onloadedmetadata = function() { + var node = context.createMediaElementSource(audio); + var sp = context.createScriptProcessor(2048, 1); + node.connect(sp); + // Use a fuzz factor of 100 to account for samples that just happen to be zero + expectedMinNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) - 100; + expectedMaxNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) + 500; + sp.onaudioprocess = processSamples; + }; +} + +SpecialPowers.pushPrefEnv({"set": 
[["media.preload.default", 2], ["media.preload.auto", 3]]}, test); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html new file mode 100644 index 000000000..7e03b7079 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeCrossOrigin.html @@ -0,0 +1,94 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaStreamAudioSourceNode doesn't get data from cross-origin media resources</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +// Turn off the authentication dialog blocking for this test. +SpecialPowers.setIntPref("network.auth.subresource-http-auth-allow", 2) + +var tests = [ + // Not the same origin no CORS asked for, should have silence + { url: "http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg", + cors: null, + expectSilence: true }, + // Same origin, should have sound + { url: "small-shot.ogg", + cors: null, + expectSilence: false }, + // Cross-origin but we asked for CORS and the server answered with the right + // header, should have + { url: "http://example.org:80/tests/dom/media/webaudio/test/corsServer.sjs", + cors: "anonymous", + expectSilence: false } +]; + +var testsRemaining = tests.length; + +tests.forEach(function(e) { + e.ac = new AudioContext(); + var a = new Audio(); + if (e.cors) { + a.crossOrigin = e.cors; + } + a.src = e.url; + document.body.appendChild(a); + + a.onloadedmetadata = () => { + // Wait for "loadedmetadata" before capturing since tracks are then known + // directly. 
If we set up the capture before "loadedmetadata" we + // (internally) have to wait an extra async jump for tracks to become known + // to main thread, before setting up audio data forwarding to the node. + // As that happens, the audio resource may have already ended on slow test + // machines, causing failures. + a.onloadedmetadata = null; + var measn = e.ac.createMediaElementSource(a); + var sp = e.ac.createScriptProcessor(2048, 1); + sp.seenSound = false; + sp.onaudioprocess = checkBufferSilent; + + measn.connect(sp); + a.play(); + }; + + function checkFinished(sp) { + if (a.ended) { + sp.onaudioprocess = null; + var not = e.expectSilence ? "" : "not"; + is(e.expectSilence, !sp.seenSound, + "Buffer is " + not + " silent as expected, for " + + e.url + " (cors: " + e.cors + ")"); + if (--testsRemaining == 0) { + SimpleTest.finish(); + } + } + } + + function checkBufferSilent(e) { + var inputArrayBuffer = e.inputBuffer.getChannelData(0); + var silent = true; + for (var i = 0; i < inputArrayBuffer.length; i++) { + if (inputArrayBuffer[i] != 0.0) { + silent = false; + break; + } + } + // It is acceptable to find a full buffer of silence here, even if we expect + // sound, because Gecko's looping on media elements is not seamless and we + // can underrun. We are looking for at least one buffer of non-silent data. 
+ e.target.seenSound = !silent || e.target.seenSound; + checkFinished(e.target); + return silent; + } +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html new file mode 100644 index 000000000..8d3b0ed46 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeFidelity.html @@ -0,0 +1,128 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaStreamAudioSourceNode doesn't get data from cross-origin media resources</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +function binIndexForFrequency(frequency, analyser) { + return 1 + Math.round(frequency * + analyser.fftSize / + analyser.context.sampleRate); +} + +function debugCanvas(analyser) { + var cvs = document.createElement("canvas"); + document.body.appendChild(cvs); + + // Easy: 1px per bin + cvs.width = analyser.frequencyBinCount; + cvs.height = 256; + cvs.style.border = "1px solid red"; + + var c = cvs.getContext('2d'); + var buf = new Uint8Array(analyser.frequencyBinCount); + + function render() { + c.clearRect(0, 0, cvs.width, cvs.height); + analyser.getByteFrequencyData(buf); + for (var i = 0; i < buf.length; i++) { + c.fillRect(i, (256 - (buf[i])), 1, 256); + } + requestAnimationFrame(render); + } + requestAnimationFrame(render); +} + + +function checkFrequency(an) { + an.getFloatFrequencyData(frequencyArray); + // We should have no energy when checking the data largely outside the index + // for 440Hz (the frequency of the sine wave), start checking an octave above, + // the Opus compression can add some harmonics to the pure sine wave. 
+ var index = binIndexForFrequency(880, an); + var underTreshold = true; + for (var i = index; i < frequencyArray.length; i++) { + // Let some slack, there might be some noise here because of int -> float + // conversion or the Opus encoding. + if (frequencyArray[i] > an.minDecibels + 40) { + return false; + } + } + + // On the other hand, we should find a peak at 440Hz. Our sine wave is not + // attenuated, we're expecting the peak to reach 0dBFs. + index = binIndexForFrequency(440, an); + info("energy at 440: " + frequencyArray[index] + ", threshold " + (an.maxDecibels - 10)); + if (frequencyArray[index] < (an.maxDecibels - 10)) { + return false; + } + + return true; +} + +var audioElement = new Audio(); +audioElement.src = 'sine-440-10s.opus' +audioElement.loop = true; +var ac = new AudioContext(); +var mediaElementSource = ac.createMediaElementSource(audioElement); +var an = ac.createAnalyser(); +frequencyArray = new Float32Array(an.frequencyBinCount); + +// Uncomment this to check what the analyser is doing. +// debugCanvas(an); + +mediaElementSource.connect(an) + +audioElement.play(); +// We want to check the we have the expected audio for at least two loop of +// the HTMLMediaElement, piped into an AudioContext. The file is ten seconds, +// and we use the default FFT size. 
+var lastCurrentTime = 0; +var loopCount = 0; +audioElement.onplaying = function() { + audioElement.ontimeupdate = function() { + // We don't run the analysis when close to loop point or at the + // beginning, since looping is not seamless, there could be an + // unpredictable amount of silence + var rv = checkFrequency(an); + info("currentTime: " + audioElement.currentTime); + if (audioElement.currentTime < 4 || + audioElement.currentTime > 8){ + return; + } + if (!rv) { + ok(false, "Found unexpected noise during analysis."); + audioElement.ontimeupdate = null; + audioElement.onplaying = null; + ac.close(); + audioElement.src = ''; + SimpleTest.finish() + return; + } + ok(true, "Found correct audio signal during analysis"); + info(lastCurrentTime + " " + audioElement.currentTime); + if (lastCurrentTime > audioElement.currentTime) { + info("loopCount: " + loopCount); + if (loopCount > 1) { + audioElement.ontimeupdate = null; + audioElement.onplaying = null; + ac.close(); + audioElement.src = ''; + SimpleTest.finish(); + } + lastCurrentTime = audioElement.currentTime; + loopCount++; + } else { + lastCurrentTime = audioElement.currentTime; + } + } +} + +</script> diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html new file mode 100644 index 000000000..1bb0ad9ec --- /dev/null +++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodePassThrough.html @@ -0,0 +1,66 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaElementAudioSourceNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +function test() { + var audio = new Audio("small-shot.ogg"); + var context = new 
AudioContext(); + var node = context.createMediaElementSource(audio); + var sp = context.createScriptProcessor(2048, 1); + node.connect(sp); + var nonzeroSampleCount = 0; + var complete = false; + var iterationCount = 0; + + var srcWrapped = SpecialPowers.wrap(node); + ok("passThrough" in srcWrapped, "MediaElementAudioSourceNode should support the passThrough API"); + srcWrapped.passThrough = true; + + // This test ensures we receive at least expectedSampleCount nonzero samples + function processSamples(e) { + if (complete) { + return; + } + + if (iterationCount == 0) { + // Don't start playing the audio until the AudioContext stuff is connected + // and running. + audio.play(); + } + ++iterationCount; + + var buf = e.inputBuffer.getChannelData(0); + var nonzeroSamplesThisBuffer = 0; + for (var i = 0; i < buf.length; ++i) { + if (buf[i] != 0) { + ++nonzeroSamplesThisBuffer; + } + } + nonzeroSampleCount += nonzeroSamplesThisBuffer; + if (iterationCount == 10) { + is(nonzeroSampleCount, 0, "The input must be silence"); + SimpleTest.finish(); + complete = true; + } + } + + audio.oncanplaythrough = function() { + sp.onaudioprocess = processSamples; + }; +} + +SpecialPowers.pushPrefEnv({"set": [["media.preload.default", 2], ["media.preload.auto", 3]]}, test); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html new file mode 100644 index 000000000..ad0b355b1 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaElementAudioSourceNodeVideo.html @@ -0,0 +1,70 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaElementAudioSourceNode before "loadedmetadata"</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> 
+SimpleTest.waitForExplicitFinish(); + +var video = document.createElement("video"); +function test() { + video.src = "audiovideo.mp4"; + + var context = new AudioContext(); + var complete = false; + + video.onended = () => { + if (complete) { + return; + } + + complete = true; + ok(false, "Video ended without any samples seen"); + SimpleTest.finish(); + }; + + video.ontimeupdate = () => { + info("Timeupdate: " + video.currentTime); + }; + + var node = context.createMediaElementSource(video); + var sp = context.createScriptProcessor(2048, 1); + node.connect(sp); + + // This test ensures we receive some nonzero samples when we capture to + // WebAudio before "loadedmetadata". + sp.onaudioprocess = e => { + if (complete) { + return; + } + + var buf = e.inputBuffer.getChannelData(0); + for (var i = 0; i < buf.length; ++i) { + if (buf[i] != 0) { + complete = true; + ok(true, "Got non-zero samples"); + SimpleTest.finish(); + return; + } + } + }; + + video.play(); +} + +if (video.canPlayType("video/mp4")) { + test(); +} else { + ok(true, "MP4 not supported. 
Skipping."); + SimpleTest.finish(); +} + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html b/dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html new file mode 100644 index 000000000..5aa1a7910 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaStreamAudioDestinationNode.html @@ -0,0 +1,50 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test MediaStreamAudioDestinationNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<audio id="audioelem"></audio> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout("This test uses a live media element so it needs to wait for the media stack to do some work."); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createBufferSource(); + source.buffer = buffer; + + var dest = context.createMediaStreamDestination(); + source.connect(dest); + + var elem = document.getElementById('audioelem'); + elem.srcObject = dest.stream; + elem.onloadedmetadata = function() { + ok(true, "got metadata event"); + setTimeout(function() { + is(elem.played.length, 1, "should have a played interval"); + is(elem.played.start(0), 0, "should have played immediately"); + isnot(elem.played.end(0), 0, "should have played for a non-zero interval"); + + // This will end the media element. 
+ dest.stream.getTracks()[0].stop(); + }, 2000); + }; + elem.onended = function() { + ok(true, "media element ended after destination track.stop()"); + SimpleTest.finish(); + }; + + source.start(0); + elem.play(); +}); +</script> diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html new file mode 100644 index 000000000..85d96d3e8 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNode.html @@ -0,0 +1,50 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaStreamAudioSourceNode processing is correct</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function createBuffer(context) { + var buffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + buffer.getChannelData(1)[i] = -buffer.getChannelData(0)[i]; + } + return buffer; +} + +var gTest = { + length: 2048, + skipOfflineContextTests: true, + createGraph: function(context) { + var sourceGraph = new AudioContext(); + var source = sourceGraph.createBufferSource(); + source.buffer = createBuffer(context); + var dest = sourceGraph.createMediaStreamDestination(); + source.connect(dest); + source.start(0); + + var mediaStreamSource = context.createMediaStreamSource(dest.stream); + // channelCount and channelCountMode should have no effect + mediaStreamSource.channelCount = 1; + mediaStreamSource.channelCountMode = "explicit"; + return mediaStreamSource; + }, + createExpectedBuffers: function(context) { + return createBuffer(context); + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git 
a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html new file mode 100644 index 000000000..f3cc0334a --- /dev/null +++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeCrossOrigin.html @@ -0,0 +1,57 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaStreamAudioSourceNode doesn't get data from cross-origin media resources</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +var audio = new Audio("http://example.org:80/tests/dom/media/webaudio/test/small-shot.ogg"); +var context = new AudioContext(); +var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded()); +var sp = context.createScriptProcessor(2048, 1); +node.connect(sp); +var nonzeroSampleCount = 0; +var complete = false; +var iterationCount = 0; + +// This test ensures we receive at least expectedSampleCount nonzero samples +function processSamples(e) { + if (complete) { + return; + } + + if (iterationCount == 0) { + // Don't start playing the audio until the AudioContext stuff is connected + // and running. 
+ audio.play(); + } + ++iterationCount; + + var buf = e.inputBuffer.getChannelData(0); + var nonzeroSamplesThisBuffer = 0; + for (var i = 0; i < buf.length; ++i) { + if (buf[i] != 0) { + ++nonzeroSamplesThisBuffer; + } + } + is(nonzeroSamplesThisBuffer, 0, + "Checking all samples are zero"); + if (iterationCount >= 20) { + SimpleTest.finish(); + complete = true; + } +} + +audio.oncanplaythrough = function() { + sp.onaudioprocess = processSamples; +}; +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html new file mode 100644 index 000000000..7a9b6c4a6 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeNoGC.html @@ -0,0 +1,89 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test that MediaStreamAudioSourceNode and its input MediaStream stays alive while there are active tracks</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout("gUM and WebAudio data is async to main thread. 
" + + "We need a timeout to see that something does " + + "NOT happen to data."); + +var context = new AudioContext(); +var analyser = context.createAnalyser(); + +function wait(millis, resolveWithThis) { + return new Promise(resolve => setTimeout(() => resolve(resolveWithThis), millis)); +} + +function binIndexForFrequency(frequency) { + return 1 + Math.round(frequency * analyser.fftSize / context.sampleRate); +} + +function waitForAudio(analysisFunction, cancelPromise) { + var data = new Uint8Array(analyser.frequencyBinCount); + var cancelled = false; + var cancelledMsg = ""; + cancelPromise.then(msg => { + cancelled = true; + cancelledMsg = msg; + }); + return new Promise((resolve, reject) => { + var loop = () => { + analyser.getByteFrequencyData(data); + if (cancelled) { + reject(new Error("waitForAudio cancelled: " + cancelledMsg)); + return; + } + if (analysisFunction(data)) { + resolve(); + return; + } + requestAnimationFrame(loop); + }; + loop(); + }); +} + +navigator.mediaDevices.getUserMedia({audio: true, fake: true}) + .then(stream => { + stream.onended = () => ended = true; + let source = context.createMediaStreamSource(stream); + source.connect(analyser); + analyser.connect(context.destination); + }) + .then(() => { + ok(true, "Waiting for audio to pass through the analyser") + return waitForAudio(arr => arr[binIndexForFrequency(1000)] > 200, + wait(60000, "Timeout waiting for audio")); + }) + .then(() => { + ok(true, "Audio was detected by the analyser. 
Forcing CC."); + SpecialPowers.forceCC(); + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + SpecialPowers.forceGC(); + + info("Checking that GC didn't destroy the stream or source node"); + return waitForAudio(arr => arr[binIndexForFrequency(1000)] < 50, + wait(5000, "Timeout waiting for GC (timeout OK)")) + .then(() => Promise.reject("Audio stopped unexpectedly"), + () => Promise.resolve()); + }) + .then(() => { + ok(true, "Audio is still flowing"); + SimpleTest.finish(); + }) + .catch(e => { + ok(false, "Error executing test: " + e + (e.stack ? "\n" + e.stack : "")); + SimpleTest.finish(); + }); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html new file mode 100644 index 000000000..d2c22600a --- /dev/null +++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodePassThrough.html @@ -0,0 +1,55 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaStreamAudioSourceNode passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function createBuffer(context, delay) { + var buffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048 - delay; ++i) { + buffer.getChannelData(0)[i + delay] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + buffer.getChannelData(1)[i + delay] = -buffer.getChannelData(0)[i + delay]; + } + return buffer; +} + +var gTest = { + length: 2048, + skipOfflineContextTests: true, + createGraph: function(context) { + var sourceGraph = new AudioContext(); + var source = sourceGraph.createBufferSource(); + source.buffer = createBuffer(context, 0); + var dest = 
sourceGraph.createMediaStreamDestination(); + source.connect(dest); + source.start(0); + + var mediaStreamSource = context.createMediaStreamSource(dest.stream); + // channelCount and channelCountMode should have no effect + mediaStreamSource.channelCount = 1; + mediaStreamSource.channelCountMode = "explicit"; + + var srcWrapped = SpecialPowers.wrap(mediaStreamSource); + ok("passThrough" in srcWrapped, "MediaStreamAudioSourceNode should support the passThrough API"); + srcWrapped.passThrough = true; + + return mediaStreamSource; + }, + createExpectedBuffers: function(context) { + return context.createBuffer(2, 2048, context.sampleRate); + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html new file mode 100644 index 000000000..4a4f03c53 --- /dev/null +++ b/dom/media/webaudio/test/test_mediaStreamAudioSourceNodeResampling.html @@ -0,0 +1,74 @@ +<!DOCTYPE HTML> +<html> +<meta charset="utf-8"> +<head> + <title>Test MediaStreamAudioSourceNode processing is correct</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); + +function test() { + var audio = new Audio("small-shot.ogg"); + var context = new AudioContext(); + var expectedMinNonzeroSampleCount; + var expectedMaxNonzeroSampleCount; + var nonzeroSampleCount = 0; + var complete = false; + var iterationCount = 0; + + // This test ensures we receive at least expectedSampleCount nonzero samples + function processSamples(e) { + if (complete) { + return; + } + + if (iterationCount == 0) { + // Don't start playing the audio until the AudioContext stuff is connected + // and running. 
+ audio.play(); + } + ++iterationCount; + + var buf = e.inputBuffer.getChannelData(0); + var nonzeroSamplesThisBuffer = 0; + for (var i = 0; i < buf.length; ++i) { + if (buf[i] != 0) { + ++nonzeroSamplesThisBuffer; + } + } + nonzeroSampleCount += nonzeroSamplesThisBuffer; + is(e.inputBuffer.numberOfChannels, 1, + "Checking data channel count (nonzeroSamplesThisBuffer=" + + nonzeroSamplesThisBuffer + ")"); + ok(nonzeroSampleCount <= expectedMaxNonzeroSampleCount, + "Too many nonzero samples (got " + nonzeroSampleCount + ", expected max " + expectedMaxNonzeroSampleCount + ")"); + if (nonzeroSampleCount >= expectedMinNonzeroSampleCount && + nonzeroSamplesThisBuffer == 0) { + ok(true, + "Check received enough nonzero samples (got " + nonzeroSampleCount + ", expected min " + expectedMinNonzeroSampleCount + ")"); + SimpleTest.finish(); + complete = true; + } + } + + audio.onloadedmetadata = function() { + var node = context.createMediaStreamSource(audio.mozCaptureStreamUntilEnded()); + var sp = context.createScriptProcessor(2048, 1, 0); + node.connect(sp); + // Use a fuzz factor of 100 to account for samples that just happen to be zero + expectedMinNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) - 100; + expectedMaxNonzeroSampleCount = Math.floor(audio.duration*context.sampleRate) + 500; + sp.onaudioprocess = processSamples; + }; +} + +SpecialPowers.pushPrefEnv({"set": [["media.preload.default", 2], ["media.preload.auto", 3]]}, test); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_mixingRules.html b/dom/media/webaudio/test/test_mixingRules.html new file mode 100644 index 000000000..0bdcff87e --- /dev/null +++ b/dom/media/webaudio/test/test_mixingRules.html @@ -0,0 +1,401 @@ +<!DOCTYPE html> +<html> +<head> + <title>Testcase for AudioNode channel up-mix/down-mix rules</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + 
<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> + +<body> + +<script> + +// This test is based on http://src.chromium.org/viewvc/blink/trunk/LayoutTests/webaudio/audionode-channel-rules.html + +var context = null; +var sp = null; +var renderNumberOfChannels = 8; +var singleTestFrameLength = 8; +var testBuffers; + +// A list of connections to an AudioNode input, each of which is to be used in one or more specific test cases. +// Each element in the list is a string, with the number of connections corresponding to the length of the string, +// and each character in the string is from '1' to '8' representing a 1 to 8 channel connection (from an AudioNode output). +// For example, the string "128" means 3 connections, having 1, 2, and 8 channels respectively. +var connectionsList = []; +for (var i = 1; i <= 8; ++i) { + connectionsList.push(i.toString()); + for (var j = 1; j <= 8; ++j) { + connectionsList.push(i.toString() + j.toString()); + } +} + +// A list of mixing rules, each of which will be tested against all of the connections in connectionsList. 
+var mixingRulesList = [ + {channelCount: 1, channelCountMode: "max", channelInterpretation: "speakers"}, + {channelCount: 2, channelCountMode: "clamped-max", channelInterpretation: "speakers"}, + {channelCount: 3, channelCountMode: "clamped-max", channelInterpretation: "speakers"}, + {channelCount: 4, channelCountMode: "clamped-max", channelInterpretation: "speakers"}, + {channelCount: 5, channelCountMode: "clamped-max", channelInterpretation: "speakers"}, + {channelCount: 6, channelCountMode: "clamped-max", channelInterpretation: "speakers"}, + {channelCount: 7, channelCountMode: "clamped-max", channelInterpretation: "speakers"}, + {channelCount: 2, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 3, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 4, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 5, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 6, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 7, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 8, channelCountMode: "explicit", channelInterpretation: "speakers"}, + {channelCount: 1, channelCountMode: "max", channelInterpretation: "discrete"}, + {channelCount: 2, channelCountMode: "clamped-max", channelInterpretation: "discrete"}, + {channelCount: 3, channelCountMode: "clamped-max", channelInterpretation: "discrete"}, + {channelCount: 4, channelCountMode: "clamped-max", channelInterpretation: "discrete"}, + {channelCount: 5, channelCountMode: "clamped-max", channelInterpretation: "discrete"}, + {channelCount: 6, channelCountMode: "clamped-max", channelInterpretation: "discrete"}, + {channelCount: 3, channelCountMode: "explicit", channelInterpretation: "discrete"}, + {channelCount: 4, channelCountMode: "explicit", channelInterpretation: "discrete"}, + {channelCount: 5, channelCountMode: "explicit", 
channelInterpretation: "discrete"}, + {channelCount: 6, channelCountMode: "explicit", channelInterpretation: "discrete"}, + {channelCount: 7, channelCountMode: "explicit", channelInterpretation: "discrete"}, + {channelCount: 8, channelCountMode: "explicit", channelInterpretation: "discrete"}, +]; + +var numberOfTests = mixingRulesList.length * connectionsList.length; + +// Create an n-channel buffer, with all sample data zero except for a shifted impulse. +// The impulse position depends on the channel index. +// For example, for a 4-channel buffer: +// channel0: 1 0 0 0 0 0 0 0 +// channel1: 0 1 0 0 0 0 0 0 +// channel2: 0 0 1 0 0 0 0 0 +// channel3: 0 0 0 1 0 0 0 0 +function createTestBuffer(numberOfChannels) { + var buffer = context.createBuffer(numberOfChannels, singleTestFrameLength, context.sampleRate); + for (var i = 0; i < numberOfChannels; ++i) { + var data = buffer.getChannelData(i); + data[i] = 1; + } + return buffer; +} + +// Discrete channel interpretation mixing: +// https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#UpMix +// up-mix by filling channels until they run out then ignore remaining dest channels. +// down-mix by filling as many channels as possible, then dropping remaining source channels. 
+function discreteSum(sourceBuffer, destBuffer) { + if (sourceBuffer.length != destBuffer.length) { + is(sourceBuffer.length, destBuffer.length, "source and destination buffers should have the same length"); + } + + var numberOfChannels = Math.min(sourceBuffer.numberOfChannels, destBuffer.numberOfChannels); + var length = sourceBuffer.length; + + for (var c = 0; c < numberOfChannels; ++c) { + var source = sourceBuffer.getChannelData(c); + var dest = destBuffer.getChannelData(c); + for (var i = 0; i < length; ++i) { + dest[i] += source[i]; + } + } +} + +// Speaker channel interpretation mixing: +// https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html#UpMix +function speakersSum(sourceBuffer, destBuffer) +{ + var numberOfSourceChannels = sourceBuffer.numberOfChannels; + var numberOfDestinationChannels = destBuffer.numberOfChannels; + var length = destBuffer.length; + + if ((numberOfDestinationChannels == 2 && numberOfSourceChannels == 1) || + (numberOfDestinationChannels == 4 && numberOfSourceChannels == 1)) { + // Handle mono -> stereo/Quad case (summing mono channel into both left and right). + var source = sourceBuffer.getChannelData(0); + var destL = destBuffer.getChannelData(0); + var destR = destBuffer.getChannelData(1); + + for (var i = 0; i < length; ++i) { + destL[i] += source[i]; + destR[i] += source[i]; + } + } else if ((numberOfDestinationChannels == 4 && numberOfSourceChannels == 2) || + (numberOfDestinationChannels == 6 && numberOfSourceChannels == 2)) { + // Handle stereo -> Quad/5.1 case (summing left and right channels into the output's left and right). 
+ var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var destL = destBuffer.getChannelData(0); + var destR = destBuffer.getChannelData(1); + + for (var i = 0; i < length; ++i) { + destL[i] += sourceL[i]; + destR[i] += sourceR[i]; + } + } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 2) { + // Handle stereo -> mono case. output += 0.5 * (input.L + input.R). + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var dest = destBuffer.getChannelData(0); + + for (var i = 0; i < length; ++i) { + dest[i] += 0.5 * (sourceL[i] + sourceR[i]); + } + } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 4) { + // Handle Quad -> mono case. output += 0.25 * (input.L + input.R + input.SL + input.SR). + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var sourceSL = sourceBuffer.getChannelData(2); + var sourceSR = sourceBuffer.getChannelData(3); + var dest = destBuffer.getChannelData(0); + + for (var i = 0; i < length; ++i) { + dest[i] += 0.25 * (sourceL[i] + sourceR[i] + sourceSL[i] + sourceSR[i]); + } + } else if (numberOfDestinationChannels == 2 && numberOfSourceChannels == 4) { + // Handle Quad -> stereo case. outputLeft += 0.5 * (input.L + input.SL), + // outputRight += 0.5 * (input.R + input.SR). + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var sourceSL = sourceBuffer.getChannelData(2); + var sourceSR = sourceBuffer.getChannelData(3); + var destL = destBuffer.getChannelData(0); + var destR = destBuffer.getChannelData(1); + + for (var i = 0; i < length; ++i) { + destL[i] += 0.5 * (sourceL[i] + sourceSL[i]); + destR[i] += 0.5 * (sourceR[i] + sourceSR[i]); + } + } else if (numberOfDestinationChannels == 6 && numberOfSourceChannels == 4) { + // Handle Quad -> 5.1 case. 
outputLeft += (inputL, inputR, 0, 0, inputSL, inputSR) + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var sourceSL = sourceBuffer.getChannelData(2); + var sourceSR = sourceBuffer.getChannelData(3); + var destL = destBuffer.getChannelData(0); + var destR = destBuffer.getChannelData(1); + var destSL = destBuffer.getChannelData(4); + var destSR = destBuffer.getChannelData(5); + + for (var i = 0; i < length; ++i) { + destL[i] += sourceL[i]; + destR[i] += sourceR[i]; + destSL[i] += sourceSL[i]; + destSR[i] += sourceSR[i]; + } + } else if (numberOfDestinationChannels == 6 && numberOfSourceChannels == 1) { + // Handle mono -> 5.1 case, sum mono channel into center. + var source = sourceBuffer.getChannelData(0); + var dest = destBuffer.getChannelData(2); + + for (var i = 0; i < length; ++i) { + dest[i] += source[i]; + } + } else if (numberOfDestinationChannels == 1 && numberOfSourceChannels == 6) { + // Handle 5.1 -> mono. + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var sourceC = sourceBuffer.getChannelData(2); + // skip LFE for now, according to current spec. + var sourceSL = sourceBuffer.getChannelData(4); + var sourceSR = sourceBuffer.getChannelData(5); + var dest = destBuffer.getChannelData(0); + + for (var i = 0; i < length; ++i) { + dest[i] += 0.7071 * (sourceL[i] + sourceR[i]) + sourceC[i] + 0.5 * (sourceSL[i] + sourceSR[i]); + } + } else if (numberOfDestinationChannels == 2 && numberOfSourceChannels == 6) { + // Handle 5.1 -> stereo. + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var sourceC = sourceBuffer.getChannelData(2); + // skip LFE for now, according to current spec. 
+ var sourceSL = sourceBuffer.getChannelData(4); + var sourceSR = sourceBuffer.getChannelData(5); + var destL = destBuffer.getChannelData(0); + var destR = destBuffer.getChannelData(1); + + for (var i = 0; i < length; ++i) { + destL[i] += sourceL[i] + 0.7071 * (sourceC[i] + sourceSL[i]); + destR[i] += sourceR[i] + 0.7071 * (sourceC[i] + sourceSR[i]); + } + } else if (numberOfDestinationChannels == 4 && numberOfSourceChannels == 6) { + // Handle 5.1 -> Quad. + var sourceL = sourceBuffer.getChannelData(0); + var sourceR = sourceBuffer.getChannelData(1); + var sourceC = sourceBuffer.getChannelData(2); + // skip LFE for now, according to current spec. + var sourceSL = sourceBuffer.getChannelData(4); + var sourceSR = sourceBuffer.getChannelData(5); + var destL = destBuffer.getChannelData(0); + var destR = destBuffer.getChannelData(1); + var destSL = destBuffer.getChannelData(2); + var destSR = destBuffer.getChannelData(3); + + for (var i = 0; i < length; ++i) { + destL[i] += sourceL[i] + 0.7071 * sourceC[i]; + destR[i] += sourceR[i] + 0.7071 * sourceC[i]; + destSL[i] += sourceSL[i]; + destSR[i] += sourceSR[i]; + } + } else { + // Fallback for unknown combinations. + discreteSum(sourceBuffer, destBuffer); + } +} + +function scheduleTest(testNumber, connections, channelCount, channelCountMode, channelInterpretation) { + var mixNode = context.createGain(); + mixNode.channelCount = channelCount; + mixNode.channelCountMode = channelCountMode; + mixNode.channelInterpretation = channelInterpretation; + mixNode.connect(sp); + + for (var i = 0; i < connections.length; ++i) { + var connectionNumberOfChannels = connections.charCodeAt(i) - "0".charCodeAt(0); + + var source = context.createBufferSource(); + // Get a buffer with the right number of channels, converting from 1-based to 0-based index. + var buffer = testBuffers[connectionNumberOfChannels - 1]; + source.buffer = buffer; + source.connect(mixNode); + + // Start at the right offset. 
+ var sampleFrameOffset = testNumber * singleTestFrameLength; + var time = sampleFrameOffset / context.sampleRate; + source.start(time); + } +} + +function computeNumberOfChannels(connections, channelCount, channelCountMode) { + if (channelCountMode == "explicit") + return channelCount; + + var computedNumberOfChannels = 1; // Must have at least one channel. + + // Compute "computedNumberOfChannels" based on all the connections. + for (var i = 0; i < connections.length; ++i) { + var connectionNumberOfChannels = connections.charCodeAt(i) - "0".charCodeAt(0); + computedNumberOfChannels = Math.max(computedNumberOfChannels, connectionNumberOfChannels); + } + + if (channelCountMode == "clamped-max") + computedNumberOfChannels = Math.min(computedNumberOfChannels, channelCount); + + return computedNumberOfChannels; +} + +function checkTestResult(renderedBuffer, testNumber, connections, channelCount, channelCountMode, channelInterpretation) { + var computedNumberOfChannels = computeNumberOfChannels(connections, channelCount, channelCountMode); + + // Create a zero-initialized silent AudioBuffer with computedNumberOfChannels. + var destBuffer = context.createBuffer(computedNumberOfChannels, singleTestFrameLength, context.sampleRate); + + // Mix all of the connections into the destination buffer. + for (var i = 0; i < connections.length; ++i) { + var connectionNumberOfChannels = connections.charCodeAt(i) - "0".charCodeAt(0); + var sourceBuffer = testBuffers[connectionNumberOfChannels - 1]; // convert from 1-based to 0-based index + + if (channelInterpretation == "speakers") { + speakersSum(sourceBuffer, destBuffer); + } else if (channelInterpretation == "discrete") { + discreteSum(sourceBuffer, destBuffer); + } else { + ok(false, "Invalid channel interpretation!"); + } + } + + // Validate that destBuffer matches the rendered output. 
+ // We need to check the rendered output at a specific sample-frame-offset corresponding
+ // to the specific test case we're checking for based on testNumber.
+
+ var sampleFrameOffset = testNumber * singleTestFrameLength;
+ for (var c = 0; c < renderNumberOfChannels; ++c) {
+ var renderedData = renderedBuffer.getChannelData(c);
+ for (var frame = 0; frame < singleTestFrameLength; ++frame) {
+ var renderedValue = renderedData[frame + sampleFrameOffset];
+
+ var expectedValue = 0;
+ if (c < destBuffer.numberOfChannels) {
+ var expectedData = destBuffer.getChannelData(c);
+ expectedValue = expectedData[frame];
+ }
+
+ if (Math.abs(renderedValue - expectedValue) > 1e-4) {
+ var s = "connections: " + connections + ", " + channelCountMode;
+
+ // channelCount is ignored in "max" mode.
+ if (channelCountMode == "clamped-max" || channelCountMode == "explicit") {
+ s += "(" + channelCount + ")";
+ }
+
+ s += ", " + channelInterpretation + ". ";
+
+ var message = s + "rendered: " + renderedValue + " expected: " + expectedValue + " channel: " + c + " frame: " + frame;
+ is(renderedValue, expectedValue, message);
+ }
+ }
+ }
+}
+
+// onaudioprocess handler: verifies every scheduled test against the rendered buffer.
+function checkResult(event) {
+ var buffer = event.inputBuffer;
+
+ // Sanity check result. The ScriptProcessor buffer length is rounded up to a
+ // power of two in runTest(), so it must be at least large enough to hold all
+ // scheduled tests, and must carry the full set of render channels. (The
+ // previous check asserted the inverted condition and only passed by accident
+ // of the power-of-two padding.)
+ ok(buffer.length >= numberOfTests * singleTestFrameLength &&
+ buffer.numberOfChannels == renderNumberOfChannels, "Sanity check");
+
+ // Check all the tests.
+ var testNumber = 0;
+ for (var m = 0; m < mixingRulesList.length; ++m) {
+ var mixingRules = mixingRulesList[m];
+ for (var i = 0; i < connectionsList.length; ++i, ++testNumber) {
+ checkTestResult(buffer, testNumber, connectionsList[i], mixingRules.channelCount, mixingRules.channelCountMode, mixingRules.channelInterpretation);
+ }
+ }
+
+ sp.onaudioprocess = null;
+ SimpleTest.finish();
+}
+
+SimpleTest.waitForExplicitFinish();
+function runTest() {
+ // Create 8-channel offline audio context.
+ // Each test will render 8 sample-frames starting at sample-frame position testNumber * 8.
+ var totalFrameLength = numberOfTests * singleTestFrameLength; + context = new AudioContext(); + var nextPowerOfTwo = 256; + while (nextPowerOfTwo < totalFrameLength) { + nextPowerOfTwo *= 2; + } + sp = context.createScriptProcessor(nextPowerOfTwo, renderNumberOfChannels); + + // Set destination to discrete mixing. + sp.channelCount = renderNumberOfChannels; + sp.channelCountMode = "explicit"; + sp.channelInterpretation = "discrete"; + + // Create test buffers from 1 to 8 channels. + testBuffers = new Array(); + for (var i = 0; i < renderNumberOfChannels; ++i) { + testBuffers[i] = createTestBuffer(i + 1); + } + + // Schedule all the tests. + var testNumber = 0; + for (var m = 0; m < mixingRulesList.length; ++m) { + var mixingRules = mixingRulesList[m]; + for (var i = 0; i < connectionsList.length; ++i, ++testNumber) { + scheduleTest(testNumber, connectionsList[i], mixingRules.channelCount, mixingRules.channelCountMode, mixingRules.channelInterpretation); + } + } + + // Render then check results. 
+ sp.onaudioprocess = checkResult; +} + +runTest(); + +</script> + +</body> +</html> diff --git a/dom/media/webaudio/test/test_mozaudiochannel.html b/dom/media/webaudio/test/test_mozaudiochannel.html new file mode 100644 index 000000000..6ba14347b --- /dev/null +++ b/dom/media/webaudio/test/test_mozaudiochannel.html @@ -0,0 +1,151 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test for mozaudiochannel</title> + <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="/tests/SimpleTest/EventUtils.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/> +</head> +<body> +<p id="display"></p> +<pre id="test"> +<script type="application/javascript"> + +function test_basic() { + var ac = new AudioContext(); + ok(ac, "AudioContext created"); + + // Default + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + // Unpermitted channels + ac = new AudioContext("content"); + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + ac = new AudioContext("notification"); + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + ac = new AudioContext("alarm"); + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + ac = new AudioContext("telephony"); + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + ac = new AudioContext("ringer"); + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + ac = new AudioContext("publicnotification"); + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + runTest(); +} + +function test_permission(aChannel) { + var ac = new AudioContext(); + ok(ac, "AudioContext created"); + + is(ac.mozAudioChannelType, "normal", "Default ac channel == 'normal'"); + + var channel = SpecialPowers.wrap(ac).testAudioChannelInAudioNodeStream(); + is(channel, "normal", "AudioNodeStream is using the correct default audio 
channel."); + + SpecialPowers.pushPermissions( + [{ "type": "audio-channel-" + aChannel, "allow": true, "context": document }], + function() { + var ac = new AudioContext(aChannel); + is(ac.mozAudioChannelType, aChannel, "Default ac channel == '" + aChannel + "'"); + + var channel = SpecialPowers.wrap(ac).testAudioChannelInAudioNodeStream(); + is(channel, aChannel, "AudioNodeStream is using the correct new audio channel."); + + runTest(); + } + ); +} + +function test_preferences(aChannel) { + SpecialPowers.pushPrefEnv({"set": [["media.defaultAudioChannel", aChannel ]]}, + function() { + SpecialPowers.pushPermissions( + [{ "type": "audio-channel-" + aChannel, "allow": false, "context": document }], + function() { + var ac = new AudioContext(aChannel); + ok(ac, "AudioContext created"); + is(ac.mozAudioChannelType, aChannel, "Default ac channel == '" + aChannel + "'"); + + var channel = SpecialPowers.wrap(ac).testAudioChannelInAudioNodeStream(); + is(channel, aChannel, "AudioNodeStream is using the correct audio channel."); + + runTest(); + } + ); + } + ); +} + +function test_wrong_preferences() { + SpecialPowers.pushPrefEnv({"set": [["media.defaultAudioChannel", 'foobar' ]]}, + function() { + var ac = new AudioContext(); + ok(ac, "AudioContext created"); + is(ac.mozAudioChannelType, 'normal', "Default ac channel == 'normal'"); + runTest(); + } + ); +} + +function test_testAudioChannelInAudioNodeStream() { + var ac = new AudioContext(); + ok(ac, "AudioContext created"); + + var status = false; + try { + ac.testAudioChannelInAudioNodeStream(); + } catch(e) { + status = true; + } + + ok(status, "testAudioChannelInAudioNodeStream() should not exist in content."); + runTest(); +} + +var tests = [ + test_basic, + + function() { test_permission("content"); }, + function() { test_permission("notification"); }, + function() { test_permission("alarm"); }, + function() { test_permission("telephony"); }, + function() { test_permission("ringer"); }, + function() { 
test_permission("publicnotification"); }, + + function() { test_preferences("content"); }, + function() { test_preferences("notification"); }, + function() { test_preferences("alarm"); }, + function() { test_preferences("telephony"); }, + function() { test_preferences("ringer"); }, + function() { test_preferences("publicnotification"); }, + + test_wrong_preferences, + + test_testAudioChannelInAudioNodeStream, +]; + +function runTest() { + if (!tests.length) { + SimpleTest.finish(); + return; + } + + var test = tests.shift(); + test(); +} + +SpecialPowers.pushPrefEnv({"set": [["media.useAudioChannelAPI", true ]]}, runTest); +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestLongerTimeout(5); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_nodeToParamConnection.html b/dom/media/webaudio/test/test_nodeToParamConnection.html new file mode 100644 index 000000000..4525923db --- /dev/null +++ b/dom/media/webaudio/test/test_nodeToParamConnection.html @@ -0,0 +1,60 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test connecting an AudioNode to an AudioParam</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + createGraph: function(context) { + var sourceBuffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + sourceBuffer.getChannelData(0)[i] = 1; + sourceBuffer.getChannelData(1)[i] = -1; + } + + var destination = context.destination; + + var paramSource = context.createBufferSource(); + paramSource.buffer = this.buffer; + + var source = context.createBufferSource(); + source.buffer = sourceBuffer; + + var gain = context.createGain(); + + paramSource.connect(gain.gain); + source.connect(gain); + + 
paramSource.start(0); + source.start(0); + return gain; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + for (var j = 0; j < 2; ++j) { + this.buffer.getChannelData(j)[i] = Math.sin(440 * 2 * (j + 1) * Math.PI * i / context.sampleRate); + } + } + var expectedBuffer = context.createBuffer(2, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = 1 + (this.buffer.getChannelData(0)[i] + this.buffer.getChannelData(1)[i]) / 2; + expectedBuffer.getChannelData(1)[i] = -(1 + (this.buffer.getChannelData(0)[i] + this.buffer.getChannelData(1)[i]) / 2); + } + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html b/dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html new file mode 100644 index 000000000..675106697 --- /dev/null +++ b/dom/media/webaudio/test/test_offlineDestinationChannelCountLess.html @@ -0,0 +1,42 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test OfflineAudioContext with a channel count less than the specified number</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var ctx = new OfflineAudioContext(2, 100, 22050); + + var buf = ctx.createBuffer(6, 100, ctx.sampleRate); + for (var i = 0; i < 6; ++i) { + for (var j = 0; j < 100; ++j) { + buf.getChannelData(i)[j] = Math.sin(2 * Math.PI * 200 * j / ctx.sampleRate); + } + } + + var src = ctx.createBufferSource(); + src.buffer = buf; + src.start(0); + src.connect(ctx.destination); + ctx.destination.channelCountMode = 
"max";
+ ctx.startRendering();
+ ctx.oncomplete = function(e) {
+ is(e.renderedBuffer.numberOfChannels, 2, "Correct expected number of buffers");
+ compareChannels(e.renderedBuffer.getChannelData(0), buf.getChannelData(0));
+ compareChannels(e.renderedBuffer.getChannelData(1), buf.getChannelData(1));
+
+ SimpleTest.finish();
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html b/dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html
new file mode 100644
index 000000000..7c7d5c8e5
--- /dev/null
+++ b/dom/media/webaudio/test/test_offlineDestinationChannelCountMore.html
@@ -0,0 +1,46 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test OfflineAudioContext with a channel count more than the specified number</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+ var ctx = new OfflineAudioContext(6, 100, 22050);
+
+ var buf = ctx.createBuffer(2, 100, ctx.sampleRate);
+ for (var i = 0; i < 2; ++i) {
+ for (var j = 0; j < 100; ++j) {
+ buf.getChannelData(i)[j] = Math.sin(2 * Math.PI * 200 * j / ctx.sampleRate);
+ }
+ }
+ var emptyBuffer = ctx.createBuffer(1, 100, ctx.sampleRate);
+
+ var src = ctx.createBufferSource();
+ src.buffer = buf;
+ src.start(0);
+ src.connect(ctx.destination);
+ ctx.destination.channelCountMode = "max";
+ ctx.startRendering();
+ ctx.oncomplete = function(e) {
+ is(e.renderedBuffer.numberOfChannels, 6, "Correct expected number of buffers");
+ compareChannels(e.renderedBuffer.getChannelData(0), buf.getChannelData(0));
+ compareChannels(e.renderedBuffer.getChannelData(1), buf.getChannelData(1));
+ for (var i = 2; i < 6; ++i) {
+ 
compareChannels(e.renderedBuffer.getChannelData(i), emptyBuffer.getChannelData(0));
+ }
+
+ SimpleTest.finish();
+ };
+});
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_oscillatorNode.html b/dom/media/webaudio/test/test_oscillatorNode.html
new file mode 100644
index 000000000..5eb488574
--- /dev/null
+++ b/dom/media/webaudio/test/test_oscillatorNode.html
@@ -0,0 +1,60 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test the OscillatorNode interface</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+SimpleTest.waitForExplicitFinish();
+addLoadEvent(function() {
+
+ var context = new AudioContext();
+ var osc = context.createOscillator();
+
+ is(osc.channelCount, 2, "Oscillator node has 2 input channels by default");
+ is(osc.channelCountMode, "max", "Correct channelCountMode for the Oscillator node");
+ is(osc.channelInterpretation, "speakers", "Correct channelCountInterpretation for the Oscillator node");
+ is(osc.type, "sine", "Correct default type");
+ expectException(function() {
+ osc.type = "custom";
+ }, DOMException.INVALID_STATE_ERR);
+ is(osc.type, "sine", "Cannot set the type to custom");
+ is(osc.frequency.value, 440, "Correct default frequency value");
+ is(osc.detune.value, 0, "Correct default detune value");
+
+ // Make sure that we can set all of the valid type values
+ var types = [
+ "sine",
+ "square",
+ "sawtooth",
+ "triangle",
+ ];
+ for (var i = 0; i < types.length; ++i) {
+ osc.type = types[i];
+ }
+
+ // Verify setPeriodicWave()
+ var real = new Float32Array([1.0, 0.5, 0.25, 0.125]);
+ var imag = new Float32Array([1.0, 0.7, -1.0, 0.5]);
+ osc.setPeriodicWave(context.createPeriodicWave(real, imag));
+ is(osc.type, "custom", "Failed to set custom 
waveform"); + + expectNoException(function() { + osc.start(); + }); + expectNoException(function() { + osc.stop(); + }); + + SimpleTest.finish(); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_oscillatorNode2.html b/dom/media/webaudio/test/test_oscillatorNode2.html new file mode 100644 index 000000000..1ddae937c --- /dev/null +++ b/dom/media/webaudio/test/test_oscillatorNode2.html @@ -0,0 +1,53 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test OscillatorNode lifetime and sine phase</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +const signalLength = 2048; + +function createOscillator(context) { + var osc = context.createOscillator(); + osc.start(0); + osc.stop(signalLength/context.sampleRate); + return osc; +} + +function connectUnreferencedOscillator(context, destination) { + var osc = createOscillator(context); + osc.connect(destination); +} + +var gTest = { + length: signalLength, + numberOfChannels: 1, + createGraph: function(context) { + var blend = context.createGain(); + + connectUnreferencedOscillator(context, blend); + // Test that the unreferenced oscillator remains alive until it has finished. + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + + // Create another sine wave oscillator in negative time, which should + // cancel when mixed with the unreferenced oscillator. 
+ var oscillator = createOscillator(context); + oscillator.frequency.value = -440; + oscillator.connect(blend); + + return blend; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html b/dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html new file mode 100644 index 000000000..8acc025da --- /dev/null +++ b/dom/media/webaudio/test/test_oscillatorNodeNegativeFrequency.html @@ -0,0 +1,50 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the OscillatorNode when the frequency is negative</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + + var types = ["sine", + "square", + "sawtooth", + "triangle"]; + + var finished = 0; + function finish() { + if (++finished == types.length) { + SimpleTest.finish(); + } + } + + types.forEach(function(t) { + var context = new OfflineAudioContext(1, 256, 44100); + var osc = context.createOscillator(); + + osc.frequency.value = -440; + osc.type = t; + + osc.connect(context.destination); + osc.start(); + context.startRendering().then(function(buffer) { + var samples = buffer.getChannelData(0); + // This samples the wave form in the middle of the first period, the value + // should be negative. 
+ ok(samples[Math.floor(44100 / 440 / 4)] < 0., "Phase should be inverted when using a " + t + " waveform"); + finish(); + }); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_oscillatorNodePassThrough.html b/dom/media/webaudio/test/test_oscillatorNodePassThrough.html new file mode 100644 index 000000000..c732bb273 --- /dev/null +++ b/dom/media/webaudio/test/test_oscillatorNodePassThrough.html @@ -0,0 +1,43 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test Oscillator with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var source = context.createOscillator(); + + var srcWrapped = SpecialPowers.wrap(source); + ok("passThrough" in srcWrapped, "OscillatorNode should support the passThrough API"); + srcWrapped.passThrough = true; + + source.start(0); + return source; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + + return [expectedBuffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_oscillatorNodeStart.html b/dom/media/webaudio/test/test_oscillatorNodeStart.html new file mode 100644 index 000000000..c43219c99 --- /dev/null +++ b/dom/media/webaudio/test/test_oscillatorNodeStart.html @@ -0,0 +1,38 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test the OscillatorNode interface</title> + <script type="text/javascript" 
src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + + var context = new AudioContext(); + var osc = context.createOscillator(); + var sp = context.createScriptProcessor(0, 1, 0); + + osc.connect(sp); + + sp.onaudioprocess = function (e) { + var input = e.inputBuffer.getChannelData(0); + var isSilent = true; + for (var i = 0; i < input.length; i++) { + if (input[i] != 0.0) { + isSilent = false; + } + } + sp.onaudioprocess = null; + ok(isSilent, "OscillatorNode should be silent before calling start."); + SimpleTest.finish(); + } +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_oscillatorTypeChange.html b/dom/media/webaudio/test/test_oscillatorTypeChange.html new file mode 100644 index 000000000..aaf311a0c --- /dev/null +++ b/dom/media/webaudio/test/test_oscillatorTypeChange.html @@ -0,0 +1,58 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test OscillatorNode type change after it has started and triangle phase</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +const bufferSize = 1024; + +function startTest() { + var ctx = new AudioContext(); + + var oscillator1 = ctx.createOscillator(); + oscillator1.connect(ctx.destination); + oscillator1.start(0); + + // Assuming the above Web Audio operations have already scheduled an event + // to run in stable state and start the graph thread, schedule a subsequent + // event to change the type of oscillator1. 
+ SimpleTest.executeSoon(function() { + oscillator1.type = "triangle"; + + // Another triangle wave with -1 gain should cancel the first. This is + // starting at the same time as the type change, assuming that the phase + // is reset on type change. A negative frequency should achieve the same + // as the -1 gain but for bug 916285. + var oscillator2 = ctx.createOscillator(); + oscillator2.type = "triangle"; + oscillator2.start(0); + + var processor = ctx.createScriptProcessor(bufferSize, 1, 0); + oscillator1.connect(processor); + var gain = ctx.createGain(); + gain.gain.value = -1; + gain.connect(processor); + oscillator2.connect(gain); + + processor.onaudioprocess = function(e) { + compareChannels(e.inputBuffer.getChannelData(0), + new Float32Array(bufferSize)); + e.target.onaudioprocess = null; + SimpleTest.finish(); + } + }); +}; + +startTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_pannerNode.html b/dom/media/webaudio/test/test_pannerNode.html new file mode 100644 index 000000000..374ad3421 --- /dev/null +++ b/dom/media/webaudio/test/test_pannerNode.html @@ -0,0 +1,73 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test PannerNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +function near(a, b, msg) { + ok(Math.abs(a - b) < 1e-4, msg); +} + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var destination = context.destination; + + var source = context.createBufferSource(); + + var panner = context.createPanner(); + + source.buffer = buffer; + + source.connect(panner); + 
panner.connect(destination); + + // Verify default values + is(panner.panningModel, "equalpower", "Correct default value for panning model"); + is(panner.distanceModel, "inverse", "Correct default value for distance model"); + near(panner.refDistance, 1, "Correct default value for ref distance"); + near(panner.maxDistance, 10000, "Correct default value for max distance"); + near(panner.rolloffFactor, 1, "Correct default value for rolloff factor"); + near(panner.coneInnerAngle, 360, "Correct default value for cone inner angle"); + near(panner.coneOuterAngle, 360, "Correct default value for cone outer angle"); + near(panner.coneOuterGain, 0, "Correct default value for cone outer gain"); + is(panner.channelCount, 2, "panner node has 2 input channels by default"); + is(panner.channelCountMode, "clamped-max", "Correct channelCountMode for the panner node"); + is(panner.channelInterpretation, "speakers", "Correct channelCountInterpretation for the panner node"); + + panner.setPosition(1, 1, 1); + near(panner.positionX.value, 1, "setPosition sets AudioParam properly"); + near(panner.positionY.value, 1, "setPosition sets AudioParam properly"); + near(panner.positionZ.value, 1, "setPosition sets AudioParam properly"); + + panner.setOrientation(0, 1, 0); + near(panner.orientationX.value, 0, "setOrientation sets AudioParam properly"); + near(panner.orientationY.value, 1, "setOrientation sets AudioParam properly"); + near(panner.orientationZ.value, 0, "setOrientation sets AudioParam properly"); + + panner.setVelocity(1, 1, 1); + + source.start(0); + SimpleTest.executeSoon(function() { + source.stop(0); + source.disconnect(); + panner.disconnect(); + + SimpleTest.finish(); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_pannerNodeAbove.html b/dom/media/webaudio/test/test_pannerNodeAbove.html new file mode 100644 index 000000000..6bab394e6 --- /dev/null +++ b/dom/media/webaudio/test/test_pannerNodeAbove.html @@ -0,0 +1,50 @@ +<!DOCTYPE 
HTML>
+<html>
+<head>
+ <title>Test PannerNode directly above</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ numberOfChannels: 2,
+ createGraph: function(context) {
+ // An up vector will be made perpendicular to the front vector, in the
+ // front-up plane.
+ context.listener.setOrientation(0, 6.311749985202524e+307, 0, 0.1, 1000, 0);
+ // Linearly dependent vectors are ignored.
+ context.listener.setOrientation(0, 0, -6.311749985202524e+307, 0, 0, 6.311749985202524e+307);
+ var panner = context.createPanner();
+ panner.positionX.value = 2; // directly above
+ panner.rolloffFactor = 0; // no distance gain
+ panner.panningModel = "equalpower"; // no effect when directly above
+
+ var source = context.createBufferSource();
+ source.buffer = this.buffer;
+ source.connect(panner);
+ source.start(0);
+
+ return panner;
+ },
+ createExpectedBuffers: function(context) {
+ var expectedBuffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ // Different signals in left and right buffers
+ expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ expectedBuffer.getChannelData(1)[i] = Math.sin(220 * 2 * Math.PI * i / context.sampleRate);
+ }
+ this.buffer = expectedBuffer;
+ return expectedBuffer;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html b/dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html
new file mode 100644
index 000000000..21abd2b60
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeAtZeroDistance.html
@@ -0,0 +1,86 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PannerNode produces output even when the distance from the listener is zero</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <script type="text/javascript" src="webaudio.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script class="testbody" type="text/javascript">
+
+var BUF_SIZE = 128;
+
+var types = [
+ "equalpower",
+ "HRTF"
+]
+
+var finished = types.length;
+
+function finish() {
+ if (!--finished) {
+ SimpleTest.finish();
+ }
+}
+
+function test(type) {
+ var ac = new OfflineAudioContext(1, BUF_SIZE, 44100);
+
+ // A sine to be used to fill the buffers
+ function sine(t) {
+ return Math.sin(440 * 2 * Math.PI * t / ac.sampleRate);
+ }
+
+ var monoBuffer = ac.createBuffer(1, BUF_SIZE, ac.sampleRate);
+ for (var i = 0; i < BUF_SIZE; ++i) {
+ monoBuffer.getChannelData(0)[i] = sine(i);
+ }
+
+ var monoSource = ac.createBufferSource();
+ monoSource.buffer = monoBuffer;
+ monoSource.start(0);
+
+ var panner = ac.createPanner();
+ panner.distanceModel = "linear";
+ panner.refDistance = 1;
+ panner.positionX.value = 0;
+ panner.positionY.value = 0;
+ panner.positionZ.value = 0;
+ monoSource.connect(panner);
+
+ var panner2 = ac.createPanner();
+ panner2.distanceModel = "inverse";
+ panner2.refDistance = 1;
+ panner2.positionX.value = 0;
+ panner2.positionY.value = 0;
+ panner2.positionZ.value = 0;
+ panner.connect(panner2);
+
+ var panner3 = ac.createPanner();
+ panner3.distanceModel = "exponential";
+ panner3.refDistance = 1;
+ panner3.positionX.value = 0;
+ panner3.positionY.value = 0;
+ panner3.positionZ.value = 0;
+ panner2.connect(panner3);
+
+ panner3.connect(ac.destination);
+
+ ac.startRendering().then(function(buffer) {
+ compareBuffers(buffer, monoBuffer);
+ finish();
+ });
+}
+
+addLoadEvent(function() {
+ types.forEach(test);
+});
+
+SimpleTest.waitForExplicitFinish();
+
+</script>
+</pre>
+</body>
+</html>
diff --git 
a/dom/media/webaudio/test/test_pannerNodeChannelCount.html b/dom/media/webaudio/test/test_pannerNodeChannelCount.html
new file mode 100644
index 000000000..63a52ea0c
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeChannelCount.html
@@ -0,0 +1,52 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+ <title>Test PannerNode channel count constraints</title>
+ <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+ <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" />
+</head>
+<body>
+<pre id="test">
+<script src="webaudio.js" type="text/javascript"></script>
+<script class="testbody" type="text/javascript">
+
+var gTest = {
+ length: 2048,
+ numberOfChannels: 2,
+ createGraph: function(context) {
+ var buffer = context.createBuffer(2, 2048, context.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ var sample = Math.sin(440 * 2 * Math.PI * i / context.sampleRate);
+ // When mixed into a single channel, this produces silence
+ buffer.getChannelData(0)[i] = sample;
+ buffer.getChannelData(1)[i] = -sample;
+ }
+
+ var panner = context.createPanner();
+ panner.positionX.value = 1;
+ panner.positionY.value = 2;
+ panner.positionZ.value = 3;
+ panner.channelCount = 1;
+ expectException(function() { panner.channelCount = 3; },
+ DOMException.NOT_SUPPORTED_ERR);
+ panner.channelCountMode = "explicit";
+ expectException(function() { panner.channelCountMode = "max"; },
+ DOMException.NOT_SUPPORTED_ERR);
+ panner.channelInterpretation = "discrete";
+ panner.channelInterpretation = "speakers";
+
+ var source = context.createBufferSource();
+ source.buffer = buffer;
+ source.connect(panner);
+ source.start(0);
+
+ return panner;
+ },
+};
+
+runTest();
+
+</script>
+</pre>
+</body>
+</html>
diff --git a/dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html b/dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html
new file mode 100644
index 000000000..c5312d042
--- /dev/null
+++ b/dom/media/webaudio/test/test_pannerNodeHRTFSymmetry.html
@@ -0,0 
+1,106 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test left/right symmetry and block-offset invariance of HRTF panner</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +const blockSize = 128; +const bufferSize = 4096; // > HRTF panner latency + +var ctx = new AudioContext(); + +function isChannelSilent(channel) { + for (var i = 0; i < channel.length; ++i) { + if (channel[i] != 0.0) { + return false; + } + } + return true; +} + +function startTest() { + var leftPanner = ctx.createPanner(); + var rightPanner = ctx.createPanner(); + leftPanner.type = "HRTF"; + rightPanner.type = "HRTF"; + leftPanner.positionX.value = -1; + rightPanner.positionX.value = 1; + + // Test that PannerNode processes the signal consistently irrespective of + // the offset in the processing block. This is done by inserting a delay of + // less than a block size before one panner. + const delayTime = 0.7 * blockSize / ctx.sampleRate; + var leftDelay = ctx.createDelay(delayTime); + leftDelay.delayTime.value = delayTime; + leftDelay.connect(leftPanner); + // and compensating for the delay after the other. + var rightDelay = ctx.createDelay(delayTime); + rightDelay.delayTime.value = delayTime; + rightPanner.connect(rightDelay); + + // Feed the panners with a signal having some harmonics to fill the spectrum. + var oscillator = ctx.createOscillator(); + oscillator.frequency.value = 110; + oscillator.type = "sawtooth"; + oscillator.connect(leftDelay); + oscillator.connect(rightPanner); + oscillator.start(0); + + // Switch the channels on one panner output, and it should match the other. 
+ var splitter = ctx.createChannelSplitter(); + leftPanner.connect(splitter); + var merger = ctx.createChannelMerger(); + splitter.connect(merger, 0, 1); + splitter.connect(merger, 1, 0); + + // Invert one signal so that mixing with the other will find the difference. + var gain = ctx.createGain(); + gain.gain.value = -1.0; + merger.connect(gain); + + var processor = ctx.createScriptProcessor(bufferSize, 2, 0); + gain.connect(processor); + rightDelay.connect(processor); + processor.onaudioprocess = + function(e) { + compareBuffers(e.inputBuffer, + ctx.createBuffer(2, bufferSize, ctx.sampleRate)); + e.target.onaudioprocess = null; + SimpleTest.finish(); + } +} + +function prepareTest() { + // A PannerNode will produce no output until it has loaded its HRIR + // database. Wait for this to load before starting the test. + var processor = ctx.createScriptProcessor(bufferSize, 2, 0); + var panner = ctx.createPanner(); + panner.connect(processor); + var oscillator = ctx.createOscillator(); + oscillator.connect(panner); + oscillator.start(0); + + processor.onaudioprocess = + function(e) { + if (isChannelSilent(e.inputBuffer.getChannelData(0))) + return; + + oscillator.stop(0); + panner.disconnect(); + e.target.onaudioprocess = null; + startTest(); + }; +} +prepareTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_pannerNodePassThrough.html b/dom/media/webaudio/test/test_pannerNodePassThrough.html new file mode 100644 index 000000000..ab1f4b46f --- /dev/null +++ b/dom/media/webaudio/test/test_pannerNodePassThrough.html @@ -0,0 +1,53 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test PannerNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + 
length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var panner = context.createPanner(); + + source.buffer = this.buffer; + + source.connect(panner); + + context.listener.setOrientation(0, 6.311749985202524e+307, 0, 0.1, 1000, 0); + context.listener.setOrientation(0, 0, -6.311749985202524e+307, 0, 0, 6.311749985202524e+307); + panner.positionX = 2; + panner.rolloffFactor = 0; + panner.panningModel = "equalpower"; + + var pannerWrapped = SpecialPowers.wrap(panner); + ok("passThrough" in pannerWrapped, "PannerNode should support the passThrough API"); + pannerWrapped.passThrough = true; + + source.start(0); + return panner; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_pannerNodeTail.html b/dom/media/webaudio/test/test_pannerNodeTail.html new file mode 100644 index 000000000..5fff52797 --- /dev/null +++ b/dom/media/webaudio/test/test_pannerNodeTail.html @@ -0,0 +1,232 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test tail time lifetime of PannerNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// This tests that a PannerNode does not release its reference before +// it finishes emitting sound. +// +// The PannerNode tail time is short, so, when a PannerNode is destroyed on +// the main thread, it is unlikely to notify the graph thread before the tail +// time expires. 
However, by adding DelayNodes downstream from the +// PannerNodes, the graph thread can have enough time to notice that a +// DelayNode has been destroyed. +// +// In the current implementation, DelayNodes will take a tail-time reference +// immediately when they receive the first block of sound from an upstream +// node, so this test connects the downstream DelayNodes while the upstream +// nodes are finishing, and then runs GC (on the main thread) before the +// DelayNodes receive any input (on the graph thread). +// +// Web Audio doesn't provide a means to precisely time connect()s but we can +// test that the output of delay nodes matches the output from a reference +// PannerNode that we know will not be GCed. +// +// Another set of delay nodes is added upstream to ensure that the source node +// has removed its self-reference after dispatching its "ended" event. + +SimpleTest.waitForExplicitFinish(); + +const blockSize = 128; +// bufferSize should be long enough that to allow an audioprocess event to be +// sent to the main thread and a connect message to return to the graph +// thread. +const bufferSize = 4096; +const pannerCount = bufferSize / blockSize; +// sourceDelayBufferCount should be long enough to allow the source node +// onended to finish and remove the source self-reference. 
+const sourceDelayBufferCount = 3; +var gotEnded = false; +// ccDelayLength should be long enough to allow CC to run +var ccDelayBufferCount = 20; +const ccDelayLength = ccDelayBufferCount * bufferSize; + +var ctx; +var testPanners = []; +var referencePanner; +var referenceProcessCount = 0; +var referenceOutput = [new Float32Array(bufferSize), + new Float32Array(bufferSize)]; +var testProcessor; +var testProcessCount = 0; + +function isChannelSilent(channel) { + for (var i = 0; i < channel.length; ++i) { + if (channel[i] != 0.0) { + return false; + } + } + return true; +} + +function onReferenceOutput(e) { + switch(referenceProcessCount) { + + case sourceDelayBufferCount - 1: + // The panners are about to finish. + if (!gotEnded) { + todo(false, "Source hasn't ended. Increase sourceDelayBufferCount?"); + } + + // Connect each PannerNode output to a downstream DelayNode, + // and connect ScriptProcessors to compare test and reference panners. + var delayDuration = ccDelayLength / ctx.sampleRate; + for (var i = 0; i < pannerCount; ++i) { + var delay = ctx.createDelay(delayDuration); + delay.delayTime.value = delayDuration; + delay.connect(testProcessor); + testPanners[i].connect(delay); + } + testProcessor = null; + testPanners = null; + + // The panning effect is linear so only one reference panner is required. + // This also checks that the individual panners don't chop their output + // too soon. + referencePanner.connect(e.target); + + // Assuming the above operations have already scheduled an event to run in + // stable state and ask the graph thread to make connections, schedule a + // subsequent event to run cycle collection, which should not collect + // panners that are still producing sound. + SimpleTest.executeSoon(function() { + SpecialPowers.forceGC(); + SpecialPowers.forceCC(); + }); + + break; + + case sourceDelayBufferCount: + // Record this buffer during which PannerNode outputs were connected. 
+ for (var i = 0; i < 2; ++i) { + e.inputBuffer.copyFromChannel(referenceOutput[i], i); + } + e.target.onaudioprocess = null; + e.target.disconnect(); + + // If the buffer is silent, there is probably not much point just + // increasing the buffer size, because, with the buffer size already + // significantly larger than panner tail time, it demonstrates that the + // lag between threads is much greater than the tail time. + if (isChannelSilent(referenceOutput[0])) { + todo(false, "Connections not detected."); + } + } + + referenceProcessCount++; +} + +function onTestOutput(e) { + if (testProcessCount < sourceDelayBufferCount + ccDelayBufferCount) { + testProcessCount++; + return; + } + + for (var i = 0; i < 2; ++i) { + compareChannels(e.inputBuffer.getChannelData(i), referenceOutput[i]); + } + e.target.onaudioprocess = null; + e.target.disconnect(); + SimpleTest.finish(); +} + +function startTest() { + // 0.002 is MaxDelayTimeSeconds in HRTFpanner.cpp + // and 512 is fftSize() at 48 kHz. + const expectedPannerTailTime = 0.002 * ctx.sampleRate + 512; + + // Create some PannerNodes downstream from DelayNodes with delays long + // enough for their source to finish, dispatch its "ended" event + // and release its playing reference. The DelayNodes should expire their + // tail-time references before the PannerNodes and so only the PannerNode + // lifetimes depends on their tail-time references. Many DelayNodes are + // created and timed to finish at different times so that one PannerNode + // will be finishing the block processed immediately after the connect is + // received. 
+ var source = ctx.createBufferSource(); + // Just short of blockSize here to avoid rounding into the next block + var buffer = ctx.createBuffer(1, blockSize - 1, ctx.sampleRate); + for (var i = 0; i < buffer.length; ++i) { + buffer.getChannelData(0)[i] = Math.cos(Math.PI * i / buffer.length); + } + source.buffer = buffer; + source.start(0); + source.onended = function(e) { + gotEnded = true; + }; + + // Time the first test panner to finish just before downstream DelayNodes + // are about the be connected. Note that DelayNode lifetime depends on + // maxDelayTime so set that equal to the delay. + var delayDuration = + (sourceDelayBufferCount * bufferSize + - expectedPannerTailTime - 2 * blockSize) / ctx.sampleRate; + + for (var i = 0; i < pannerCount; ++i) { + var delay = ctx.createDelay(delayDuration); + delay.delayTime.value = delayDuration; + source.connect(delay); + delay.connect(referencePanner) + + var panner = ctx.createPanner(); + panner.type = "HRTF"; + delay.connect(panner); + testPanners[i] = panner; + + delayDuration += blockSize / ctx.sampleRate; + } + + // Create a ScriptProcessor now to use as a timer to trigger connection of + // downstream nodes. It will also be used to record reference output. + var referenceProcessor = ctx.createScriptProcessor(bufferSize, 2, 0); + referenceProcessor.onaudioprocess = onReferenceOutput; + // Start audioprocess events before source delays are connected. + referenceProcessor.connect(ctx.destination); + + // The test ScriptProcessor will record output of testPanners. + // Create it now so that it is synchronized with the referenceProcessor. + testProcessor = ctx.createScriptProcessor(bufferSize, 2, 0); + testProcessor.onaudioprocess = onTestOutput; + // Start audioprocess events before source delays are connected. 
+ testProcessor.connect(ctx.destination); +} + +function prepareTest() { + ctx = new AudioContext(); + // Place the listener to the side of the origin, where the panners are + // positioned, to maximize delay in one ear. + ctx.listener.setPosition(1,0,0); + + // A PannerNode will produce no output until it has loaded its HRIR + // database. Wait for this to load before starting the test. + var processor = ctx.createScriptProcessor(bufferSize, 2, 0); + referencePanner = ctx.createPanner(); + referencePanner.type = "HRTF"; + referencePanner.connect(processor); + var oscillator = ctx.createOscillator(); + oscillator.connect(referencePanner); + oscillator.start(0); + + processor.onaudioprocess = function(e) { + if (isChannelSilent(e.inputBuffer.getChannelData(0))) + return; + + oscillator.stop(0); + oscillator.disconnect(); + referencePanner.disconnect(); + e.target.onaudioprocess = null; + SimpleTest.executeSoon(startTest); + }; +} +prepareTest(); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_pannerNode_equalPower.html b/dom/media/webaudio/test/test_pannerNode_equalPower.html new file mode 100644 index 000000000..14e9f2153 --- /dev/null +++ b/dom/media/webaudio/test/test_pannerNode_equalPower.html @@ -0,0 +1,26 @@ +<!DOCTYPE HTML> +<html> +<head> +<title>Test PannerNode</title> +<script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> +<script type="text/javascript" src="webaudio.js"></script> +<script type="text/javascript" src="layouttest-glue.js"></script> +<script type="text/javascript" src="blink/audio-testing.js"></script> +<script type="text/javascript" src="blink/panner-model-testing.js"></script> +<link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + function checkFinished() { + SimpleTest.finish(); + } + var ctx = new 
OfflineAudioContext(2, sampleRate * renderLengthSeconds, sampleRate); + createTestAndRun(ctx, nodesToCreate, 2, checkFinished); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_pannerNode_maxDistance.html b/dom/media/webaudio/test/test_pannerNode_maxDistance.html new file mode 100644 index 000000000..faca136b3 --- /dev/null +++ b/dom/media/webaudio/test/test_pannerNode_maxDistance.html @@ -0,0 +1,64 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test PannerNode outputs silence when the distance is greater than maxDist</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var types = [ + "equalpower", + "HRTF" +] + +var finished = types.length; + +function finish() { + if (!--finished) { + SimpleTest.finish(); + } +} + +function test(type) { + var ac = new OfflineAudioContext(1, 128, 44100); + var osc = ac.createOscillator(); + var panner = ac.createPanner(); + + panner.distanceModel = "linear"; + panner.maxDistance = 100; + panner.positionY.value = 200; + ac.listener.setPosition(0, 0, 0); + + osc.connect(panner); + panner.connect(ac.destination); + + osc.start(); + + ac.startRendering().then(function(buffer) { + var silence = true; + var array = buffer.getChannelData(0); + for (var i = 0; i < buffer.length; i++) { + if (array[i] != 0) { + ok(false, "Found noise in the buffer."); + silence = false; + } + } + ok(silence, "The buffer is silent."); + finish(); + }); +} + + +addLoadEvent(function() { + types.forEach(test); +}); + +SimpleTest.waitForExplicitFinish(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_periodicWave.html b/dom/media/webaudio/test/test_periodicWave.html new file mode 100644 index 000000000..3ed440748 --- /dev/null +++ b/dom/media/webaudio/test/test_periodicWave.html @@ -0,0 +1,94 @@ 
+<!DOCTYPE HTML> +<html> +<head> + <title>Test the PeriodicWave interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +// real and imag are used in separate PeriodicWaves to make their peak values +// easy to determine. +const realMax = 99; +var real = new Float32Array(realMax + 1); +real[1] = 2.0; // fundamental +real[realMax] = 3.0; +const realPeak = real[1] + real[realMax]; +const realFundamental = 19.0; +var imag = new Float32Array(4); +imag[0] = 6.0; // should be ignored. +imag[3] = 0.5; +const imagPeak = imag[3]; +const imagFundamental = 551.0; + +const testLength = 4096; + +addLoadEvent(function() { + var ac = new AudioContext(); + ac.createPeriodicWave(new Float32Array(4096), new Float32Array(4096)); + expectException(function() { + ac.createPeriodicWave(new Float32Array(512), imag); + }, DOMException.NOT_SUPPORTED_ERR); + expectException(function() { + ac.createPeriodicWave(new Float32Array(0), new Float32Array(0)); + }, DOMException.NOT_SUPPORTED_ERR); + expectNoException(function() { + ac.createPeriodicWave(new Float32Array(4097), new Float32Array(4097)); + }); + + runTest(); +}); + +var gTest = { + createGraph: function(context) { + var merger = context.createChannelMerger(); + + var osc0 = context.createOscillator(); + var osc1 = context.createOscillator(); + + osc0.setPeriodicWave(context. + createPeriodicWave(real, + new Float32Array(real.length))); + osc1.setPeriodicWave(context. 
+ createPeriodicWave(new Float32Array(imag.length), + imag)); + + osc0.frequency.value = realFundamental; + osc1.frequency.value = imagFundamental; + + osc0.start(); + osc1.start(); + + osc0.connect(merger, 0, 0); + osc1.connect(merger, 0, 1); + + return merger; + }, + createExpectedBuffers: function(context) { + var buffer = context.createBuffer(2, testLength, context.sampleRate); + + for (var i = 0; i < buffer.length; ++i) { + + buffer.getChannelData(0)[i] = 1.0 / realPeak * + (real[1] * Math.cos(2 * Math.PI * realFundamental * i / + context.sampleRate) + + real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i / + context.sampleRate)); + + buffer.getChannelData(1)[i] = 1.0 / imagPeak * + imag[3] * Math.sin(2 * Math.PI * 3 * imagFundamental * i / + context.sampleRate); + } + return buffer; + }, +}; + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_periodicWaveBandLimiting.html b/dom/media/webaudio/test/test_periodicWaveBandLimiting.html new file mode 100644 index 000000000..70fbb09e2 --- /dev/null +++ b/dom/media/webaudio/test/test_periodicWaveBandLimiting.html @@ -0,0 +1,86 @@ +<!DOCTYPE html> +<title>Test effect of band limiting on PeriodicWave signals</title> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script> +const sampleRate = 48000; +const bufferSize = 12800; +const epsilon = 0.01; + +// "All implementations must support arrays up to at least 8192", but the +// linear interpolation of the current implementation distorts the higher +// frequency components too much to pass this test. +const frequencyIndexMax = 200; + +// A set of oscillators are created near the Nyquist frequency. +// These are factors giving each oscillator frequency relative to the Nyquist. +// The first is an octave below Nyquist and the last is just above. 
+const OCTAVE_BELOW = 0; +const HALF_BELOW = 1; +const NEAR_BELOW = 2; +const ABOVE = 3; +const oscillatorFactors = [0.5, Math.sqrt(0.5), 0.99, 1.01]; +const oscillatorCount = oscillatorFactors.length; + +// Return magnitude relative to unit sine wave +function magnitude(array) { + var mag = 0 + for (var i = 0; i < array.length; ++i) { + sample = array[i]; + mag += sample * sample; + } + return Math.sqrt(2 * mag / array.length); +} + +function test_frequency_index(frequencyIndex) { + + var context = + new OfflineAudioContext(oscillatorCount, bufferSize, sampleRate); + + var merger = context.createChannelMerger(oscillatorCount); + merger.connect(context.destination); + + var real = new Float32Array(frequencyIndex + 1); + real[frequencyIndex] = 1; + var image = new Float32Array(real.length); + var wave = context.createPeriodicWave(real, image); + + for (var i = 0; i < oscillatorCount; ++i) { + var oscillator = context.createOscillator(); + oscillator.frequency.value = + oscillatorFactors[i] * sampleRate / (2 * frequencyIndex); + oscillator.connect(merger, 0, i); + oscillator.setPeriodicWave(wave); + oscillator.start(0); + } + + return context.startRendering(). + then((buffer) => { + assert_equals(buffer.numberOfChannels, oscillatorCount); + var magnitudes = []; + for (var i = 0; i < oscillatorCount; ++i) { + magnitudes[i] = magnitude(buffer.getChannelData(i)); + } + // Unaffected by band-limiting one octave below Nyquist. + assert_approx_equals(magnitudes[OCTAVE_BELOW], 1, epsilon, + "magnitude with frequency octave below Nyquist"); + // Still at least half the amplitude at half octave below Nyquist. + assert_greater_than(magnitudes[HALF_BELOW], 0.5 * (1 - epsilon), + "magnitude with frequency half octave below Nyquist"); + // Approaching zero or zero near Nyquist. 
+ assert_less_than(magnitudes[NEAR_BELOW], 0.1, + "magnitude with frequency near Nyquist"); + assert_equals(magnitudes[ABOVE], 0, + "magnitude with frequency above Nyquist"); + }); +} + +// The 5/4 ratio with rounding up provides sampling across a range of +// octaves and offsets within octaves. +for (var frequencyIndex = 1; + frequencyIndex < frequencyIndexMax; + frequencyIndex = Math.floor((5 * frequencyIndex + 3) / 4)) { + promise_test(test_frequency_index.bind(null, frequencyIndex), + "Frequency " + frequencyIndex); +} +</script> diff --git a/dom/media/webaudio/test/test_periodicWaveDisableNormalization.html b/dom/media/webaudio/test/test_periodicWaveDisableNormalization.html new file mode 100644 index 000000000..fb924c475 --- /dev/null +++ b/dom/media/webaudio/test/test_periodicWaveDisableNormalization.html @@ -0,0 +1,100 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test PeriodicWave disableNormalization Parameter</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +// We create PerodicWave instances containing two tones and compare it to +// buffers created directly in JavaScript by adding the two waves together. +// Two of the PeriodicWaves are normalized, the other is not. This test is +// a modification of test_periodicWave.html. +// +// These constants are borrowed from test_periodicWave.html and modified +// so that the realPeak (which is the normalization factor) will be small +// enough that the errors are within the bounds for the test. 
+const realMax = 99; +var real = new Float32Array(realMax + 1); +real[1] = 2.0; // fundamental +real[realMax] = 0.25; + +const realPeak = real[1] + real[realMax]; +const realFundamental = 19.0; + +const testLength = 4096; + +addLoadEvent(function() { + runTest(); +}); + +var gTest = { + createGraph: function(context) { + var merger = context.createChannelMerger(); + + var osc0 = context.createOscillator(); + var osc1 = context.createOscillator(); + var osc2 = context.createOscillator(); + + osc0.setPeriodicWave(context. + createPeriodicWave(real, + new Float32Array(real.length), + {disableNormalization: false})); + osc1.setPeriodicWave(context. + createPeriodicWave(real, + new Float32Array(real.length))); + osc2.setPeriodicWave(context. + createPeriodicWave(real, + new Float32Array(real.length), + {disableNormalization: true})); + + osc0.frequency.value = realFundamental; + osc1.frequency.value = realFundamental; + osc2.frequency.value = realFundamental; + + osc0.start(); + osc1.start(); + osc2.start(); + + osc0.connect(merger, 0, 0); + osc1.connect(merger, 0, 1); + osc2.connect(merger, 0, 2); + + return merger; + }, + createExpectedBuffers: function(context) { + var buffer = context.createBuffer(3, testLength, context.sampleRate); + + for (var i = 0; i < buffer.length; ++i) { + + buffer.getChannelData(0)[i] = 1.0 / realPeak * + (real[1] * Math.cos(2 * Math.PI * realFundamental * i / + context.sampleRate) + + real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i / + context.sampleRate)); + + buffer.getChannelData(1)[i] = buffer.getChannelData(0)[i]; + + // TODO: We need to scale by a factor of two to make the results work + // out here. This seems suspicious, see Bug 1266737. 
+ buffer.getChannelData(2)[i] = 2.0 * + (real[1] * Math.cos(2 * Math.PI * realFundamental * i / + context.sampleRate) + + real[realMax] * Math.cos(2 * Math.PI * realMax * realFundamental * i / + context.sampleRate)); + } + return buffer; + }, + 'numberOfChannels': 3, +}; + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_scriptProcessorNode.html b/dom/media/webaudio/test/test_scriptProcessorNode.html new file mode 100644 index 000000000..7cfb3d96e --- /dev/null +++ b/dom/media/webaudio/test/test_scriptProcessorNode.html @@ -0,0 +1,132 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ScriptProcessorNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// We do not use our generic graph test framework here because +// the testing logic here is sort of complicated, and would +// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes +// can experience delays. 
+ +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = null; + + var sourceSP = context.createScriptProcessor(2048); + sourceSP.addEventListener("audioprocess", function(e) { + // generate the audio + for (var i = 0; i < 2048; ++i) { + // Make sure our first sample won't be zero + e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate); + e.outputBuffer.getChannelData(1)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate); + } + // Remember our generated audio + buffer = e.outputBuffer; + + sourceSP.removeEventListener("audioprocess", arguments.callee); + }, false); + + expectException(function() { + context.createScriptProcessor(1); + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + context.createScriptProcessor(2); + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + context.createScriptProcessor(128); + }, DOMException.INDEX_SIZE_ERR); + expectException(function() { + context.createScriptProcessor(255); + }, DOMException.INDEX_SIZE_ERR); + + is(sourceSP.channelCount, 2, "script processor node has 2 input channels by default"); + is(sourceSP.channelCountMode, "explicit", "Correct channelCountMode for the script processor node"); + is(sourceSP.channelInterpretation, "speakers", "Correct channelCountInterpretation for the script processor node"); + + function findFirstNonZeroSample(buffer) { + for (var i = 0; i < buffer.length; ++i) { + if (buffer.getChannelData(0)[i] != 0) { + return i; + } + } + return buffer.length; + } + + var sp = context.createScriptProcessor(2048); + sourceSP.connect(sp); + sp.connect(context.destination); + var lastPlaybackTime = 0; + + var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate); + + function checkAudioProcessingEvent(e) { + is(e.target, sp, "Correct event target"); + ok(e.playbackTime > lastPlaybackTime, "playbackTime correctly set"); + lastPlaybackTime = e.playbackTime; 
+ is(e.inputBuffer.numberOfChannels, 2, "Correct number of channels for the input buffer"); + is(e.inputBuffer.length, 2048, "Correct length for the input buffer"); + is(e.inputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the input buffer"); + is(e.outputBuffer.numberOfChannels, 2, "Correct number of channels for the output buffer"); + is(e.outputBuffer.length, 2048, "Correct length for the output buffer"); + is(e.outputBuffer.sampleRate, context.sampleRate, "Correct sample rate for the output buffer"); + + compareChannels(e.outputBuffer.getChannelData(0), emptyBuffer.getChannelData(0)); + compareChannels(e.outputBuffer.getChannelData(1), emptyBuffer.getChannelData(0)); + } + + sp.onaudioprocess = function(e) { + isnot(buffer, null, "The audioprocess handler for sourceSP must be run at this point"); + checkAudioProcessingEvent(e); + + // Because of the initial latency added by the second script processor node, + // we will never see any generated audio frames in the first callback. + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0)); + compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0)); + + sp.onaudioprocess = function(e) { + checkAudioProcessingEvent(e); + + var firstNonZero = findFirstNonZeroSample(e.inputBuffer); + ok(firstNonZero <= 2048, "First non-zero sample within range"); + + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), firstNonZero); + compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), firstNonZero); + compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), 2048 - firstNonZero, firstNonZero, 0); + compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), 2048 - firstNonZero, firstNonZero, 0); + + if (firstNonZero == 0) { + // If we did not experience any delays, the test is done! 
+ sp.onaudioprocess = null; + + SimpleTest.finish(); + } else if (firstNonZero != 2048) { + // In case we just saw a zero buffer this time, wait one more round + sp.onaudioprocess = function(e) { + checkAudioProcessingEvent(e); + + compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), firstNonZero, 0, 2048 - firstNonZero); + compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), firstNonZero, 0, 2048 - firstNonZero); + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), undefined, firstNonZero); + compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), undefined, firstNonZero); + + sp.onaudioprocess = null; + + SimpleTest.finish(); + }; + } + }; + }; +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html b/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html new file mode 100644 index 000000000..6361a1747 --- /dev/null +++ b/dom/media/webaudio/test/test_scriptProcessorNodeChannelCount.html @@ -0,0 +1,80 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// We do not use our generic graph test framework here because +// the testing logic here is sort of complicated, and would +// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes +// can experience delays. 
+ +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(6, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + for (var j = 0; j < 6; ++j) { + buffer.getChannelData(0)[i] = Math.sin(440 * j * Math.PI * i / context.sampleRate); + } + } + + var monoBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + monoBuffer.getChannelData(0)[i] = 1; + } + + var source = context.createBufferSource(); + + var sp = context.createScriptProcessor(2048, 3); + expectException(function() { sp.channelCount = 2; }, + DOMException.NOT_SUPPORTED_ERR); + sp.channelCountMode = "explicit"; + expectException(function() { sp.channelCountMode = "max"; }, + DOMException.NOT_SUPPORTED_ERR); + expectException(function() { sp.channelCountMode = "clamped-max"; }, + DOMException.NOT_SUPPORTED_ERR); + sp.channelInterpretation = "discrete"; + source.start(0); + source.buffer = buffer; + source.connect(sp); + sp.connect(context.destination); + + var monoSource = context.createBufferSource(); + monoSource.buffer = monoBuffer; + monoSource.connect(sp); + monoSource.start(2048 / context.sampleRate); + + sp.onaudioprocess = function(e) { + is(e.inputBuffer.numberOfChannels, 3, "Should be correctly down-mixed to three channels"); + for (var i = 0; i < 3; ++i) { + compareChannels(e.inputBuffer.getChannelData(i), buffer.getChannelData(i)); + } + + // On the next iteration, we'll get a silence buffer + sp.onaudioprocess = function(e) { + var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate); + is(e.inputBuffer.numberOfChannels, 3, "Should be correctly up-mixed to three channels"); + compareChannels(e.inputBuffer.getChannelData(0), monoBuffer.getChannelData(0)); + for (var i = 1; i < 3; ++i) { + compareChannels(e.inputBuffer.getChannelData(i), emptyBuffer.getChannelData(0)); + } + + sp.onaudioprocess = null; + sp.disconnect(context.destination); + + 
SimpleTest.finish(); + }; + }; +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html b/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html new file mode 100644 index 000000000..a3c073e38 --- /dev/null +++ b/dom/media/webaudio/test/test_scriptProcessorNodeNotConnected.html @@ -0,0 +1,34 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test AudioBufferSourceNode: should not fire audioprocess if not connected.</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); +SimpleTest.requestFlakyTimeout("This test needs to wait a while to ensure that a given event does not happen."); +addLoadEvent(function() { + var context = new AudioContext(); + + var sp = context.createScriptProcessor(2048, 2, 2); + sp.onaudioprocess = function(e) { + ok(false, "Should not call onaudioprocess if the node is not connected."); + sp.onaudioprocess = null; + SimpleTest.finish(); + }; + setTimeout(function() { + console.log(sp.onaudioprocess); + if (sp.onaudioprocess) { + ok(true, "onaudioprocess not fired."); + SimpleTest.finish(); + } + }, 4000); +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html b/dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html new file mode 100644 index 000000000..8352a331d --- /dev/null +++ b/dom/media/webaudio/test/test_scriptProcessorNodePassThrough.html @@ -0,0 +1,103 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ScriptProcessorNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link 
rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +// We do not use our generic graph test framework here because +// the testing logic here is sort of complicated, and would +// not be easy to map to OfflineAudioContext, as ScriptProcessorNodes +// can experience delays. + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = null; + + var sourceSP = context.createScriptProcessor(2048); + sourceSP.addEventListener("audioprocess", function(e) { + // generate the audio + for (var i = 0; i < 2048; ++i) { + // Make sure our first sample won't be zero + e.outputBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * (i + 1) / context.sampleRate); + e.outputBuffer.getChannelData(1)[i] = Math.sin(880 * 2 * Math.PI * (i + 1) / context.sampleRate); + } + // Remember our generated audio + buffer = e.outputBuffer; + + sourceSP.removeEventListener("audioprocess", arguments.callee); + }, false); + + function findFirstNonZeroSample(buffer) { + for (var i = 0; i < buffer.length; ++i) { + if (buffer.getChannelData(0)[i] != 0) { + return i; + } + } + return buffer.length; + } + + var sp = context.createScriptProcessor(2048); + sourceSP.connect(sp); + + var spWrapped = SpecialPowers.wrap(sp); + ok("passThrough" in spWrapped, "ScriptProcessorNode should support the passThrough API"); + spWrapped.passThrough = true; + + sp.onaudioprocess = function() { + ok(false, "The audioprocess event must never be dispatched on the passthrough ScriptProcessorNode"); + }; + + var sp2 = context.createScriptProcessor(2048); + sp.connect(sp2); + sp2.connect(context.destination); + + var emptyBuffer = context.createBuffer(1, 2048, context.sampleRate); + + sp2.onaudioprocess = function(e) { + // Because of the initial latency added by the second script processor node, + // we will never see any generated audio frames in the first 
callback. + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0)); + compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0)); + + sp2.onaudioprocess = function(e) { + var firstNonZero = findFirstNonZeroSample(e.inputBuffer); + ok(firstNonZero <= 2048, "First non-zero sample within range"); + + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), firstNonZero); + compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), firstNonZero); + compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), 2048 - firstNonZero, firstNonZero, 0); + compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), 2048 - firstNonZero, firstNonZero, 0); + + if (firstNonZero == 0) { + // If we did not experience any delays, the test is done! + sp2.onaudioprocess = null; + + SimpleTest.finish(); + } else if (firstNonZero != 2048) { + // In case we just saw a zero buffer this time, wait one more round + sp2.onaudioprocess = function(e) { + compareChannels(e.inputBuffer.getChannelData(0), buffer.getChannelData(0), firstNonZero, 0, 2048 - firstNonZero); + compareChannels(e.inputBuffer.getChannelData(1), buffer.getChannelData(1), firstNonZero, 0, 2048 - firstNonZero); + compareChannels(e.inputBuffer.getChannelData(0), emptyBuffer.getChannelData(0), undefined, firstNonZero); + compareChannels(e.inputBuffer.getChannelData(1), emptyBuffer.getChannelData(0), undefined, firstNonZero); + + sp2.onaudioprocess = null; + + SimpleTest.finish(); + }; + } + }; + }; +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html b/dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html new file mode 100644 index 000000000..6ac8beda0 --- /dev/null +++ b/dom/media/webaudio/test/test_scriptProcessorNodeZeroInputOutput.html @@ -0,0 +1,39 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test 
AudioBufferSourceNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + + var sp = context.createScriptProcessor(2048, 0, 2); + sp.onaudioprocess = function(e) { + is(e.inputBuffer.numberOfChannels, 0, "Should have 0 input channels"); + is(e.outputBuffer.numberOfChannels, 2, "Should have 2 output channels"); + sp.onaudioprocess = null; + + sp = context.createScriptProcessor(2048, 2, 0); + sp.onaudioprocess = function(e) { + is(e.inputBuffer.numberOfChannels, 2, "Should have 2 input channels"); + is(e.outputBuffer.numberOfChannels, 0, "Should have 0 output channels"); + sp.onaudioprocess = null; + + SimpleTest.finish(); + }; + sp.connect(context.destination); + }; + sp.connect(context.destination); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html b/dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html new file mode 100644 index 000000000..43cd13912 --- /dev/null +++ b/dom/media/webaudio/test/test_scriptProcessorNode_playbackTime1.html @@ -0,0 +1,52 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test ScriptProcessorNode playbackTime for bug 970773</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); + +var context = new AudioContext(); +const delay = 0.1; + +function doTest() { + const processorBufferLength = 256; + // |currentTime| may include double precision floating point + // rounding 
errors, so round to nearest integer sample to ignore these. + var minimumPlaybackSample = + Math.round(context.currentTime * context.sampleRate) + + processorBufferLength; + var sp = context.createScriptProcessor(processorBufferLength); + sp.connect(context.destination); + sp.onaudioprocess = + function(e) { + is(e.inputBuffer.length, processorBufferLength, + "expected buffer length"); + var playbackSample = Math.round(e.playbackTime * context.sampleRate) + ok(playbackSample >= minimumPlaybackSample, + "playbackSample " + playbackSample + + " beyond expected minimum " + minimumPlaybackSample); + sp.onaudioprocess = null; + SimpleTest.finish(); + }; +} + +// Wait until AudioDestinationNode has accumulated enough 'extra' time so that +// a failure would be easily detected. +(function waitForExtraTime() { + if (context.currentTime < delay) { + SimpleTest.executeSoon(waitForExtraTime); + } else { + doTest(); + } +})(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html b/dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html new file mode 100644 index 000000000..5c03a8a91 --- /dev/null +++ b/dom/media/webaudio/test/test_sequentialBufferSourceWithResampling.html @@ -0,0 +1,72 @@ +<!DOCTYPE html> +<title>Test seamless playback of a series of resampled buffers</title> +<script src="/resources/testharness.js"></script> +<script src="/resources/testharnessreport.js"></script> +<script> +// Permitting some accumulation of rounding to int16_t. +// 64/2^15 would be only just small enough to detect off-by-one-subsample +// scheduling errors with the frequencies here. +const EPSILON = 4.0 / Math.pow(2, 15); +// Offsets test for rounding to nearest rather than up or down. +const OFFSETS = [EPSILON, 1.0 - EPSILON]; +// The ratio of resampling is 147:160, so 256 start points is enough to cover +// every fractional offset. 
+const LENGTH = 256; + +function do_test(context_rate, buffer_rate, start_offset) { + + var context = + new OfflineAudioContext(2, LENGTH, context_rate); + + var merger = context.createChannelMerger(context.destination.channelCount); + merger.connect(context.destination); + + // Create an audio signal that will be repeated + var repeating_signal = context.createBuffer(1, 1, buffer_rate); + repeating_signal.getChannelData(0)[0] = 0.5; + + // Schedule a series of nodes to repeat the signal. + for (var i = 0; i < LENGTH; ++i) { + var source = context.createBufferSource(); + source.buffer = repeating_signal; + source.connect(merger, 0, 0); + source.start((i + start_offset) / buffer_rate); + } + + // A single long signal should produce the same result. + var long_signal = context.createBuffer(1, LENGTH, buffer_rate); + var c = long_signal.getChannelData(0); + for (var i = 0; i < c.length; ++i) { + c[i] = 0.5; + } + + var source = context.createBufferSource(); + source.buffer = long_signal; + source.connect(merger, 0, 1); + source.start(start_offset / buffer_rate); + + return context.startRendering(). 
+ then((buffer) => { + series_output = buffer.getChannelData(0); + expected = buffer.getChannelData(1); + + for (var i = 0; i < buffer.length; ++i) { + assert_approx_equals(series_output[i], expected[i], EPSILON, + "series output at " + i); + } + }); +} + +function start_tests(context_rate, buffer_rate) { + OFFSETS.forEach((start_offset) => { + promise_test(() => do_test(context_rate, buffer_rate, start_offset), + "" + context_rate + " context, " + + buffer_rate + " buffer, " + + start_offset + " start"); + }); +} + +start_tests(48000, 44100); +start_tests(44100, 48000); + +</script> diff --git a/dom/media/webaudio/test/test_singleSourceDest.html b/dom/media/webaudio/test/test_singleSourceDest.html new file mode 100644 index 000000000..8613a2dd9 --- /dev/null +++ b/dom/media/webaudio/test/test_singleSourceDest.html @@ -0,0 +1,70 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test whether we can create an AudioContext interface</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +SimpleTest.waitForExplicitFinish(); +addLoadEvent(function() { + var context = new AudioContext(); + var buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + var destination = context.destination; + is(destination.context, context, "Destination node has proper context"); + is(destination.context, context, "Destination node has proper context"); + is(destination.numberOfInputs, 1, "Destination node has 1 inputs"); + is(destination.numberOfOutputs, 0, "Destination node has 0 outputs"); + is(destination.channelCount, 2, "Destination node has 2 input channels by default"); + 
is(destination.channelCountMode, "explicit", "Correct channelCountMode for the destination node"); + is(destination.channelInterpretation, "speakers", "Correct channelCountInterpretation for the destination node"); + ok(destination instanceof EventTarget, "AudioNodes must be EventTargets"); + + var source = context.createBufferSource(); + is(source.context, context, "Source node has proper context"); + is(source.numberOfInputs, 0, "Source node has 0 inputs"); + is(source.numberOfOutputs, 1, "Source node has 1 outputs"); + is(source.loop, false, "Source node is not looping"); + is(source.loopStart, 0, "Correct default value for loopStart"); + is(source.loopEnd, 0, "Correct default value for loopEnd"); + ok(!source.buffer, "Source node should not have a buffer when it's created"); + is(source.channelCount, 2, "source node has 2 input channels by default"); + is(source.channelCountMode, "max", "Correct channelCountMode for the source node"); + is(source.channelInterpretation, "speakers", "Correct channelCountInterpretation for the source node"); + + expectException(function() { + source.channelCount = 0; + }, DOMException.NOT_SUPPORTED_ERR); + + source.buffer = buffer; + ok(source.buffer, "Source node should have a buffer now"); + + source.connect(destination); + + is(source.numberOfInputs, 0, "Source node has 0 inputs"); + is(source.numberOfOutputs, 1, "Source node has 0 outputs"); + is(destination.numberOfInputs, 1, "Destination node has 0 inputs"); + is(destination.numberOfOutputs, 0, "Destination node has 0 outputs"); + + source.start(0); + SimpleTest.executeSoon(function() { + source.stop(0); + source.disconnect(); + + SpecialPowers.clearUserPref("media.webaudio.enabled"); + SimpleTest.finish(); + }); +}); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_stereoPannerNode.html b/dom/media/webaudio/test/test_stereoPannerNode.html new file mode 100644 index 000000000..ffc735364 --- /dev/null +++ 
b/dom/media/webaudio/test/test_stereoPannerNode.html @@ -0,0 +1,263 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test StereoPannerNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var SR = 44100; +var BUF_SIZE = 128; +var PANNING = 0.1; +var GAIN = 0.5; + +// Cheap reimplementation of some bits of the spec +function gainForPanningMonoToStereo(panning) { + panning += 1; + panning /= 2; + return [ Math.cos(0.5 * Math.PI * panning), + Math.sin(0.5 * Math.PI * panning) ]; +} + +function gainForPanningStereoToStereo(panning) { + if (panning <= 0) { + panning += 1.; + } + return [ Math.cos(0.5 * Math.PI * panning), + Math.sin(0.5 * Math.PI * panning) ]; +} + +function applyStereoToStereoPanning(l, r, panningValues, panning) { + var outL, outR; + if (panning <= 0) { + outL = l + r * panningValues[0]; + outR = r * panningValues[1]; + } else { + outL = l * panningValues[0]; + outR = r + l * panningValues[1]; + } + return [outL,outR]; +} + +function applyMonoToStereoPanning(c, panning) { + return [c * panning[0], c * panning[1]]; +} + +// Test the DOM interface +var context = new OfflineAudioContext(1, 1, SR); +var stereoPanner = context.createStereoPanner(); +ok(stereoPanner.pan, "The AudioParam member must exist"); +is(stereoPanner.pan.value, 0.0, "Correct initial value"); +is(stereoPanner.pan.defaultValue, 0.0, "Correct default value"); +is(stereoPanner.channelCount, 2, "StereoPannerNode node has 2 input channels by default"); +is(stereoPanner.channelCountMode, "clamped-max", "Correct channelCountMode for the StereoPannerNode"); +is(stereoPanner.channelInterpretation, "speakers", "Correct channelCountInterpretation for the StereoPannerNode"); +expectException(function() { + stereoPanner.channelCount = 3; 
+}, DOMException.NOT_SUPPORTED_ERR); +expectException(function() { + stereoPanner.channelCountMode = "max"; +}, DOMException.NOT_SUPPORTED_ERR); + +// A sine to be used to fill the buffers +function sine(t) { + return Math.sin(440 * 2 * Math.PI * t / context.sampleRate); +} + +// A couple mono and stereo buffers: the StereoPannerNode equation is different +// if the input is mono or stereo +var stereoBuffer = context.createBuffer(2, BUF_SIZE, context.sampleRate); +var monoBuffer = context.createBuffer(1, BUF_SIZE, context.sampleRate); +for (var i = 0; i < BUF_SIZE; ++i) { + monoBuffer.getChannelData(0)[i] = + stereoBuffer.getChannelData(0)[i] = + stereoBuffer.getChannelData(1)[i] = sine(i); +} + +// Expected test vectors +function expectedBufferNoop(gain) { + gain = gain || 1.0; + var expectedBuffer = context.createBuffer(2, BUF_SIZE, SR); + for (var i = 0; i < BUF_SIZE; i++) { + expectedBuffer.getChannelData(0)[i] = gain * sine(i); + expectedBuffer.getChannelData(1)[i] = gain * sine(i); + } + return expectedBuffer; +} + +function expectedBufferForStereo(gain) { + gain = gain || 1.0; + var expectedBuffer = context.createBuffer(2, BUF_SIZE, SR); + var gainPanning = gainForPanningStereoToStereo(PANNING); + gainPanning[0] *= gain; + gainPanning[1] *= gain; + for (var i = 0; i < BUF_SIZE; i++) { + var values = [ sine(i), sine(i) ]; + var processed = applyStereoToStereoPanning(values[0], values[1], gainPanning, PANNING); + expectedBuffer.getChannelData(0)[i] = processed[0]; + expectedBuffer.getChannelData(1)[i] = processed[1]; + } + return expectedBuffer; +} + +function expectedBufferForMono(gain) { + gain = gain || 1.0; + var expectedBuffer = context.createBuffer(2, BUF_SIZE, SR); + var gainPanning = gainForPanningMonoToStereo(PANNING); + gainPanning[0] *= gain; + gainPanning[1] *= gain; + for (var i = 0; i < BUF_SIZE; i++) { + var value = sine(i); + var processed = applyMonoToStereoPanning(value, gainPanning); + expectedBuffer.getChannelData(0)[i] = processed[0]; + 
expectedBuffer.getChannelData(1)[i] = processed[1]; + } + return expectedBuffer; +} + +// Actual test cases +var tests = [ + function monoPanningNoop(ctx, panner) { + var monoSource = ctx.createBufferSource(); + monoSource.connect(panner); + monoSource.buffer = monoBuffer; + monoSource.start(0); + return expectedBufferNoop(); + }, + function stereoPanningNoop(ctx, panner) { + var stereoSource = ctx.createBufferSource(); + stereoSource.connect(panner); + stereoSource.buffer = stereoBuffer; + stereoSource.start(0); + return expectedBufferNoop(); + }, + function monoPanningNoopWithGain(ctx, panner) { + var monoSource = ctx.createBufferSource(); + var gain = ctx.createGain(); + gain.gain.value = GAIN; + monoSource.connect(gain); + gain.connect(panner); + monoSource.buffer = monoBuffer; + monoSource.start(0); + return expectedBufferNoop(GAIN); + }, + function stereoPanningNoopWithGain(ctx, panner) { + var stereoSource = ctx.createBufferSource(); + var gain = ctx.createGain(); + gain.gain.value = GAIN; + stereoSource.connect(gain); + gain.connect(panner); + stereoSource.buffer = stereoBuffer; + stereoSource.start(0); + return expectedBufferNoop(GAIN); + }, + function stereoPanningAutomation(ctx, panner) { + var stereoSource = ctx.createBufferSource(); + stereoSource.connect(panner); + stereoSource.buffer = stereoBuffer; + panner.pan.setValueAtTime(0.1, 0.0); + stereoSource.start(0); + return expectedBufferForStereo(); + }, + function stereoPanning(ctx, panner) { + var stereoSource = ctx.createBufferSource(); + stereoSource.buffer = stereoBuffer; + stereoSource.connect(panner); + panner.pan.value = 0.1; + stereoSource.start(0); + return expectedBufferForStereo(); + }, + function monoPanningAutomation(ctx, panner) { + var monoSource = ctx.createBufferSource(); + monoSource.connect(panner); + monoSource.buffer = monoBuffer; + panner.pan.setValueAtTime(PANNING, 0.0); + monoSource.start(0); + return expectedBufferForMono(); + }, + function monoPanning(ctx, panner) { + var 
monoSource = ctx.createBufferSource(); + monoSource.connect(panner); + monoSource.buffer = monoBuffer; + panner.pan.value = 0.1; + monoSource.start(0); + return expectedBufferForMono(); + }, + function monoPanningWithGain(ctx, panner) { + var monoSource = ctx.createBufferSource(); + var gain = ctx.createGain(); + gain.gain.value = GAIN; + monoSource.connect(gain); + gain.connect(panner); + monoSource.buffer = monoBuffer; + panner.pan.value = 0.1; + monoSource.start(0); + return expectedBufferForMono(GAIN); + }, + function stereoPanningWithGain(ctx, panner) { + var stereoSource = ctx.createBufferSource(); + var gain = ctx.createGain(); + gain.gain.value = GAIN; + stereoSource.connect(gain); + gain.connect(panner); + stereoSource.buffer = stereoBuffer; + panner.pan.value = 0.1; + stereoSource.start(0); + return expectedBufferForStereo(GAIN); + }, + function monoPanningWithGainAndAutomation(ctx, panner) { + var monoSource = ctx.createBufferSource(); + var gain = ctx.createGain(); + gain.gain.value = GAIN; + monoSource.connect(gain); + gain.connect(panner); + monoSource.buffer = monoBuffer; + panner.pan.setValueAtTime(PANNING, 0); + monoSource.start(0); + return expectedBufferForMono(GAIN); + }, + function stereoPanningWithGainAndAutomation(ctx, panner) { + var stereoSource = ctx.createBufferSource(); + var gain = ctx.createGain(); + gain.gain.value = GAIN; + stereoSource.connect(gain); + gain.connect(panner); + stereoSource.buffer = stereoBuffer; + panner.pan.setValueAtTime(PANNING, 0); + stereoSource.start(0); + return expectedBufferForStereo(GAIN); + } +]; + +var finished = 0; +function finish() { + if (++finished == tests.length) { + SimpleTest.finish(); + } +} + +tests.forEach(function(f) { + var ac = new OfflineAudioContext(2, BUF_SIZE, SR); + var panner = ac.createStereoPanner(); + panner.connect(ac.destination); + var expected = f(ac, panner); + ac.oncomplete = function(e) { + info(f.name); + compareBuffers(e.renderedBuffer, expected); + finish(); + }; + 
ac.startRendering() +}); + +SimpleTest.waitForExplicitFinish(); + +</script> +</pre> +<pre id=dump> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_stereoPannerNodePassThrough.html b/dom/media/webaudio/test/test_stereoPannerNodePassThrough.html new file mode 100644 index 000000000..250a1a9de --- /dev/null +++ b/dom/media/webaudio/test/test_stereoPannerNodePassThrough.html @@ -0,0 +1,47 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test StereoPanerNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + + var stereoPanner = context.createStereoPanner(); + + source.buffer = this.buffer; + + source.connect(stereoPanner); + + var stereoPannerWrapped = SpecialPowers.wrap(stereoPanner); + ok("passThrough" in stereoPannerWrapped, "StereoPannerNode should support the passThrough API"); + stereoPannerWrapped.passThrough = true; + + source.start(0); + return stereoPanner; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + this.buffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_stereoPanningWithGain.html b/dom/media/webaudio/test/test_stereoPanningWithGain.html new file mode 100644 index 000000000..1ef0c037d --- /dev/null +++ b/dom/media/webaudio/test/test_stereoPanningWithGain.html @@ -0,0 +1,49 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test stereo equalpower panning with a 
GainNode</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script src="webaudio.js" type="text/javascript"></script> +<script class="testbody" type="text/javascript"> + +const size = 256; + +var gTest = { + numberOfChannels: 2, + createGraph: function(context) { + var panner = context.createPanner(); + panner.setPosition(1.0, 0.0, 0.0); // reference distance the right + panner.panningModel = "equalpower"; + + var gain = context.createGain(); + gain.gain.value = -0.5; + gain.connect(panner); + + var buffer = context.createBuffer(2, 2, context.sampleRate); + buffer.getChannelData(0)[0] = 1.0; + buffer.getChannelData(1)[1] = 1.0; + var source = context.createBufferSource(); + source.buffer = buffer; + source.connect(gain); + source.start(0); + + return panner; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(2, size, context.sampleRate); + expectedBuffer.getChannelData(1)[0] = -0.5; + expectedBuffer.getChannelData(1)[1] = -0.5; + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_waveDecoder.html b/dom/media/webaudio/test/test_waveDecoder.html new file mode 100644 index 000000000..bd5faf6ad --- /dev/null +++ b/dom/media/webaudio/test/test_waveDecoder.html @@ -0,0 +1,69 @@ +<!DOCTYPE HTML> +<html> +<meta charset=utf-8> +<head> + <title>Test that we decode uint8 and sint16 wave files with correct conversion to float64</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> +var testsDone = 0; +var tests = ["UklGRjUrAABXQVZFZm10IBAAAAABAAEAESsAABErAAABAAgAZGF0YQMAAAD/AIA=", + 
"UklGRkZWAABXQVZFZm10IBAAAAABAAEAESsAACJWAAACABAAZGF0YQYAAAD/fwCAAAA="]; + +SimpleTest.waitForExplicitFinish(); + +function base64ToUint8Buffer(b64) { + var str = atob(b64) + var u8 = new Uint8Array(str.length); + for (var i = 0; i < str.length; ++i) { + u8[i] = str.charCodeAt(i); + } + return u8; +} + +function fixupBufferSampleRate(u8, rate) { + u8[24] = (rate & 0x000000ff) >> 0; + u8[25] = (rate & 0x0000ff00) >> 8; + u8[26] = (rate & 0x00ff0000) >> 16; + u8[27] = (rate & 0xff000000) >> 24; +} + +function finishTest() { + testsDone += 1; + if (testsDone == tests.length) { + SimpleTest.finish(); + } +} + +function decodeComplete(b) { + ok(true, "Decoding succeeded."); + is(b.numberOfChannels, 1, "Should have 1 channel."); + is(b.length, 3, "Should have three samples."); + var samples = b.getChannelData(0); + ok(samples[0] > 0.99 && samples[0] < 1.01, "Check near 1.0. Got " + samples[0]); + ok(samples[1] > -1.01 && samples[1] < -0.99, "Check near -1.0. Got " + samples[1]); + ok(samples[2] > -0.01 && samples[2] < 0.01, "Check near 0.0. 
Got " + samples[2]); + finishTest(); +} + +function decodeFailed() { + ok(false, "Decoding failed."); + finishTest(); +} + +addLoadEvent(function() { + var context = new AudioContext(); + + for (var i = 0; i < tests.length; ++i) { + var u8 = base64ToUint8Buffer(tests[i]); + fixupBufferSampleRate(u8, context.sampleRate); + context.decodeAudioData(u8.buffer, decodeComplete, decodeFailed); + } +}); +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_waveShaper.html b/dom/media/webaudio/test/test_waveShaper.html new file mode 100644 index 000000000..c95cf5e05 --- /dev/null +++ b/dom/media/webaudio/test/test_waveShaper.html @@ -0,0 +1,60 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test WaveShaperNode with no curve</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + source.buffer = this.buffer; + + var shaper = context.createWaveShaper(); + shaper.curve = this.curve; + + source.connect(shaper); + + source.start(0); + return shaper; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 4096, context.sampleRate); + for (var i = 1; i < 4095; ++i) { + this.buffer.getChannelData(0)[i] = 2 * (i / 4096) - 1; + } + // Two out of range values + this.buffer.getChannelData(0)[0] = -2; + this.buffer.getChannelData(0)[4095] = 2; + + this.curve = new Float32Array(2048); + for (var i = 0; i < 2048; ++i) { + this.curve[i] = Math.sin(100 * Math.PI * (i + 1) / context.sampleRate); + } + + var expectedBuffer = context.createBuffer(1, 4096, context.sampleRate); + for (var i = 1; i < 4095; ++i) { + var input = 
this.buffer.getChannelData(0)[i]; + var index = Math.floor(this.curve.length * (input + 1) / 2); + index = Math.max(0, Math.min(this.curve.length - 1, index)); + expectedBuffer.getChannelData(0)[i] = this.curve[index]; + } + expectedBuffer.getChannelData(0)[0] = this.curve[0]; + expectedBuffer.getChannelData(0)[4095] = this.curve[2047]; + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_waveShaperGain.html b/dom/media/webaudio/test/test_waveShaperGain.html new file mode 100644 index 000000000..b0c82d2d8 --- /dev/null +++ b/dom/media/webaudio/test/test_waveShaperGain.html @@ -0,0 +1,73 @@ +<!DOCTYPE HTML> +<html> +<head> +<meta charset="utf-8"> + <title>Test that WaveShaperNode doesn't corrupt its inputs when the gain is != + 1.0 (bug 1203616)</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<script class="testbody" type="text/javascript"> +SimpleTest.waitForExplicitFinish(); +var samplerate = 44100; +var context = new OfflineAudioContext(1, 44100, samplerate); + +var dc = context.createBufferSource(); + +var buffer = context.createBuffer(1, 1, samplerate); +buffer.getChannelData(0)[0] = 1.0; +dc.buffer = buffer; + +var gain = context.createGain(); +var ws2 = context.createWaveShaper(); +var ws = []; + +// No-op waveshaper curves. 
+for (var i = 0; i < 2; i++) { + ws[i] = context.createWaveShaper(); + var curve = new Float32Array(2); + curve[0] = -1.0; + curve[1] = 1.0; + ws[i].curve = curve; + ws[i].connect(context.destination); + gain.connect(ws[i]); +} + +dc.connect(gain); +dc.start(); + +gain.gain.value = 0.5; + +context.startRendering().then(buffer => { + document.querySelector("pre").innerHTML = buffer.getChannelData(0)[0]; + ok(buffer.getChannelData(0)[0] == 1.0, "Volume was handled properly"); + + context = new OfflineAudioContext(1, 100, samplerate); + var oscillator = context.createOscillator(); + var gain = context.createGain(); + var waveShaper = context.createWaveShaper(); + + oscillator.start(0); + oscillator.connect(gain); + + // to silence + gain.gain.value = 0; + gain.connect(waveShaper); + + // convert all signal into 1.0. The non unity values are to detect the use + // of uninitialized buffers (see Bug 1283910). + waveShaper.curve = new Float32Array([ 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 0.5, 0.5 ]); + waveShaper.connect(context.destination); + + context.startRendering().then((buffer) => { + var result = buffer.getChannelData(0); + ok(result.every(x => x === 1), "WaveShaper handles zero gain properly"); + SimpleTest.finish(); + }); +}); +</script> +<pre> +</pre> +</body> + diff --git a/dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html b/dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html new file mode 100644 index 000000000..f117f0376 --- /dev/null +++ b/dom/media/webaudio/test/test_waveShaperInvalidLengthCurve.html @@ -0,0 +1,66 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test WaveShaperNode with an invalid curve</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + 
length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + source.buffer = this.buffer; + + var shaper = context.createWaveShaper(); + + expectException(() => { + shaper.curve = new Float32Array(0); + }, DOMException.INVALID_STATE_ERR); + + is(shaper.curve, null, "The curve mustn't have been set"); + + expectException(() => { + shaper.curve = new Float32Array(1); + }, DOMException.INVALID_STATE_ERR); + + is(shaper.curve, null, "The curve mustn't have been set"); + + expectNoException(() => { + shaper.curve = new Float32Array(2); + }); + + isnot(shaper.curve, null, "The curve must have been set"); + + expectNoException(() => { + shaper.curve = null; + }); + + is(shaper.curve, null, "The curve must be null by default"); + + source.connect(shaper); + + source.start(0); + return shaper; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + this.buffer = expectedBuffer; + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_waveShaperNoCurve.html b/dom/media/webaudio/test/test_waveShaperNoCurve.html new file mode 100644 index 000000000..c0d3187b2 --- /dev/null +++ b/dom/media/webaudio/test/test_waveShaperNoCurve.html @@ -0,0 +1,43 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test WaveShaperNode with no curve</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 2048, + numberOfChannels: 1, + createGraph: function(context) { + var source = 
context.createBufferSource(); + source.buffer = this.buffer; + + var shaper = context.createWaveShaper(); + is(shaper.curve, null, "The shaper curve must be null by default"); + + source.connect(shaper); + + source.start(0); + return shaper; + }, + createExpectedBuffers: function(context) { + var expectedBuffer = context.createBuffer(1, 2048, context.sampleRate); + for (var i = 0; i < 2048; ++i) { + expectedBuffer.getChannelData(0)[i] = Math.sin(440 * 2 * Math.PI * i / context.sampleRate); + } + this.buffer = expectedBuffer; + return expectedBuffer; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/test_waveShaperPassThrough.html b/dom/media/webaudio/test/test_waveShaperPassThrough.html new file mode 100644 index 000000000..52c70d3c2 --- /dev/null +++ b/dom/media/webaudio/test/test_waveShaperPassThrough.html @@ -0,0 +1,55 @@ +<!DOCTYPE HTML> +<html> +<head> + <title>Test WaveShaperNode with passthrough</title> + <script type="text/javascript" src="/tests/SimpleTest/SimpleTest.js"></script> + <script type="text/javascript" src="webaudio.js"></script> + <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css" /> +</head> +<body> +<pre id="test"> +<script class="testbody" type="text/javascript"> + +var gTest = { + length: 4096, + numberOfChannels: 1, + createGraph: function(context) { + var source = context.createBufferSource(); + source.buffer = this.buffer; + + var shaper = context.createWaveShaper(); + shaper.curve = this.curve; + + var shaperWrapped = SpecialPowers.wrap(shaper); + ok("passThrough" in shaperWrapped, "WaveShaperNode should support the passThrough API"); + shaperWrapped.passThrough = true; + + source.connect(shaper); + + source.start(0); + return shaper; + }, + createExpectedBuffers: function(context) { + this.buffer = context.createBuffer(1, 4096, context.sampleRate); + for (var i = 1; i < 4095; ++i) { + this.buffer.getChannelData(0)[i] = 2 * (i / 4096) - 1; + } + // Two out of range 
values + this.buffer.getChannelData(0)[0] = -2; + this.buffer.getChannelData(0)[4095] = 2; + + this.curve = new Float32Array(2048); + for (var i = 0; i < 2048; ++i) { + this.curve[i] = Math.sin(100 * Math.PI * (i + 1) / context.sampleRate); + } + + return [this.buffer]; + }, +}; + +runTest(); + +</script> +</pre> +</body> +</html> diff --git a/dom/media/webaudio/test/ting-44.1k-1ch.ogg b/dom/media/webaudio/test/ting-44.1k-1ch.ogg Binary files differnew file mode 100644 index 000000000..a11aaf1cb --- /dev/null +++ b/dom/media/webaudio/test/ting-44.1k-1ch.ogg diff --git a/dom/media/webaudio/test/ting-44.1k-1ch.wav b/dom/media/webaudio/test/ting-44.1k-1ch.wav Binary files differnew file mode 100644 index 000000000..6854c9d89 --- /dev/null +++ b/dom/media/webaudio/test/ting-44.1k-1ch.wav diff --git a/dom/media/webaudio/test/ting-44.1k-2ch.ogg b/dom/media/webaudio/test/ting-44.1k-2ch.ogg Binary files differnew file mode 100644 index 000000000..94e001485 --- /dev/null +++ b/dom/media/webaudio/test/ting-44.1k-2ch.ogg diff --git a/dom/media/webaudio/test/ting-44.1k-2ch.wav b/dom/media/webaudio/test/ting-44.1k-2ch.wav Binary files differnew file mode 100644 index 000000000..703d88589 --- /dev/null +++ b/dom/media/webaudio/test/ting-44.1k-2ch.wav diff --git a/dom/media/webaudio/test/ting-48k-1ch.ogg b/dom/media/webaudio/test/ting-48k-1ch.ogg Binary files differnew file mode 100644 index 000000000..f45ce33a5 --- /dev/null +++ b/dom/media/webaudio/test/ting-48k-1ch.ogg diff --git a/dom/media/webaudio/test/ting-48k-1ch.wav b/dom/media/webaudio/test/ting-48k-1ch.wav Binary files differnew file mode 100644 index 000000000..8fe471666 --- /dev/null +++ b/dom/media/webaudio/test/ting-48k-1ch.wav diff --git a/dom/media/webaudio/test/ting-48k-2ch.ogg b/dom/media/webaudio/test/ting-48k-2ch.ogg Binary files differnew file mode 100644 index 000000000..e4c564abb --- /dev/null +++ b/dom/media/webaudio/test/ting-48k-2ch.ogg diff --git a/dom/media/webaudio/test/ting-48k-2ch.wav 
b/dom/media/webaudio/test/ting-48k-2ch.wav Binary files differnew file mode 100644 index 000000000..ad4d0466d --- /dev/null +++ b/dom/media/webaudio/test/ting-48k-2ch.wav diff --git a/dom/media/webaudio/test/ting-dualchannel44.1.wav b/dom/media/webaudio/test/ting-dualchannel44.1.wav Binary files differnew file mode 100644 index 000000000..62954394d --- /dev/null +++ b/dom/media/webaudio/test/ting-dualchannel44.1.wav diff --git a/dom/media/webaudio/test/ting-dualchannel48.wav b/dom/media/webaudio/test/ting-dualchannel48.wav Binary files differnew file mode 100644 index 000000000..a0b824788 --- /dev/null +++ b/dom/media/webaudio/test/ting-dualchannel48.wav diff --git a/dom/media/webaudio/test/webaudio.js b/dom/media/webaudio/test/webaudio.js new file mode 100644 index 000000000..1a1a8efb7 --- /dev/null +++ b/dom/media/webaudio/test/webaudio.js @@ -0,0 +1,269 @@ +// Helpers for Web Audio tests + +function expectException(func, exceptionCode) { + var threw = false; + try { + func(); + } catch (ex) { + threw = true; + ok(ex instanceof DOMException, "Expect a DOM exception"); + is(ex.code, exceptionCode, "Expect the correct exception code"); + } + ok(threw, "The exception was thrown"); +} + +function expectNoException(func) { + var threw = false; + try { + func(); + } catch (ex) { + threw = true; + } + ok(!threw, "An exception was not thrown"); +} + +function expectTypeError(func) { + var threw = false; + try { + func(); + } catch (ex) { + threw = true; + ok(ex instanceof TypeError, "Expect a TypeError"); + } + ok(threw, "The exception was thrown"); +} + +function expectRejectedPromise(that, func, exceptionName) { + var promise = that[func](); + + ok(promise instanceof Promise, "Expect a Promise"); + + promise.then(function(res) { + ok(false, "Promise resolved when it should have been rejected."); + }).catch(function(err) { + is(err.name, exceptionName, "Promise correctly reject with " + exceptionName); + }); +} + +function fuzzyCompare(a, b) { + return Math.abs(a - b) < 
9e-3; +} + +function compareChannels(buf1, buf2, + /*optional*/ length, + /*optional*/ sourceOffset, + /*optional*/ destOffset, + /*optional*/ skipLengthCheck) { + if (!skipLengthCheck) { + is(buf1.length, buf2.length, "Channels must have the same length"); + } + sourceOffset = sourceOffset || 0; + destOffset = destOffset || 0; + if (length == undefined) { + length = buf1.length - sourceOffset; + } + var difference = 0; + var maxDifference = 0; + var firstBadIndex = -1; + for (var i = 0; i < length; ++i) { + if (!fuzzyCompare(buf1[i + sourceOffset], buf2[i + destOffset])) { + difference++; + maxDifference = Math.max(maxDifference, Math.abs(buf1[i + sourceOffset] - buf2[i + destOffset])); + if (firstBadIndex == -1) { + firstBadIndex = i; + } + } + }; + + is(difference, 0, "maxDifference: " + maxDifference + + ", first bad index: " + firstBadIndex + + " with test-data offset " + sourceOffset + " and expected-data offset " + + destOffset + "; corresponding values " + buf1[firstBadIndex + sourceOffset] + + " and " + buf2[firstBadIndex + destOffset] + " --- differences"); +} + +function compareBuffers(got, expected) { + if (got.numberOfChannels != expected.numberOfChannels) { + is(got.numberOfChannels, expected.numberOfChannels, + "Correct number of buffer channels"); + return; + } + if (got.length != expected.length) { + is(got.length, expected.length, + "Correct buffer length"); + return; + } + if (got.sampleRate != expected.sampleRate) { + is(got.sampleRate, expected.sampleRate, + "Correct sample rate"); + return; + } + + for (var i = 0; i < got.numberOfChannels; ++i) { + compareChannels(got.getChannelData(i), expected.getChannelData(i), + got.length, 0, 0, true); + } +} + +/** + * Compute the root mean square (RMS, + * <http://en.wikipedia.org/wiki/Root_mean_square>) of a channel of a slice + * (defined by `start` and `end`) of an AudioBuffer. + * + * This is useful to detect that a buffer is noisy or silent. 
+ */ +function rms(audiobuffer, channel = 0, start = 0, end = audiobuffer.length) { + var buffer= audiobuffer.getChannelData(channel); + var rms = 0; + for (var i = start; i < end; i++) { + rms += buffer[i] * buffer[i]; + } + + rms /= buffer.length; + rms = Math.sqrt(rms); + return rms; +} + +function getEmptyBuffer(context, length) { + return context.createBuffer(gTest.numberOfChannels, length, context.sampleRate); +} + +/** + * This function assumes that the test file defines a single gTest variable with + * the following properties and methods: + * + * + numberOfChannels: optional property which specifies the number of channels + * in the output. The default value is 2. + * + createGraph: mandatory method which takes a context object and does + * everything needed in order to set up the Web Audio graph. + * This function returns the node to be inspected. + * + createGraphAsync: async version of createGraph. This function takes + * a callback which should be called with an argument + * set to the node to be inspected when the callee is + * ready to proceed with the test. Either this function + * or createGraph must be provided. + * + createExpectedBuffers: optional method which takes a context object and + * returns either one expected buffer or an array of + * them, designating what is expected to be observed + * in the output. If omitted, the output is expected + * to be silence. All buffers must have the same + * length, which must be a bufferSize supported by + * ScriptProcessorNode. This function is guaranteed + * to be called before createGraph. + * + length: property equal to the total number of frames which we are waiting + * to see in the output, mandatory if createExpectedBuffers is not + * provided, in which case it must be a bufferSize supported by + * ScriptProcessorNode (256, 512, 1024, 2048, 4096, 8192, or 16384). + * If createExpectedBuffers is provided then this must be equal to + * the number of expected buffers * the expected buffer length. 
+ * + * + skipOfflineContextTests: optional. when true, skips running tests on an offline + * context by circumventing testOnOfflineContext. + */ +function runTest() +{ + function done() { + SimpleTest.finish(); + } + + SimpleTest.waitForExplicitFinish(); + function runTestFunction () { + if (!gTest.numberOfChannels) { + gTest.numberOfChannels = 2; // default + } + + var testLength; + + function runTestOnContext(context, callback, testOutput) { + if (!gTest.createExpectedBuffers) { + // Assume that the output is silence + var expectedBuffers = getEmptyBuffer(context, gTest.length); + } else { + var expectedBuffers = gTest.createExpectedBuffers(context); + } + if (!(expectedBuffers instanceof Array)) { + expectedBuffers = [expectedBuffers]; + } + var expectedFrames = 0; + for (var i = 0; i < expectedBuffers.length; ++i) { + is(expectedBuffers[i].numberOfChannels, gTest.numberOfChannels, + "Correct number of channels for expected buffer " + i); + expectedFrames += expectedBuffers[i].length; + } + if (gTest.length && gTest.createExpectedBuffers) { + is(expectedFrames, gTest.length, "Correct number of expected frames"); + } + + if (gTest.createGraphAsync) { + gTest.createGraphAsync(context, function(nodeToInspect) { + testOutput(nodeToInspect, expectedBuffers, callback); + }); + } else { + testOutput(gTest.createGraph(context), expectedBuffers, callback); + } + } + + function testOnNormalContext(callback) { + function testOutput(nodeToInspect, expectedBuffers, callback) { + testLength = 0; + var sp = context.createScriptProcessor(expectedBuffers[0].length, gTest.numberOfChannels, 0); + nodeToInspect.connect(sp); + sp.onaudioprocess = function(e) { + var expectedBuffer = expectedBuffers.shift(); + testLength += expectedBuffer.length; + compareBuffers(e.inputBuffer, expectedBuffer); + if (expectedBuffers.length == 0) { + sp.onaudioprocess = null; + callback(); + } + }; + } + var context = new AudioContext(); + runTestOnContext(context, callback, testOutput); + } + + 
function testOnOfflineContext(callback, sampleRate) { + function testOutput(nodeToInspect, expectedBuffers, callback) { + nodeToInspect.connect(context.destination); + context.oncomplete = function(e) { + var samplesSeen = 0; + while (expectedBuffers.length) { + var expectedBuffer = expectedBuffers.shift(); + is(e.renderedBuffer.numberOfChannels, expectedBuffer.numberOfChannels, + "Correct number of input buffer channels"); + for (var i = 0; i < e.renderedBuffer.numberOfChannels; ++i) { + compareChannels(e.renderedBuffer.getChannelData(i), + expectedBuffer.getChannelData(i), + expectedBuffer.length, + samplesSeen, + undefined, + true); + } + samplesSeen += expectedBuffer.length; + } + callback(); + }; + context.startRendering(); + } + + var context = new OfflineAudioContext(gTest.numberOfChannels, testLength, sampleRate); + runTestOnContext(context, callback, testOutput); + } + + testOnNormalContext(function() { + if (!gTest.skipOfflineContextTests) { + testOnOfflineContext(function() { + testOnOfflineContext(done, 44100); + }, 48000); + } else { + done(); + } + }); + }; + + if (document.readyState !== 'complete') { + addLoadEvent(runTestFunction); + } else { + runTestFunction(); + } +} |