summaryrefslogtreecommitdiffstats
path: root/testing/web-platform/tests/webaudio/the-audio-api
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /testing/web-platform/tests/webaudio/the-audio-api
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloadUXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.lz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.xz
UXP-5f8de423f190bbb79a62f804151bc24824fa32d8.zip
Add m-esr52 at 52.6.0
Diffstat (limited to 'testing/web-platform/tests/webaudio/the-audio-api')
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html107
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/idl-test.html128
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audiolistener-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html15
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html71
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html54
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html48
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-audioprocessingevent-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html135
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/idl-test.html152
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-expected.wavbin0 -> 281400 bytes
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/idl-test.html152
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/test-gainnode.html121
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html59
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html124
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html17
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html31
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/.gitkeep0
-rw-r--r--testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html212
41 files changed, 1426 insertions, 0 deletions
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-analysernode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html
new file mode 100644
index 000000000..72ed88d3c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/idl-test.html
@@ -0,0 +1,107 @@
+<!DOCTYPE html>
+<html class="a">
+<head>
+<title>AudioBuffer IDL Test</title>
+<script src="/resources/testharness.js"></script><script src="/resources/testharnessreport.js"></script><script src="/resources/idlharness.js"></script><script src="/resources/WebIDLParser.js"></script><script src="/webaudio/js/lodash.js"></script><script src="/webaudio/js/vendor-prefixes.js"></script><script src="/webaudio/js/helpers.js"></script><style type="text/css">
+ #event-target-idl,
+ #audio-context-idl
+ { visibility:hidden; height: 0px;}
+ </style>
+</head>
+<body class="a">
+
+ <pre id="event-target-idl">interface EventTarget {
+ void addEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ void removeEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ boolean dispatchEvent(Event event);
+};
+
+/*
+callback interface EventListener {
+ void handleEvent(Event event);
+};
+*/
+// Callback interfaces are not supported yet, but that's ok
+interface EventListener {};
+</pre>
+
+ <pre id="audio-context-idl">callback DecodeSuccessCallback = void (AudioBuffer decodedData);
+callback DecodeErrorCallback = void ();
+
+[Constructor]
+interface AudioContext : EventTarget {
+
+ readonly attribute AudioDestinationNode destination;
+ readonly attribute float sampleRate;
+ readonly attribute double currentTime;
+ readonly attribute AudioListener listener;
+
+ AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
+
+ void decodeAudioData(ArrayBuffer audioData,
+ DecodeSuccessCallback successCallback,
+ optional DecodeErrorCallback errorCallback);
+
+
+ // AudioNode creation
+ AudioBufferSourceNode createBufferSource();
+
+ MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
+
+ MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
+ MediaStreamAudioDestinationNode createMediaStreamDestination();
+
+ ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
+ optional unsigned long numberOfInputChannels = 2,
+ optional unsigned long numberOfOutputChannels = 2);
+
+ AnalyserNode createAnalyser();
+ GainNode createGain();
+ DelayNode createDelay(optional double maxDelayTime = 1.0);
+ BiquadFilterNode createBiquadFilter();
+ WaveShaperNode createWaveShaper();
+ PannerNode createPanner();
+ ConvolverNode createConvolver();
+
+ ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
+ ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
+
+ DynamicsCompressorNode createDynamicsCompressor();
+
+ OscillatorNode createOscillator();
+ PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
+
+};</pre>
+
+ <pre id="audio-buffer-idl">interface AudioBuffer {
+
+ readonly attribute float sampleRate;
+ readonly attribute long length;
+
+ // in seconds
+ readonly attribute double duration;
+
+ readonly attribute long numberOfChannels;
+
+ Float32Array getChannelData(unsigned long channel);
+
+};</pre>
+
+ <div id="log"></div>
+
+ <script>
+(function() {
+ var idl_array = new IdlArray();
+ idl_array.add_untested_idls(document.getElementById("event-target-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-context-idl").textContent);
+ idl_array.add_idls(document.getElementById("audio-buffer-idl").textContent);
+
+ // For these tests the value of the arguments is unimportant.
+ audio_buffer = (new AudioContext).createBuffer(numberOfChannels = 1, length = 256, sampleRate = 44100);
+
+ idl_array.add_objects({AudioBuffer: ["audio_buffer"]});
+ idl_array.test();
+})();
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffersourcenode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiocontext-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/idl-test.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/idl-test.html
new file mode 100644
index 000000000..257b18df5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiodestinationnode-interface/idl-test.html
@@ -0,0 +1,128 @@
+<!DOCTYPE html>
+<html class="a">
+<head>
+<title>AudioDestinationNode IDL Test</title>
+<script src="/resources/testharness.js"></script><script src="/resources/testharnessreport.js"></script><script src="/resources/idlharness.js"></script><script src="/resources/WebIDLParser.js"></script><script src="/webaudio/js/lodash.js"></script><script src="/webaudio/js/vendor-prefixes.js"></script><script src="/webaudio/js/helpers.js"></script><style type="text/css">
+ #event-target-idl,
+ #audio-context-idl,
+ #audio-node-idl
+ { visibility:hidden; height: 0px;}
+ </style>
+</head>
+<body class="a">
+
+ <pre id="event-target-idl">interface EventTarget {
+ void addEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ void removeEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ boolean dispatchEvent(Event event);
+};
+
+/*
+callback interface EventListener {
+ void handleEvent(Event event);
+};
+*/
+// Callback interfaces are not supported yet, but that's ok
+interface EventListener {};
+</pre>
+
+ <pre id="audio-context-idl">callback DecodeSuccessCallback = void (AudioBuffer decodedData);
+callback DecodeErrorCallback = void ();
+
+[Constructor]
+interface AudioContext : EventTarget {
+
+ readonly attribute AudioDestinationNode destination;
+ readonly attribute float sampleRate;
+ readonly attribute double currentTime;
+ readonly attribute AudioListener listener;
+
+ AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
+
+ void decodeAudioData(ArrayBuffer audioData,
+ DecodeSuccessCallback successCallback,
+ optional DecodeErrorCallback errorCallback);
+
+
+ // AudioNode creation
+ AudioBufferSourceNode createBufferSource();
+
+ MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
+
+ MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
+ MediaStreamAudioDestinationNode createMediaStreamDestination();
+
+ ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
+ optional unsigned long numberOfInputChannels = 2,
+ optional unsigned long numberOfOutputChannels = 2);
+
+ AnalyserNode createAnalyser();
+ GainNode createGain();
+ DelayNode createDelay(optional double maxDelayTime = 1.0);
+ BiquadFilterNode createBiquadFilter();
+ WaveShaperNode createWaveShaper();
+ PannerNode createPanner();
+ ConvolverNode createConvolver();
+
+ ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
+ ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
+
+ DynamicsCompressorNode createDynamicsCompressor();
+
+ OscillatorNode createOscillator();
+ PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
+
+};</pre>
+
+ <pre id="audio-node-idl">enum ChannelCountMode {
+ "max",
+ "clamped-max",
+ "explicit"
+};
+
+enum ChannelInterpretation {
+ "speakers",
+ "discrete"
+};
+
+interface AudioNode : EventTarget {
+
+ void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
+ void connect(AudioParam destination, optional unsigned long output = 0);
+ void disconnect(optional unsigned long output = 0);
+
+ readonly attribute AudioContext context;
+ readonly attribute unsigned long numberOfInputs;
+ readonly attribute unsigned long numberOfOutputs;
+
+ // Channel up-mixing and down-mixing rules for all inputs.
+ attribute unsigned long channelCount;
+ attribute ChannelCountMode channelCountMode;
+ attribute ChannelInterpretation channelInterpretation;
+
+};</pre>
+
+ <pre id="audio-destination-node-idl">interface AudioDestinationNode : AudioNode {
+
+ readonly attribute unsigned long maxChannelCount;
+
+};</pre>
+
+ <div id="log"></div>
+
+ <script>
+(function() {
+ var idl_array = new IdlArray();
+ idl_array.add_untested_idls(document.getElementById("event-target-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-context-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-node-idl").textContent);
+ idl_array.add_idls(document.getElementById("audio-destination-node-idl").textContent);
+
+ audio_destination_node = (new AudioContext).destination;
+
+ idl_array.add_objects({AudioDestinationNode: ["audio_destination_node"]});
+ idl_array.test();
+})();
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiolistener-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audiolistener-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiolistener-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html
new file mode 100644
index 000000000..3af44fb7a
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audionode-interface/audionode-connect-return-value.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html>
+<title>Test the return value of connect when connecting two AudioNodes</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+test(function(t) {
+ var context = new OfflineAudioContext(1, 1, 44100);
+ var g1 = context.createGain();
+ var g2 = context.createGain();
+ var rv = g1.connect(g2);
+ assert_equals(rv, g2);
+ var rv = g1.connect(g2);
+ assert_equals(rv, g2);
+}, "connect should return the node connected to.");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html
new file mode 100644
index 000000000..dde8c27b9
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html
@@ -0,0 +1,71 @@
+<!DOCTYPE html>
+<title>Test setValueAtTime with startTime in the past</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+function do_test(t, context) {
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+
+ // Use a ramp of slope 1/sample to measure time.
+ // The end value is the extent of exact precision in single precision float.
+ const rampEnd = Math.pow(2, 24);
+ const rampEndSeconds = rampEnd / context.sampleRate;
+ var test = context.createGain();
+ test.gain.setValueAtTime(0.0, 0.0);
+ test.gain.linearRampToValueAtTime(rampEnd, rampEndSeconds);
+
+ // With a different starting point on the same line, the result should be
+ // the same. |currentTime| may include double precision floating point
+ // rounding errors, so round to nearest integer sample to ignore these.
+ var scheduledSample = Math.round(context.currentTime * context.sampleRate);
+ assert_equals(scheduledSample % 128, 0,
+ "currentTime advances in blocks of 128 samples");
+ var reference = context.createGain();
+ reference.gain.setValueAtTime(scheduledSample, context.currentTime);
+ reference.gain.linearRampToValueAtTime(rampEnd, rampEndSeconds);
+
+ source.connect(test);
+ source.connect(reference);
+
+ var merger = context.createChannelMerger();
+ test.connect(merger, 0, 0);
+ reference.connect(merger, 0, 1);
+
+ var processor = context.createScriptProcessor(0, 2, 0);
+ merger.connect(processor);
+ processor.onaudioprocess =
+ t.step_func_done((e) => {
+ source.stop();
+ processor.onaudioprocess = null;
+
+ var testValue = e.inputBuffer.getChannelData(0)[0];
+ var referenceValue = e.inputBuffer.getChannelData(1)[0];
+
+ assert_equals(testValue, referenceValue,
+ "ramp value matches expected");
+ assert_greater_than_equal(testValue, scheduledSample,
+ "time does not retreat");
+ assert_equals(testValue % 128, 0,
+ "ScriptProcessor blocks align on 128-sample blocks");
+ });
+}
+
+async_test(function(t) {
+ var context = new AudioContext;
+ (function waitForTimeAdvance() {
+ if (context.currentTime == 0) {
+ t.step_timeout(waitForTimeAdvance, 0);
+ } else {
+ do_test(t, context);
+ }
+ })();
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html
new file mode 100644
index 000000000..827aeeabd
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setTargetAtTime-after-event-within-block.html
@@ -0,0 +1,54 @@
+<!DOCTYPE html>
+<title>Test setTargetAtTime after an event in the same processing block</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ const bufferSize = 179;
+ const valueStartOffset = 42;
+ const targetStartOffset = 53;
+ const sampleRate = 48000;
+ const scheduledValue = -0.5;
+
+ var context = new OfflineAudioContext(1, bufferSize, sampleRate);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(scheduledValue, valueStartOffset/sampleRate);
+ gain.gain.setTargetAtTime(scheduledValue, targetStartOffset/sampleRate,
+ 128/sampleRate);
+ gain.connect(context.destination);
+
+ // Apply unit DC signal to gain node.
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+ source.connect(gain);
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, bufferSize, "output buffer length");
+ var output = buffer.getChannelData(0);
+ var i = 0;
+ for (; i < valueStartOffset; ++i) {
+ // "Its default value is 1."
+ assert_equals(output[i], 1.0, "default gain at sample " + i);
+ }
+ for (; i < buffer.length; ++i) {
+ // "If the next event (having time T1) after this SetValue event is
+ // not of type LinearRampToValue or ExponentialRampToValue, then, for
+ // T0≤t<T1: v(t)=V".
+ // "Start exponentially approaching the target value at the given time
+ // with a rate having the given time constant."
+ // The target is the same value, and so the SetValue value continues.
+ assert_equals(output[i], scheduledValue,
+ "scheduled value at sample " + i);
+ }
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html
new file mode 100644
index 000000000..36fde2b99
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioparam-interface/setValueAtTime-within-block.html
@@ -0,0 +1,48 @@
+<!DOCTYPE html>
+<title>Test setValueAtTime with start time not on a block boundary</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ const bufferSize = 200;
+ const offset = 65;
+ const sampleRate = 48000;
+ const scheduledValue = -2.0;
+
+ var context = new OfflineAudioContext(1, bufferSize, sampleRate);
+
+ var gain = context.createGain();
+ gain.gain.setValueAtTime(scheduledValue, offset/sampleRate);
+ gain.connect(context.destination);
+
+ // Apply unit DC signal to gain node.
+ var source = context.createBufferSource();
+ source.buffer =
+ function() {
+ var buffer = context.createBuffer(1, 1, context.sampleRate);
+ buffer.getChannelData(0)[0] = 1.0;
+ return buffer;
+ }();
+ source.loop = true;
+ source.start();
+ source.connect(gain);
+
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, bufferSize, "output buffer length");
+ var output = buffer.getChannelData(0);
+ var i = 0;
+ for (; i < offset; ++i) {
+ // "Its default value is 1."
+ assert_equals(output[i], 1.0, "default gain at sample " + i);
+ }
+ for (; i < buffer.length; ++i) {
+ // "If there are no more events after this SetValue event, then for
+ // t≥T0, v(t)=V, where T0 is the startTime parameter and V is the
+ // value parameter."
+ assert_equals(output[i], scheduledValue,
+ "scheduled value at sample " + i);
+ }
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioprocessingevent-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-audioprocessingevent-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioprocessingevent-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-biquadfilternode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelmergernode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-channelsplitternode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html
new file mode 100644
index 000000000..a49ae875b
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-constantsourcenode-interface/test-constantsourcenode.html
@@ -0,0 +1,135 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test the ConstantSourceNode Interface</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+test(function(t) {
+ var ac = new AudioContext();
+
+ var csn = ac.createConstantSource();
+ assert_true(csn.offset.value == 1.0, "Default offset is 1.0");
+
+ csn = new ConstantSourceNode(ac);
+ assert_true(csn.offset.value == 1.0, "Default offset is 1.0");
+
+ csn = new ConstantSourceNode(ac, {offset: -0.25});
+ assert_true(csn.offset.value == -0.25, "Offset can be set during construction");
+}, "ConstantSourceNode can be constructed");
+
+test(function(t) {
+ var ac = new AudioContext();
+
+ var csn = ac.createConstantSource();
+
+ assert_throws("InvalidStateError", function() {
+ csn.stop(1);
+ }, "Start must be called before stop");
+
+ assert_throws("NotSupportedError", function() {
+ csn.start(-1);
+ }, "When can not be negative");
+
+ csn.start(0);
+ assert_throws("NotSupportedError", function() {
+ csn.stop(-1);
+ }, "When can not be negative");
+}, "ConstantSourceNode stop and start");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+ var csn = ac.createConstantSource();
+ csn.connect(ac.destination);
+ csn.start()
+ csn.stop(1024/44100)
+ csn.onended = function(e) {
+ t.step(function() {
+ assert_true(e.type == "ended", "Event type should be 'ended', received: " + e.type);
+ });
+ t.done();
+ }
+ ac.startRendering();
+}, "ConstantSourceNode onended event");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+ var csn = ac.createConstantSource();
+ csn.connect(ac.destination);
+ csn.start(512/44100)
+ csn.stop(1024/44100)
+
+ ac.oncomplete = function(e) {
+ t.step(function() {
+ var result = e.renderedBuffer.getChannelData(0);
+ for (var i = 0; i < 2048; ++i) {
+ if (i >= 512 && i < 1024) {
+ assert_true(result[i] == 1.0, "sample " + i + " should equal 1.0");
+ } else {
+ assert_true(result[i] == 0.0, "sample " + i + " should equal 0.0");
+ }
+ }
+ });
+ t.done();
+ }
+
+ ac.startRendering();
+}, "ConstantSourceNode start and stop when work");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+ var csn = ac.createConstantSource();
+ csn.offset.value = 0.25;
+ csn.connect(ac.destination);
+ csn.start()
+
+ ac.oncomplete = function(e) {
+ t.step(function() {
+ var result = e.renderedBuffer.getChannelData(0);
+ for (var i = 0; i < 2048; ++i) {
+ assert_true(result[i] == 0.25, "sample " + i + " should equal 0.25");
+ }
+ });
+ t.done();
+ }
+
+ ac.startRendering();
+}, "ConstantSourceNode with no automation");
+
+async_test(function(t) {
+ var ac = new OfflineAudioContext(1, 2048, 44100);
+
+ var timeConstant = 2.0;
+ var offsetStart = 0.25;
+ var offsetEnd = 0.1;
+
+ var csn = ac.createConstantSource();
+ csn.offset.value = offsetStart;
+ csn.offset.setTargetAtTime(offsetEnd, 1024/ac.sampleRate, timeConstant);
+ csn.connect(ac.destination);
+ csn.start()
+
+ ac.oncomplete = function(e) {
+ t.step(function() {
+ // create buffer with expected values
+ var buffer = ac.createBuffer(1, 2048, ac.sampleRate);
+ for (var i = 0; i < 2048; ++i) {
+ if (i < 1024) {
+ buffer.getChannelData(0)[i] = offsetStart;
+ } else {
+ time = (i-1024)/ac.sampleRate;
+ buffer.getChannelData(0)[i] = offsetEnd + (offsetStart - offsetEnd)*Math.exp(-time/timeConstant);
+ }
+ }
+
+ var result = e.renderedBuffer.getChannelData(0);
+ var expected = buffer.getChannelData(0);
+ for (var i = 0; i < 2048; ++i) {
+ assert_true(Math.abs(result[i] - expected[i]) < 1e-6, "sample " + i + " should equal " + expected[i]);
+ }
+ });
+ t.done();
+ }
+
+ ac.startRendering();
+}, "ConstantSourceNode with automation");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/idl-test.html b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/idl-test.html
new file mode 100644
index 000000000..4587e39c0
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-delaynode-interface/idl-test.html
@@ -0,0 +1,152 @@
+<!DOCTYPE html>
+<html class="a">
+<head>
+<title>DelayNode IDL Test</title>
+<script src="/resources/testharness.js"></script><script src="/resources/testharnessreport.js"></script><script src="/resources/idlharness.js"></script><script src="/resources/WebIDLParser.js"></script><script src="/webaudio/js/lodash.js"></script><script src="/webaudio/js/vendor-prefixes.js"></script><script src="/webaudio/js/helpers.js"></script><style type="text/css">
+ #event-target-idl,
+ #audio-context-idl,
+ #audio-node-idl,
+ #audio-param-idl
+ { visibility:hidden; height: 0px;}
+ </style>
+</head>
+<body class="a">
+
+ <pre id="event-target-idl">interface EventTarget {
+ void addEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ void removeEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ boolean dispatchEvent(Event event);
+};
+
+/*
+callback interface EventListener {
+ void handleEvent(Event event);
+};
+*/
+// Callback interfaces are not supported yet, but that's ok
+interface EventListener {};
+</pre>
+
+ <pre id="audio-context-idl">callback DecodeSuccessCallback = void (AudioBuffer decodedData);
+callback DecodeErrorCallback = void ();
+
+[Constructor]
+interface AudioContext : EventTarget {
+
+ readonly attribute AudioDestinationNode destination;
+ readonly attribute float sampleRate;
+ readonly attribute double currentTime;
+ readonly attribute AudioListener listener;
+
+ AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
+
+ void decodeAudioData(ArrayBuffer audioData,
+ DecodeSuccessCallback successCallback,
+ optional DecodeErrorCallback errorCallback);
+
+
+ // AudioNode creation
+ AudioBufferSourceNode createBufferSource();
+
+ MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
+
+ MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
+ MediaStreamAudioDestinationNode createMediaStreamDestination();
+
+ ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
+ optional unsigned long numberOfInputChannels = 2,
+ optional unsigned long numberOfOutputChannels = 2);
+
+ AnalyserNode createAnalyser();
+ GainNode createGain();
+ DelayNode createDelay(optional double maxDelayTime = 1.0);
+ BiquadFilterNode createBiquadFilter();
+ WaveShaperNode createWaveShaper();
+ PannerNode createPanner();
+ ConvolverNode createConvolver();
+
+ ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
+ ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
+
+ DynamicsCompressorNode createDynamicsCompressor();
+
+ OscillatorNode createOscillator();
+ PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
+
+};</pre>
+
+ <pre id="audio-node-idl">enum ChannelCountMode {
+ "max",
+ "clamped-max",
+ "explicit"
+};
+
+enum ChannelInterpretation {
+ "speakers",
+ "discrete"
+};
+
+interface AudioNode : EventTarget {
+
+ void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
+ void connect(AudioParam destination, optional unsigned long output = 0);
+ void disconnect(optional unsigned long output = 0);
+
+ readonly attribute AudioContext context;
+ readonly attribute unsigned long numberOfInputs;
+ readonly attribute unsigned long numberOfOutputs;
+
+ // Channel up-mixing and down-mixing rules for all inputs.
+ attribute unsigned long channelCount;
+ attribute ChannelCountMode channelCountMode;
+ attribute ChannelInterpretation channelInterpretation;
+
+};</pre>
+
+ <pre id="audio-param-idl">interface AudioParam {
+
+ attribute float value;
+ readonly attribute float defaultValue;
+
+ // Parameter automation.
+ void setValueAtTime(float value, double startTime);
+ void linearRampToValueAtTime(float value, double endTime);
+ void exponentialRampToValueAtTime(float value, double endTime);
+
+ // Exponentially approach the target value with a rate having the given time constant.
+ void setTargetAtTime(float target, double startTime, double timeConstant);
+
+ // Sets an array of arbitrary parameter values starting at time for the given duration.
+ // The number of values will be scaled to fit into the desired duration.
+ void setValueCurveAtTime(Float32Array values, double startTime, double duration);
+
+ // Cancels all scheduled parameter changes with times greater than or equal to startTime.
+ void cancelScheduledValues(double startTime);
+
+};</pre>
+
+<pre id="delay-node-idl">interface DelayNode : AudioNode {
+
+ readonly attribute AudioParam delayTime;
+
+};</pre>
+
+ <div id="log"></div>
+
+ <script>
+(function() {
+ var idl_array = new IdlArray();
+ idl_array.add_untested_idls(document.getElementById("event-target-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-context-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-node-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-param-idl").textContent);
+ idl_array.add_idls(document.getElementById("delay-node-idl").textContent);
+
+ delay_node = (new AudioContext).createDelay();
+
+ idl_array.add_objects({DelayNode: ["delay_node"]});
+ idl_array.test();
+})();
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-dynamicscompressornode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-expected.wav b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-expected.wav
new file mode 100644
index 000000000..b445bd8a6
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/gain-expected.wav
Binary files differ
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/idl-test.html b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/idl-test.html
new file mode 100644
index 000000000..dea13b179
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/idl-test.html
@@ -0,0 +1,152 @@
+<!DOCTYPE html>
+<html class="a">
+<head>
+<title>GainNode IDL Test</title>
+<script src="/resources/testharness.js"></script><script src="/resources/testharnessreport.js"></script><script src="/resources/idlharness.js"></script><script src="/resources/WebIDLParser.js"></script><script src="/webaudio/js/lodash.js"></script><script src="/webaudio/js/vendor-prefixes.js"></script><script src="/webaudio/js/helpers.js"></script><style type="text/css">
+ #event-target-idl,
+ #audio-context-idl,
+ #audio-node-idl,
+ #audio-param-idl
+ { visibility:hidden; height: 0px;}
+ </style>
+</head>
+<body class="a">
+
+ <pre id="event-target-idl">interface EventTarget {
+ void addEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ void removeEventListener(DOMString type, EventListener? callback, optional boolean capture = false);
+ boolean dispatchEvent(Event event);
+};
+
+/*
+callback interface EventListener {
+ void handleEvent(Event event);
+};
+*/
+// Callback interfaces are not supported yet, but that's ok
+interface EventListener {};
+</pre>
+
+ <pre id="audio-context-idl">callback DecodeSuccessCallback = void (AudioBuffer decodedData);
+callback DecodeErrorCallback = void ();
+
+[Constructor]
+interface AudioContext : EventTarget {
+
+ readonly attribute AudioDestinationNode destination;
+ readonly attribute float sampleRate;
+ readonly attribute double currentTime;
+ readonly attribute AudioListener listener;
+
+ AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate);
+
+ void decodeAudioData(ArrayBuffer audioData,
+ DecodeSuccessCallback successCallback,
+ optional DecodeErrorCallback errorCallback);
+
+
+ // AudioNode creation
+ AudioBufferSourceNode createBufferSource();
+
+ MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement);
+
+ MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream);
+ MediaStreamAudioDestinationNode createMediaStreamDestination();
+
+ ScriptProcessorNode createScriptProcessor(optional unsigned long bufferSize = 0,
+ optional unsigned long numberOfInputChannels = 2,
+ optional unsigned long numberOfOutputChannels = 2);
+
+ AnalyserNode createAnalyser();
+ GainNode createGain();
+ DelayNode createDelay(optional double maxDelayTime = 1.0);
+ BiquadFilterNode createBiquadFilter();
+ WaveShaperNode createWaveShaper();
+ PannerNode createPanner();
+ ConvolverNode createConvolver();
+
+ ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6);
+ ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6);
+
+ DynamicsCompressorNode createDynamicsCompressor();
+
+ OscillatorNode createOscillator();
+ PeriodicWave createPeriodicWave(Float32Array real, Float32Array imag);
+
+};</pre>
+
+ <pre id="audio-node-idl">enum ChannelCountMode {
+ "max",
+ "clamped-max",
+ "explicit"
+};
+
+enum ChannelInterpretation {
+ "speakers",
+ "discrete"
+};
+
+interface AudioNode : EventTarget {
+
+ void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0);
+ void connect(AudioParam destination, optional unsigned long output = 0);
+ void disconnect(optional unsigned long output = 0);
+
+ readonly attribute AudioContext context;
+ readonly attribute unsigned long numberOfInputs;
+ readonly attribute unsigned long numberOfOutputs;
+
+ // Channel up-mixing and down-mixing rules for all inputs.
+ attribute unsigned long channelCount;
+ attribute ChannelCountMode channelCountMode;
+ attribute ChannelInterpretation channelInterpretation;
+
+};</pre>
+
+ <pre id="audio-param-idl">interface AudioParam {
+
+ attribute float value;
+ readonly attribute float defaultValue;
+
+ // Parameter automation.
+ void setValueAtTime(float value, double startTime);
+ void linearRampToValueAtTime(float value, double endTime);
+ void exponentialRampToValueAtTime(float value, double endTime);
+
+ // Exponentially approach the target value with a rate having the given time constant.
+ void setTargetAtTime(float target, double startTime, double timeConstant);
+
+ // Sets an array of arbitrary parameter values starting at time for the given duration.
+ // The number of values will be scaled to fit into the desired duration.
+ void setValueCurveAtTime(Float32Array values, double startTime, double duration);
+
+ // Cancels all scheduled parameter changes with times greater than or equal to startTime.
+ void cancelScheduledValues(double startTime);
+
+};</pre>
+
+<pre id="gain-node-idl">interface GainNode : AudioNode {
+
+ readonly attribute AudioParam gain;
+
+};</pre>
+
+ <div id="log"></div>
+
+ <script>
+(function() {
+ var idl_array = new IdlArray();
+ idl_array.add_untested_idls(document.getElementById("event-target-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-context-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-node-idl").textContent);
+ idl_array.add_untested_idls(document.getElementById("audio-param-idl").textContent);
+ idl_array.add_idls(document.getElementById("gain-node-idl").textContent);
+
+ gain_node = (new AudioContext).createGain();
+
+ idl_array.add_objects({GainNode: ["gain_node"]});
+ idl_array.test();
+})();
+ </script>
+</body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/test-gainnode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/test-gainnode.html
new file mode 100644
index 000000000..4f92fbbe5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-gainnode-interface/test-gainnode.html
@@ -0,0 +1,121 @@
+<!doctype html>
+
+<!--
+Tests that GainNode is properly scaling the gain.
+We'll render 11 notes, starting at a gain of 1.0, decreasing in gain by 0.1.
+The 11th note will be of gain 0.0, so it should be silent (at the end in the rendered output).
+
+Based on a test from the WebKit test suite
+(https://github.com/WebKit/webkit/blob/master/LayoutTests/webaudio/gain.html)
+-->
+
+<html class="a">
+ <head>
+ <title>GainNode interface</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/lodash.js"></script>
+ <script src="/webaudio/js/vendor-prefixes.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ <script src="/webaudio/js/buffer-loader.js"></script>
+ </head>
+ <body class="a">
+ <div id="log"></div>
+ <script>
+var gainNodeTest = async_test("GainNode");
+
+var sampleRate = 44100.0;
+var bufferDurationSeconds = 0.125;
+var numberOfNotes = 11;
+var noteSpacing = bufferDurationSeconds + 0.020; // leave 20ms of silence between each "note"
+var lengthInSeconds = numberOfNotes * noteSpacing;
+
+var context = 0;
+var expectedBuffer = 0;
+var actualBuffer = 0;
+var sinWaveBuffer = 0;
+
+function createSinWaveBuffer(lengthInSeconds, frequency) {
+ var audioBuffer = context.createBuffer(2, lengthInSeconds * sampleRate, sampleRate);
+
+ var n = audioBuffer.length;
+ var channelL = audioBuffer.getChannelData(0);
+ var channelR = audioBuffer.getChannelData(1);
+
+ for (var i = 0; i < n; ++i) {
+ channelL[i] = Math.sin(frequency * 2.0*Math.PI * i / sampleRate);
+ channelR[i] = channelL[i];
+ }
+
+ return audioBuffer;
+}
+
+function playNote(time, gain) {
+ var source = context.createBufferSource();
+ source.buffer = sinWaveBuffer;
+
+ var gainNode = context.createGain();
+ gainNode.gain.value = gain;
+
+ source.connect(gainNode);
+ gainNode.connect(context.destination);
+
+ source.start(time);
+}
+
+function loadExpectedBuffer(event) {
+ actualBuffer = event.renderedBuffer;
+
+ bufferLoader = new BufferLoader(
+ context,
+ ['/webaudio/the-audio-api/the-gainnode-interface/gain-expected.wav'],
+ bufferLoadCompleted
+ );
+ bufferLoader.load();
+};
+
+function bufferLoadCompleted(buffer) {
+ compareExpectedWithActualBuffer(buffer);
+};
+
+setup( function() {
+ // Create offline audio context.
+ context = new OfflineAudioContext(2, sampleRate * lengthInSeconds, sampleRate);
+
+ // Create a buffer for a short "note".
+ sinWaveBuffer = createSinWaveBuffer(bufferDurationSeconds, 880.0);
+
+ // Render 11 notes, starting at a gain of 1.0, decreasing in gain by 0.1.
+ // The last note will be of gain 0.0, so shouldn't be perceptible in the rendered output.
+ for (var i = 0; i < numberOfNotes; ++i) {
+ var time = i * noteSpacing;
+ var gain = 1.0 - i / (numberOfNotes - 1);
+ playNote(time, gain);
+ }
+
+ context.oncomplete = loadExpectedBuffer;
+ context.startRendering();
+}, {timeout: 10000});
+
+function compareExpectedWithActualBuffer(expected) {
+ var expectedBuffer = expected[0];
+
+ gainNodeTest.step(function() {
+ assert_array_approx_equals(expectedBuffer.getChannelData(0),
+ actualBuffer.getChannelData(0),
+ 1e-4,
+ "comparing expected and rendered buffers (channel 0)");
+ });
+
+ gainNodeTest.step(function() {
+ assert_array_approx_equals(expectedBuffer.getChannelData(1),
+ actualBuffer.getChannelData(1),
+ 1e-4,
+ "comparing expected and rendered buffers (channel 1)");
+ });
+
+ gainNodeTest.done();
+};
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html
new file mode 100644
index 000000000..61c11ffc5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-iirfilternode-interface/test-iirfilternode.html
@@ -0,0 +1,59 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Test the IIRFilterNode Interface</title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+test(function(t) {
+ var ac = new AudioContext();
+
+ function check_args(arg1, arg2, err, desc) {
+ test(function() {
+ assert_throws(err, function() {
+ ac.createIIRFilter(arg1, arg2)
+ })
+ }, desc)
+ }
+
+ check_args([], [1.0], 'NotSupportedError',
+ 'feedforward coefficients can not be empty');
+
+ check_args([1.0], [], 'NotSupportedError',
+ 'feedback coefficients can not be empty');
+
+ var coeff = new Float32Array(21)
+ coeff[0] = 1.0;
+
+ check_args(coeff, [1.0], 'NotSupportedError',
+ 'more than 20 feedforward coefficients can not be used');
+
+ check_args([1.0], coeff, 'NotSupportedError',
+ 'more than 20 feedback coefficients can not be used');
+
+ check_args([0.0, 0.0], [1.0], 'InvalidStateError',
+ 'at least one feedforward coefficient must be non-zero');
+
+ check_args([0.5, 0.5], [0.0], 'InvalidStateError',
+ 'the first feedback coefficient must be non-zero');
+
+}, "IIRFilterNode coefficients are checked properly");
+
+test(function(t) {
+ var ac = new AudioContext();
+
+ var frequencies = new Float32Array([-1.0, ac.sampleRate*0.5 - 1.0, ac.sampleRate]);
+ var magResults = new Float32Array(3);
+ var phaseResults = new Float32Array(3);
+
+ var filter = ac.createIIRFilter([0.5, 0.5], [1.0]);
+ filter.getFrequencyResponse(frequencies, magResults, phaseResults);
+
+ assert_true(isNaN(magResults[0]), "Invalid input frequency should give NaN magnitude response");
+ assert_true(!isNaN(magResults[1]), "Valid input frequency should not give NaN magnitude response");
+ assert_true(isNaN(magResults[2]), "Invalid input frequency should give NaN magnitude response");
+ assert_true(isNaN(phaseResults[0]), "Invalid input frequency should give NaN phase response");
+ assert_true(!isNaN(phaseResults[1]), "Valid input frequency should not give NaN phase response");
+ assert_true(isNaN(phaseResults[2]), "Invalid input frequency should give NaN phase response");
+
+}, "IIRFilterNode getFrequencyResponse handles invalid frequencies properly");
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html
new file mode 100644
index 000000000..ba6eec668
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/mediaElementAudioSourceToScriptProcessorTest.html
@@ -0,0 +1,124 @@
+<!doctype html>
+
+<!--
+Tests that a created MediaElementSourceNode that is passed through
+a script processor passes the stream data.
+Then the script processor saves the input buffers it gets to a temporary
+array, and after the playback has stopped, the contents are compared
+to those of a loaded AudioBuffer with the same source.
+
+Somewhat similar to a test from Mozilla:
+(http://dxr.mozilla.org/mozilla-central/source/content/media/webaudio/test/test_mediaElementAudioSourceNode.html?force=1)
+-->
+
+<html class="a">
+ <head>
+ <title>MediaElementAudioSource interface test (to scriptProcessor)</title>
+ <script src="/resources/testharness.js"></script>
+ <script src="/resources/testharnessreport.js"></script>
+ <script src="/webaudio/js/lodash.js"></script>
+ <script src="/webaudio/js/vendor-prefixes.js"></script>
+ <script src="/webaudio/js/helpers.js"></script>
+ <script src="/webaudio/js/buffer-loader.js"></script>
+ </head>
+ <body class="a">
+ <div id="log"></div>
+ <script>
+ var elementSourceTest = async_test("Element Source tests completed");
+
+ var src = '/webaudio/resources/sin_440Hz_-6dBFS_1s.wav';
+ var BUFFER_SIZE = 2048;
+ var context = null;
+ var actualBufferArrayC0 = new Float32Array(0);
+ var actualBufferArrayC1 = new Float32Array(0);
+ var audio = null, source = null, processor = null
+
+ function loadExpectedBuffer(event) {
+ bufferLoader = new BufferLoader(
+ context,
+ [src],
+ bufferLoadCompleted
+ );
+ bufferLoader.load();
+ };
+
+ function bufferLoadCompleted(buffer) {
+ runTests(buffer);
+ };
+
+ function concatTypedArray(arr1, arr2) {
+ var result = new Float32Array(arr1.length + arr2.length);
+ result.set(arr1);
+ result.set(arr2, arr1.length);
+ return result;
+ }
+
+ // Create Audio context
+ context = new AudioContext();
+
+ // Create an audio element, and a media element source
+ audio = document.createElement('audio');
+ audio.src = src;
+ source = context.createMediaElementSource(audio);
+
+function processListener (e) {
+ actualBufferArrayC0 = concatTypedArray(actualBufferArrayC0, e.inputBuffer.getChannelData(0));
+ actualBufferArrayC1 = concatTypedArray(actualBufferArrayC1, e.inputBuffer.getChannelData(1));
+}
+
+ // Create a processor node to copy the input to the actual buffer
+ processor = context.createScriptProcessor(BUFFER_SIZE);
+ source.connect(processor);
+ processor.connect(context.destination);
+ processor.addEventListener('audioprocess', processListener);
+
+      // When media playback has ended, begin loading the expected buffer for comparison
+ audio.addEventListener("ended", function(e) {
+ // Setting a timeout since we need audioProcess event to run for all samples
+ window.setTimeout(loadExpectedBuffer, 50);
+ });
+
+ audio.play();
+
+ function runTests(expected) {
+ source.disconnect();
+ processor.disconnect();
+
+ // firefox seems to process events after disconnect
+ processor.removeEventListener('audioprocess', processListener)
+
+ var expectedBuffer = expected[0];
+
+ // Trim the actual elements because we don't have a fine-grained
+ // control over the start and end time of recording the data.
+ var actualTrimmedC0 = trimEmptyElements(actualBufferArrayC0);
+ var actualTrimmedC1 = trimEmptyElements(actualBufferArrayC1);
+ var expectedLength = trimEmptyElements(expectedBuffer.getChannelData(0)).length;
+
+ // Test that there is some data.
+ test(function() {
+ assert_greater_than(actualTrimmedC0.length, 0,
+ "processed data array (C0) length greater than 0");
+ assert_greater_than(actualTrimmedC1.length, 0,
+ "processed data array (C1) length greater than 0");
+ }, "Channel 0 processed some data");
+
+ // Test the actual contents of the 1st and second channel.
+ test(function() {
+ assert_array_approx_equals(
+ actualTrimmedC0,
+ trimEmptyElements(expectedBuffer.getChannelData(0)),
+ 1e-4,
+ "comparing expected and rendered buffers (channel 0)");
+ assert_array_approx_equals(
+ actualTrimmedC1,
+ trimEmptyElements(expectedBuffer.getChannelData(1)),
+ 1e-4,
+ "comparing expected and rendered buffers (channel 1)");
+ }, "All data processed correctly");
+
+ elementSourceTest.done();
+ };
+ </script>
+ </body>
+</html>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiodestinationnode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediastreamaudiosourcenode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html
new file mode 100644
index 000000000..ee976f7f7
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-offlineaudiocontext-interface/current-time-block-size.html
@@ -0,0 +1,17 @@
+<!DOCTYPE html>
+<title>Test currentTime at completion of OfflineAudioContext rendering</title>
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script>
+promise_test(function() {
+ // sampleRate is a power of two so that time can be represented exactly
+ // in double currentTime.
+ var context = new OfflineAudioContext(1, 1, 65536);
+ return context.startRendering().
+ then(function(buffer) {
+ assert_equals(buffer.length, 1, "buffer length");
+ assert_equals(context.currentTime, 128 / context.sampleRate,
+ "currentTime at completion");
+ });
+});
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-oscillatornode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html
new file mode 100644
index 000000000..74bbdc6fe
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/test-pannernode-automation.html
@@ -0,0 +1,31 @@
+<!doctype html>
+<meta charset=utf-8>
+<title></title>
+<script src=/resources/testharness.js></script>
+<script src=/resources/testharnessreport.js></script>
+<script>
+promise_test(function(t) {
+ var ac = new OfflineAudioContext(2, 2048, 44100);
+ var panner = ac.createPanner();
+ panner.positionX.value = -1;
+ panner.positionY.value = -1;
+ panner.positionZ.value = 1;
+ panner.positionX.setValueAtTime(1, 1024/ac.sampleRate);
+ var osc = ac.createOscillator();
+ osc.connect(panner);
+ panner.connect(ac.destination);
+ osc.start()
+ return ac.startRendering().then(function(buffer) {
+ var left = buffer.getChannelData(0);
+ var right = buffer.getChannelData(1);
+ for (var i = 0; i < 2048; ++i) {
+ if (i < 1024) {
+ assert_true(Math.abs(left[i]) >= Math.abs(right[i]), "index " + i + " should be on the left");
+ } else {
+ assert_true(Math.abs(left[i]) < Math.abs(right[i]), "index " + i + " should be on the right");
+ }
+ }
+ });
+}, "PannerNode AudioParam automation works properly");
+
+</script>
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-periodicwave-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-scriptprocessornode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/.gitkeep b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/.gitkeep
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/.gitkeep
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html
new file mode 100644
index 000000000..791b74a6c
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-waveshapernode-interface/curve-tests.html
@@ -0,0 +1,212 @@
+<!doctype html>
+<html>
+<head>
+ <title>WaveShaperNode interface - Curve tests | WebAudio</title>
+
+ <script type="text/javascript" src="/resources/testharness.js"></script>
+ <script type="text/javascript" src="/resources/testharnessreport.js"></script>
+ <script type="text/javascript" src="../../js/vendor-prefixes.js"></script>
+</head>
+<body>
+ <div id="log">
+ </div>
+
+ <script type="text/javascript">
+    // Sample rate (Hz) used for every OfflineAudioContext created in this file.
+    var sampleRate=44100.0;
+    // Absolute tolerance used when comparing rendered samples to expected values.
+    var tolerance=0.01;
+
+    /*
+    Testing that -1, 0 and +1 map correctly to curve (with 1:1 correlation)
+    =======================================================================
+    From the specification:
+    The input signal is nominally within the range -1 -> +1.
+    Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+    to the center value of the curve array.
+    */
+    executeTest(
+        [2.0, -3.0, 4.0],   // curve: first, centre and last element
+        [-1.0, 0, 1.0],     // inputs hitting exactly those three indices
+        [2.0, -3.0, 4.0],   // expected output: identical to the curve values
+        "Testing that -1, 0 and +1 map correctly to curve (with 1:1 correlation)");
+
+    /*
+    Testing interpolation (where inputs don't correlate directly to curve elements)
+    ===============================================================================
+    From the specification:
+    The implementation must perform linear interpolation between adjacent points in the curve.
+    */
+    executeTest(
+        [2.0, -3.0, 4.0],       // curve
+        [-0.5, +0.5, +0.75],    // inputs landing between curve elements
+        [-0.5, +0.5, +2.25],    // expected: linear blends of adjacent curve points
+        "Testing interpolation (where inputs don't correlate directly to curve elements)");
+
+    /*
+    Testing out-of-range inputs (should be mapped to the first/last elements of the curve)
+    ======================================================================================
+    From the specification:
+    Any sample value less than -1 will correspond to the first value in the curve array.
+    Any sample value greater than +1 will correspond to the last value in the curve array.
+    */
+    executeTest(
+        [2.0, -3.0, 4.0],   // curve
+        [-1.5, +1.5],       // inputs outside the nominal -1..+1 range
+        [2.0, 4.0],         // expected: clamped to first/last curve elements
+        "Testing out-of-range inputs (should be mapped to the first/last elements of the curve)");
+
+    /*
+    Testing a 2-element curve (does not have a middle element)
+    ==========================================================
+    From the specification:
+    Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+    to the center value of the curve array.
+    The implementation must perform linear interpolation between adjacent points in the curve.
+    */
+    executeTest(
+        [2.0, -2.0],         // curve with no exact centre element
+        [-1.0, 0, 1.0],      // inputs
+        [2.0, 0.0, -2.0],    // expected: zero input interpolates to the midpoint
+        "Testing a 2-element curve (does not have a middle element)");
+
+    /*
+    Testing a 4-element curve (does not have a middle element)
+    ==========================================================
+    From the specification:
+    Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+    to the center value of the curve array.
+    The implementation must perform linear interpolation between adjacent points in the curve.
+    */
+    executeTest(
+        [1.0, 2.0, 4.0, 7.0],   // curve with no exact centre element
+        [-1.0, 0, 1.0],         // inputs
+        [1.0, 3.0, 7.0],        // expected: zero input interpolates between 2.0 and 4.0
+        "Testing a 4-element curve (does not have a middle element)");
+
+ /*
+ Testing a huge curve
+ ====================
+ From the specification:
+ Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+ to the center value of the curve array.
+ */
+ (function() {
+ var bigCurve=[];
+ for(var i=0;i<=60000;i++) { bigCurve.push(i/3.5435); }
+ var inputData=[-1.0, 0, 1.0];
+ var expectedData=[bigCurve[0], bigCurve[30000], bigCurve[60000]];
+ executeTest(bigCurve, inputData, expectedData, "Testing a huge curve");
+ })();
+
+    /*
+    Testing single-element curve (boundary condition)
+    =================================================
+    From the specification:
+    Each input sample within this range will index into the shaping curve with a signal level of zero corresponding
+    to the center value of the curve array.
+    Any sample value less than -1 will correspond to the first value in the curve array.
+    Any sample value greater than +1 will correspond to the last value in the curve array.
+    The implementation must perform linear interpolation between adjacent points in the curve.
+    Note:
+    A post on the W3C audio mailing list (from one of the Chrises) suggested it would be feasible
+    to use the WaveShaperNode to create constant values.
+    */
+    executeTest(
+        [1.0],                         // single-element curve
+        [-1.0, 0, 1.0, -2.0, 2.0],     // in-range and out-of-range inputs
+        [1.0, 1.0, 1.0, 1.0, 1.0],     // expected: every input maps to the lone element
+        "Testing single-element curve (boundary condition)");
+
+    /*
+    Testing null curve (should return input values)
+    ===============================================
+    From the specification:
+    Initially the curve attribute is null, which means that the WaveShaperNode will pass its input to its output
+    without modification.
+    */
+    executeTest(
+        null,                    // no curve: node passes input through unmodified
+        [-1.0, 0, 1.0, 2.0],     // inputs
+        [-1.0, 0.0, 1.0, 2.0],   // expected: identical to the inputs
+        "Testing null curve (should return input values)");
+
+    /*
+    Testing zero-element curve (unspecified result)
+    ===============================================
+    From the specification:
+    Unspecified result (I assume it will be treated in the same way as a null curve).
+    Note:
+    Mozilla test_waveShaperNoCurve.html indicates they expect same results as a null curve.
+    */
+    executeTest(
+        [],                      // empty curve: expected to behave like a null curve
+        [-1.0, 0, 1.0, 2.0],     // inputs
+        [-1.0, 0.0, 1.0, 2.0],   // expected: identical to the inputs
+        "Testing zero-element curve (unspecified result)");
+
+
+ /**
+ * Function that does the actual testing (using an asynchronous test).
+ * @param {?Array.<number>} curveData - Array containing values for the WaveShaper curve.
+ * @param {!Array.<number>} inputData - Array containing values for the input stream.
+ * @param {!Array.<number>} expectedData - Array containing expected results for each of the corresponding inputs.
+ * @param {!string} testName - Name of the test case.
+ */
+ function executeTest(curveData, inputData, expectedData, testName) {
+ var stTest=async_test("WaveShaperNode - "+testName);
+
+ // Create offline audio context.
+ var ac=new OfflineAudioContext(1, inputData.length, sampleRate);
+
+ // Create the WaveShaper and its curve.
+ var waveShaper=ac.createWaveShaper();
+ if(curveData!=null) {
+ var curve=new Float32Array(curveData.length);
+ for(var i=0;i<curveData.length;i++) { curve[i]=curveData[i]; }
+ waveShaper.curve=curve;
+ }
+ waveShaper.connect(ac.destination);
+
+ // Create buffer containing the input values.
+ var inputBuffer=ac.createBuffer(1, Math.max(inputData.length, 2), sampleRate);
+ var d=inputBuffer.getChannelData(0);
+ for(var i=0;i<inputData.length;i++) { d[i]=inputData[i]; }
+
+ // Play the input buffer through the WaveShaper.
+ var src=ac.createBufferSource();
+ src.buffer=inputBuffer;
+ src.connect(waveShaper);
+ src.start();
+
+ // Test the outputs match the expected values.
+ ac.oncomplete=function(ev) {
+ var d=ev.renderedBuffer.getChannelData(0);
+
+ stTest.step(function() {
+ for(var i=0;i<expectedData.length;i++) {
+ var curveText="null";
+ if(curve!=null) {
+ if(curveData.length<20) {
+ curveText=curveData.join(",");
+ } else {
+ curveText="TooBigToDisplay ("+(curveData.length-1)+" elements)";
+ }
+ }
+ var comment="Input="+inputData[i]+", Curve=["+curveText+"] >>> ";
+ assert_approx_equals(d[i], expectedData[i], tolerance, comment);
+ }
+ });
+
+ stTest.done();
+ };
+ ac.startRendering();
+ }
+ </script>
+</body>
+</html>