diff --git a/packages/webaudio/sampler.mjs b/packages/webaudio/sampler.mjs
index e08b1084..141dd5f7 100644
--- a/packages/webaudio/sampler.mjs
+++ b/packages/webaudio/sampler.mjs
@@ -150,7 +150,7 @@ export const samples = async (sampleMap, baseUrl = sampleMap._base || '', option
         }),
       );
     }
-    setSound(key, (t, hapValue) => onTriggerSample(t, hapValue, value), {
+    setSound(key, (t, hapValue, onended) => onTriggerSample(t, hapValue, onended, value), {
       type: 'sample',
      samples: value,
      baseUrl,
@@ -161,7 +161,7 @@ export const samples = async (sampleMap, baseUrl = sampleMap._base || '', option
 
 const cutGroups = [];
 
-export async function onTriggerSample(t, value, bank) {
+export async function onTriggerSample(t, value, onended, bank) {
   const {
     s,
     freq,
@@ -176,24 +176,19 @@ export async function onTriggerSample(t, value, bank) {
     begin = 0,
     end = 1,
   } = value;
 
-  const ac = getAudioContext();
-  // destructure adsr here, because the default should be different for synths and samples
-  const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = value;
   // load sample
   if (speed === 0) {
     // no playback
     return;
   }
+  const ac = getAudioContext();
+  // destructure adsr here, because the default should be different for synths and samples
+  const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = value;
   //const soundfont = getSoundfontKey(s);
-  let bufferSource;
-  //if (soundfont) {
-  // is soundfont
-  //bufferSource = await globalThis.getFontBufferSource(soundfont, note || n, ac, freq);
-  //} else {
-  // is sample from loaded samples(..)
-  bufferSource = await getSampleBufferSource(s, n, note, speed, freq, bank);
-  //}
+  const time = t + nudge;
+
+  const bufferSource = await getSampleBufferSource(s, n, note, speed, freq, bank);
   // asny stuff above took too long?
   if (ac.currentTime > t) {
     logger(`[sampler] still loading sound "${s}:${n}"`, 'highlight');
@@ -201,7 +196,7 @@ export async function onTriggerSample(t, value, bank) {
     return;
   }
   if (!bufferSource) {
-    console.warn('no buffer source');
+    logger(`[sampler] could not load "${s}:${n}"`, 'error');
     return;
   }
   bufferSource.playbackRate.value = Math.abs(speed) * bufferSource.playbackRate.value;
@@ -212,7 +207,6 @@ export async function onTriggerSample(t, value, bank) {
   // "The computation of the offset into the sound is performed using the sound buffer's natural sample rate,
   // rather than the current playback rate, so even if the sound is playing at twice its normal speed,
   // the midway point through a 10-second audio buffer is still 5."
-  const time = t + nudge;
   const offset = begin * bufferSource.buffer.duration;
   bufferSource.start(time, offset);
   const bufferDuration = bufferSource.buffer.duration / bufferSource.playbackRate.value;
@@ -229,19 +223,33 @@ export async function onTriggerSample(t, value, bank) {
   }*/
   const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
   bufferSource.connect(envelope);
-  if (cut !== undefined) {
-    cutGroups[cut]?.stop(time); // fade out?
-    cutGroups[cut] = bufferSource;
-  }
-  const stop = (endTime) => {
+  bufferSource.onended = function () {
+    bufferSource.disconnect();
+    envelope.disconnect();
+    onended();
+  };
+  const stop = (endTime, playWholeBuffer = !clip) => {
     let releaseTime = endTime;
-    if (!clip) {
+    if (playWholeBuffer) {
       releaseTime = t + (end - begin) * bufferDuration;
     }
     bufferSource.stop(releaseTime + release);
     releaseEnvelope(releaseTime);
   };
-  return { node: envelope, stop };
+  const handle = { node: envelope, bufferSource, stop };
+
+  // cut groups
+  // TODO: sometimes, the cutting won't work for very fast triggering...
+  // it worked before :-/
+  if (cut !== undefined) {
+    const prev = cutGroups[cut];
+    if (prev) {
+      prev.stop(time, false);
+    }
+    cutGroups[cut] = handle;
+  }
+
+  return handle;
 }
 
 /*const getSoundfontKey = (s) => {
@@ -262,3 +270,4 @@ export async function onTriggerSample(t, value, bank) {
   }
   return;
 };*/
+// bufferSource = await globalThis.getFontBufferSource(soundfont, note || n, ac, freq);
diff --git a/packages/webaudio/synth.mjs b/packages/webaudio/synth.mjs
index 426e6573..f3be1ce6 100644
--- a/packages/webaudio/synth.mjs
+++ b/packages/webaudio/synth.mjs
@@ -6,7 +6,7 @@ export function registerSynthSounds() {
   ['sine', 'square', 'triangle', 'sawtooth'].forEach((wave) => {
     setSound(
       wave,
-      (t, value) => {
+      (t, value, onended) => {
        // destructure adsr here, because the default should be different for synths and samples
        const { attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = value;
        let { n, note, freq } = value;
@@ -25,6 +25,11 @@ export function registerSynthSounds() {
        const g = gainNode(0.3);
        // envelope
        const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
+        o.onended = () => {
+          o.disconnect();
+          g.disconnect();
+          onended();
+        };
        return {
          node: o.connect(g).connect(envelope),
          stop: (t) => {
diff --git a/packages/webaudio/webaudio.mjs b/packages/webaudio/webaudio.mjs
index c2bca304..06b2ab49 100644
--- a/packages/webaudio/webaudio.mjs
+++ b/packages/webaudio/webaudio.mjs
@@ -155,8 +155,10 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
   } = hap.value;
   const { velocity = 1 } = hap.context;
   gain *= velocity; // legacy fix for velocity
-  // the chain will hold all audio nodes that connect to each other
-  const chain = [];
+  let toDisconnect = []; // audio nodes that will be disconnected when the source has ended
+  const onended = () => {
+    toDisconnect.forEach((n) => n?.disconnect());
+  };
   if (bank && s) {
     s = `${bank}_${s}`;
   }
@@ -166,7 +168,7 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
     sourceNode = source(t, hap.value);
   } else if (soundMap.get()[s]) {
     const { onTrigger } = soundMap.get()[s];
-    const soundHandle = await onTrigger(t, hap.value);
+    const soundHandle = await onTrigger(t, hap.value, onended);
     if (soundHandle) {
       sourceNode = soundHandle.node;
       soundHandle.stop(t + hapDuration);
@@ -183,6 +185,7 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
     logger('[webaudio] skip hap: still loading', ac.currentTime - t);
     return;
   }
+  const chain = []; // audio nodes that will be connected to each other sequentially
   chain.push(sourceNode);
 
   // gain stage
@@ -227,8 +230,9 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
 
   // connect chain elements together
   chain.slice(1).reduce((last, current) => last.connect(current), chain[0]);
-  // disconnect all nodes when source node has ended:
-  chain[0].onended = () => chain.concat([delaySend, reverbSend]).forEach((n) => n?.disconnect());
+  // toDisconnect = all the node that should be disconnected in onended callback
+  // this is crucial for performance
+  toDisconnect = chain.concat([delaySend, reverbSend]);
 };
 
 export const webaudioOutputTrigger = (t, hap, ct, cps) => webaudioOutput(hap, t - ct, hap.duration / cps, cps);
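
For reference, a minimal sketch (not part of the patch) of the (t, value, onended) contract this change introduces for sounds registered via setSound: assuming setSound, getAudioContext and getEnvelope are the same helpers used in the patched files (import paths omitted), and that the name 'mysine', its parameter defaults and the { type: 'synth' } metadata are made up for illustration:

// sketch only: a hand-registered sound that cooperates with the new onended-based cleanup in webaudioOutput
setSound(
  'mysine',
  (t, value, onended) => {
    const { freq = 220, attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = value;
    const ac = getAudioContext();
    const o = ac.createOscillator();
    o.type = 'sine';
    o.frequency.value = freq;
    o.start(t);
    const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
    o.onended = () => {
      // tear down local nodes first, then notify the output, which disconnects
      // the shared chain (gain, filters, delay/reverb sends) via toDisconnect
      o.disconnect();
      envelope.disconnect();
      onended();
    };
    return {
      node: o.connect(envelope),
      stop: (endTime) => {
        // release the envelope, then stop the source so onended eventually fires
        releaseEnvelope(endTime);
        o.stop(endTime + release);
      },
    };
  },
  { type: 'synth' }, // metadata shape assumed, mirroring the sample entries above
);

webaudioOutput awaits onTrigger, pushes the returned node into its chain, calls stop(t + hapDuration), and only disconnects the chain once the onended callback fires.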