use superdough in webaudio package

Felix Roos 2023-08-11 12:13:24 +02:00
parent 898bfaeecf
commit 738e714f45
13 changed files with 13 additions and 845 deletions

View File

@ -124,7 +124,7 @@ function effectSend(input, effect, wet) {
return send;
}
export const superdough = async (value, deadline, hapDuration, cps) => {
export const superdough = async (value, deadline, hapDuration) => {
const ac = getAudioContext();
if (typeof value !== 'object') {
throw new Error(
@ -164,7 +164,6 @@ export const superdough = async (value, deadline, hapDuration, cps) => {
size = 2,
velocity = 1,
} = value;
// const { velocity = 1 } = hap.context;
gain *= velocity; // legacy fix for velocity
let toDisconnect = []; // audio nodes that will be disconnected when the source has ended
const onended = () => {

View File

@ -1,6 +1,7 @@
# @strudel.cycles/webaudio
This package contains helpers to make music with strudel and the Web Audio API.
It is a thin binding to [superdough](https://www.npmjs.com/package/superdough).
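For orientation, a hedged sketch of how the package is typically wired up (the function names are taken from this package's exports as shown further down in this commit; the pattern snippet follows the `samples` docstring):
import { initAudioOnFirstClick, samples } from '@strudel.cycles/webaudio';
// resume the AudioContext on the first user gesture (browsers block audio before one)
initAudioOnFirstClick();
// register a sample map; 'github:' URLs resolve to raw.githubusercontent.com
samples('github:tidalcycles/Dirt-Samples/master');
// in the Strudel REPL, a pattern can then be routed to the Web Audio output:
// s("[bd ~]*2, [~ hh]*2, ~ sd").webaudio()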
## Install

View File

@ -1,31 +0,0 @@
if (typeof DelayNode !== 'undefined') {
class FeedbackDelayNode extends DelayNode {
constructor(ac, wet, time, feedback) {
super(ac);
wet = Math.abs(wet);
this.delayTime.value = time;
const feedbackGain = ac.createGain();
feedbackGain.gain.value = Math.min(Math.abs(feedback), 0.995);
this.feedback = feedbackGain.gain;
const delayGain = ac.createGain();
delayGain.gain.value = wet;
this.delayGain = delayGain;
this.connect(feedbackGain);
this.connect(delayGain);
feedbackGain.connect(this);
this.connect = (target) => delayGain.connect(target);
return this;
}
start(t) {
this.delayGain.gain.setValueAtTime(this.delayGain.gain.value, t + this.delayTime.value);
}
}
AudioContext.prototype.createFeedbackDelay = function (wet, time, feedback) {
return new FeedbackDelayNode(this, wet, time, feedback);
};
}
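A hedged usage sketch of the monkey-patched factory above (arguments follow the constructor order wet, time, feedback; the source node is hypothetical):
const ac = new AudioContext();
// 50% wet, 250ms delay time, 70% feedback (clamped internally to 0.995)
const dly = ac.createFeedbackDelay(0.5, 0.25, 0.7);
source.connect(dly); // hypothetical upstream node
dly.connect(ac.destination); // connect() was rerouted to the wet gain output
dly.start(ac.currentTime);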

View File

@ -1,70 +0,0 @@
import { getAudioContext } from './webaudio.mjs';
export function gainNode(value) {
const node = getAudioContext().createGain();
node.gain.value = value;
return node;
}
export const getOscillator = ({ s, freq, t }) => {
// make oscillator
const o = getAudioContext().createOscillator();
o.type = s || 'triangle';
o.frequency.value = Number(freq);
o.start(t);
//o.stop(t + duration + release);
const stop = (time) => o.stop(time);
return { node: o, stop };
};
// alternative to getADSR, returning the gain node plus a stop handle to trigger the release at any time in the future
export const getEnvelope = (attack, decay, sustain, release, velocity, begin) => {
const gainNode = getAudioContext().createGain();
gainNode.gain.setValueAtTime(0, begin);
gainNode.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
gainNode.gain.linearRampToValueAtTime(sustain * velocity, begin + attack + decay); // sustain start
// sustain end
return {
node: gainNode,
stop: (t) => {
//if (typeof gainNode.gain.cancelAndHoldAtTime === 'function') {
// gainNode.gain.cancelAndHoldAtTime(t); // this seems to release instantly....
// see https://discord.com/channels/779427371270275082/937365093082079272/1086053607360712735
//} else {
// firefox: this will glitch when the sustain has not been reached yet at the time of release
gainNode.gain.setValueAtTime(sustain * velocity, t);
//}
gainNode.gain.linearRampToValueAtTime(0, t + release);
},
};
};
export const getADSR = (attack, decay, sustain, release, velocity, begin, end) => {
const gainNode = getAudioContext().createGain();
gainNode.gain.setValueAtTime(0, begin);
gainNode.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
gainNode.gain.linearRampToValueAtTime(sustain * velocity, begin + attack + decay); // sustain start
gainNode.gain.setValueAtTime(sustain * velocity, end); // sustain end
gainNode.gain.linearRampToValueAtTime(0, end + release); // release
// for some reason, using exponential ramping creates little cracklings
/* let t = begin;
gainNode.gain.setValueAtTime(0, t);
gainNode.gain.exponentialRampToValueAtTime(velocity, (t += attack));
const sustainGain = Math.max(sustain * velocity, 0.001);
gainNode.gain.exponentialRampToValueAtTime(sustainGain, (t += decay));
if (end - begin < attack + decay) {
gainNode.gain.cancelAndHoldAtTime(end);
} else {
gainNode.gain.setValueAtTime(sustainGain, end);
}
gainNode.gain.exponentialRampToValueAtTime(0.001, end + release); // release */
return gainNode;
};
export const getFilter = (type, frequency, Q) => {
const filter = getAudioContext().createBiquadFilter();
filter.type = type;
filter.frequency.value = frequency;
filter.Q.value = Q;
return filter;
};
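A hedged sketch combining the helpers above into a voice (oscillator → envelope → filter; the release is triggered later via the returned stop handles):
import { getAudioContext } from './webaudio.mjs';
import { getOscillator, getEnvelope, getFilter } from './helpers.mjs';
const ac = getAudioContext();
const t = ac.currentTime;
const { node: osc, stop } = getOscillator({ s: 'sawtooth', freq: 220, t });
// 10ms attack, 50ms decay, 60% sustain, 200ms release, full velocity
const { node: env, stop: release } = getEnvelope(0.01, 0.05, 0.6, 0.2, 1, t);
osc.connect(env).connect(getFilter('lowpass', 800, 1)).connect(ac.destination);
release(t + 0.5); // start the release ramp half a second in
stop(t + 0.5 + 0.2); // stop the oscillator once the release has finished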

View File

@ -5,6 +5,4 @@ This program is free software: you can redistribute it and/or modify it under th
*/
export * from './webaudio.mjs';
export * from './sampler.mjs';
export * from './helpers.mjs';
export * from './synth.mjs';
export * from 'superdough';

View File

@ -35,7 +35,7 @@
"homepage": "https://github.com/tidalcycles/strudel#readme",
"dependencies": {
"@strudel.cycles/core": "workspace:*",
"nanostores": "^0.8.1"
"superdough": "workspace:*"
},
"devDependencies": {
"vite": "^4.3.3"

View File

@ -1,23 +0,0 @@
if (typeof AudioContext !== 'undefined') {
AudioContext.prototype.impulseResponse = function (duration, channels = 1) {
const length = this.sampleRate * duration;
const impulse = this.createBuffer(channels, length, this.sampleRate);
const IR = impulse.getChannelData(0);
for (let i = 0; i < length; i++) IR[i] = (2 * Math.random() - 1) * Math.pow(1 - i / length, duration);
return impulse;
};
AudioContext.prototype.createReverb = function (duration) {
const convolver = this.createConvolver();
convolver.setDuration = (d) => {
convolver.buffer = this.impulseResponse(d);
convolver.duration = d;
return convolver;
};
convolver.setDuration(duration);
return convolver;
};
}
// TODO: make the reverb more exciting
// check out https://blog.gskinner.com/archives/2019/02/reverb-web-audio-api.html
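A hedged usage sketch of the convolution reverb patched in above (the impulse response is exponentially decaying noise; the source node is hypothetical):
const ac = new AudioContext();
const reverb = ac.createReverb(2); // 2 second tail
source.connect(reverb); // hypothetical upstream node
reverb.connect(ac.destination);
reverb.setDuration(4); // rebuilds the impulse buffer with a longer tail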

View File

@ -1,288 +0,0 @@
import { logger, noteToMidi, valueToMidi } from '@strudel.cycles/core';
import { getAudioContext, registerSound } from './index.mjs';
import { getEnvelope } from './helpers.mjs';
const bufferCache = {}; // string: Promise<ArrayBuffer>
const loadCache = {}; // string: Promise<ArrayBuffer>
export const getCachedBuffer = (url) => bufferCache[url];
function humanFileSize(bytes, si) {
var thresh = si ? 1000 : 1024;
if (bytes < thresh) return bytes + ' B';
var units = si
? ['kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']
: ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'];
var u = -1;
do {
bytes /= thresh;
++u;
} while (bytes >= thresh && u < units.length - 1);
return bytes.toFixed(1) + ' ' + units[u];
}
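Worked example: with si falsy the threshold is 1024, so 1536 bytes divides once and lands on the first binary unit; with si set, it divides by 1000 instead.
humanFileSize(1536); // '1.5 KiB'
humanFileSize(1536, true); // '1.5 kB'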
export const getSampleBufferSource = async (s, n, note, speed, freq, bank, resolveUrl) => {
let transpose = 0;
if (freq !== undefined && note !== undefined) {
logger('[sampler] hap has note and freq. ignoring note', 'warning');
}
let midi = valueToMidi({ freq, note }, 36);
transpose = midi - 36; // assume the sample is pitched at midi note 36
const ac = getAudioContext();
let sampleUrl;
if (Array.isArray(bank)) {
sampleUrl = bank[n % bank.length];
} else {
const midiDiff = (noteA) => noteToMidi(noteA) - midi;
// object format will expect keys as notes
const closest = Object.keys(bank)
.filter((k) => !k.startsWith('_'))
.reduce(
(closest, key) => (!closest || Math.abs(midiDiff(key)) < Math.abs(midiDiff(closest)) ? key : closest),
null,
);
transpose = -midiDiff(closest); // semitones to repitch
sampleUrl = bank[closest][n % bank[closest].length];
}
if (resolveUrl) {
sampleUrl = await resolveUrl(sampleUrl);
}
let buffer = await loadBuffer(sampleUrl, ac, s, n);
if (speed < 0) {
// should this be cached?
buffer = reverseBuffer(buffer);
}
const bufferSource = ac.createBufferSource();
bufferSource.buffer = buffer;
const playbackRate = 1.0 * Math.pow(2, transpose / 12);
// bufferSource.playbackRate.value = Math.pow(2, transpose / 12);
bufferSource.playbackRate.value = playbackRate;
return bufferSource;
};
export const loadBuffer = (url, ac, s, n = 0) => {
const label = s ? `sound "${s}:${n}"` : 'sample';
if (!loadCache[url]) {
logger(`[sampler] load ${label}..`, 'load-sample', { url });
const timestamp = Date.now();
loadCache[url] = fetch(url)
.then((res) => res.arrayBuffer())
.then(async (res) => {
const took = Date.now() - timestamp;
const size = humanFileSize(res.byteLength);
// const downSpeed = humanFileSize(res.byteLength / took);
logger(`[sampler] load ${label}... done! loaded ${size} in ${took}ms`, 'loaded-sample', { url });
const decoded = await ac.decodeAudioData(res);
bufferCache[url] = decoded;
return decoded;
});
}
return loadCache[url];
};
export function reverseBuffer(buffer) {
const ac = getAudioContext();
const reversed = ac.createBuffer(buffer.numberOfChannels, buffer.length, ac.sampleRate);
for (let channel = 0; channel < buffer.numberOfChannels; channel++) {
reversed.copyToChannel(buffer.getChannelData(channel).slice().reverse(), channel);
}
return reversed;
}
export const getLoadedBuffer = (url) => {
return bufferCache[url];
};
export const processSampleMap = (sampleMap, fn, baseUrl = sampleMap._base || '') => {
return Object.entries(sampleMap).forEach(([key, value]) => {
if (typeof value === 'string') {
value = [value];
}
if (typeof value !== 'object') {
throw new Error('wrong sample map format for ' + key);
}
baseUrl = value._base || baseUrl;
const replaceUrl = (v) => (baseUrl + v).replace('github:', 'https://raw.githubusercontent.com/');
if (Array.isArray(value)) {
//return [key, value.map(replaceUrl)];
value = value.map(replaceUrl);
} else {
// must be object
value = Object.fromEntries(
Object.entries(value).map(([note, samples]) => {
return [note, (typeof samples === 'string' ? [samples] : samples).map(replaceUrl)];
}),
);
}
fn(key, value);
});
};
// allows adding a custom url prefix handler
// for example, it is used by the desktop app to load samples starting with '~/music'
let resourcePrefixHandlers = {};
export function registerSamplesPrefix(prefix, resolve) {
resourcePrefixHandlers[prefix] = resolve;
}
// finds a prefix handler for the given url (if any)
function getSamplesPrefixHandler(url) {
const handler = Object.entries(resourcePrefixHandlers).find(([key]) => url.startsWith(key));
if (handler) {
return handler[1];
}
return;
}
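A hedged example of the prefix hook described above (the '~/music' prefix comes from the desktop-app use case mentioned in the comment; the localhost URL is hypothetical):
registerSamplesPrefix('~/music', (url) => {
// the handler takes over loading entirely, so it can rewrite the URL
// and feed it back through samples(), which will then plain-fetch it
return samples(url.replace('~/music', 'http://localhost:5432'));
});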
/**
* Loads a collection of samples to use with `s`
* @example
* samples('github:tidalcycles/Dirt-Samples/master');
* s("[bd ~]*2, [~ hh]*2, ~ sd")
* @example
* samples({
* bd: '808bd/BD0000.WAV',
* sd: '808sd/SD0010.WAV'
* }, 'https://raw.githubusercontent.com/tidalcycles/Dirt-Samples/master/');
* s("[bd ~]*2, [~ hh]*2, ~ sd")
*
*/
export const samples = async (sampleMap, baseUrl = sampleMap._base || '', options = {}) => {
if (typeof sampleMap === 'string') {
// check if custom prefix handler
const handler = getSamplesPrefixHandler(sampleMap);
if (handler) {
return handler(sampleMap);
}
if (sampleMap.startsWith('github:')) {
let [_, path] = sampleMap.split('github:');
path = path.endsWith('/') ? path.slice(0, -1) : path;
sampleMap = `https://raw.githubusercontent.com/${path}/strudel.json`;
}
if (typeof fetch !== 'function') {
// skip fetch outside the browser (e.g. node / testing)
return;
}
const base = sampleMap.split('/').slice(0, -1).join('/');
return fetch(sampleMap)
.then((res) => res.json())
.then((json) => samples(json, baseUrl || json._base || base, options))
.catch((error) => {
console.error(error);
throw new Error(`error loading "${sampleMap}"`);
});
}
const { prebake, tag } = options;
processSampleMap(
sampleMap,
(key, value) =>
registerSound(key, (t, hapValue, onended) => onTriggerSample(t, hapValue, onended, value), {
type: 'sample',
samples: value,
baseUrl,
prebake,
tag,
}),
baseUrl,
);
};
const cutGroups = [];
export async function onTriggerSample(t, value, onended, bank, resolveUrl) {
const {
s,
freq,
unit,
nudge = 0, // TODO: is this in seconds?
cut,
loop,
clip = undefined, // if 1, samples will be cut off when the hap ends
n = 0,
note,
speed = 1, // sample playback speed
begin = 0,
end = 1,
} = value;
// load sample
if (speed === 0) {
// no playback
return;
}
const ac = getAudioContext();
// destructure adsr here, because the default should be different for synths and samples
const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = value;
//const soundfont = getSoundfontKey(s);
const time = t + nudge;
const bufferSource = await getSampleBufferSource(s, n, note, speed, freq, bank, resolveUrl);
// async stuff above took too long?
if (ac.currentTime > t) {
logger(`[sampler] still loading sound "${s}:${n}"`, 'highlight');
// console.warn('sample still loading:', s, n);
return;
}
if (!bufferSource) {
logger(`[sampler] could not load "${s}:${n}"`, 'error');
return;
}
bufferSource.playbackRate.value = Math.abs(speed) * bufferSource.playbackRate.value;
if (unit === 'c') {
// are there other units?
bufferSource.playbackRate.value = bufferSource.playbackRate.value * bufferSource.buffer.duration * 1; //cps;
}
// "The computation of the offset into the sound is performed using the sound buffer's natural sample rate,
// rather than the current playback rate, so even if the sound is playing at twice its normal speed,
// the midway point through a 10-second audio buffer is still 5."
const offset = begin * bufferSource.buffer.duration;
bufferSource.start(time, offset);
const bufferDuration = bufferSource.buffer.duration / bufferSource.playbackRate.value;
/*if (loop) {
// TODO: idea for loopBegin / loopEnd
// if one of [loopBegin, loopEnd] is <= 1, interpret it as normalized
// if one of [loopBegin, loopEnd] is >= 1, interpret it as a sample number
// this will simplify perfectly looping things, while still keeping the normalized option
// the only drawback is that looping between samples 0 and 1 is not possible (which is not a real use case)
bufferSource.loop = true;
bufferSource.loopStart = offset;
bufferSource.loopEnd = offset + duration;
duration = loop * duration;
}*/
const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
bufferSource.connect(envelope);
const out = ac.createGain(); // we need a separate gain for the cutgroups because firefox...
envelope.connect(out);
bufferSource.onended = function () {
bufferSource.disconnect();
envelope.disconnect();
out.disconnect();
onended();
};
const stop = (endTime, playWholeBuffer = clip === undefined) => {
let releaseTime = endTime;
if (playWholeBuffer) {
releaseTime = t + (end - begin) * bufferDuration;
}
bufferSource.stop(releaseTime + release);
releaseEnvelope(releaseTime);
};
const handle = { node: out, bufferSource, stop };
// cut groups
if (cut !== undefined) {
const prev = cutGroups[cut];
if (prev) {
prev.node.gain.setValueAtTime(1, time);
prev.node.gain.linearRampToValueAtTime(0, time + 0.01);
}
cutGroups[cut] = handle;
}
return handle;
}

View File

@ -1,44 +0,0 @@
import { midiToFreq, noteToMidi } from '@strudel.cycles/core';
import { registerSound } from './webaudio.mjs';
import { getOscillator, gainNode, getEnvelope } from './helpers.mjs';
export function registerSynthSounds() {
['sine', 'square', 'triangle', 'sawtooth'].forEach((wave) => {
registerSound(
wave,
(t, value, onended) => {
// destructure adsr here, because the default should be different for synths and samples
const { attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = value;
let { n, note, freq } = value;
// with synths, n and note are the same thing
n = note || n || 36;
if (typeof n === 'string') {
n = noteToMidi(n); // e.g. c3 => 48
}
// get frequency
if (!freq && typeof n === 'number') {
freq = midiToFreq(n); // + 48);
}
// maybe pull out the above frequency resolution?? (there is also getFrequency but it has no default)
// make oscillator
const { node: o, stop } = getOscillator({ t, s: wave, freq });
const g = gainNode(0.3);
// envelope
const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
o.onended = () => {
o.disconnect();
g.disconnect();
onended();
};
return {
node: o.connect(g).connect(envelope),
stop: (releaseTime) => {
releaseEnvelope(releaseTime);
stop(releaseTime + release);
},
};
},
{ type: 'synth', prebake: true },
);
});
}
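A hedged sketch of what this registration enables (once registerSynthSounds has run, the `s` control can select any of the four waveforms):
import { registerSynthSounds } from '@strudel.cycles/webaudio';
registerSynthSounds();
// afterwards, in the REPL: note("c3 e3 g3").s("sawtooth").webaudio()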

View File

@ -1,38 +0,0 @@
// credits to webdirt: https://github.com/dktr0/WebDirt/blob/41342e81d6ad694a2310d491fef7b7e8b0929efe/js-src/Graph.js#L597
export var vowelFormant = {
a: { freqs: [660, 1120, 2750, 3000, 3350], gains: [1, 0.5012, 0.0708, 0.0631, 0.0126], qs: [80, 90, 120, 130, 140] },
e: { freqs: [440, 1800, 2700, 3000, 3300], gains: [1, 0.1995, 0.1259, 0.1, 0.1], qs: [70, 80, 100, 120, 120] },
i: { freqs: [270, 1850, 2900, 3350, 3590], gains: [1, 0.0631, 0.0631, 0.0158, 0.0158], qs: [40, 90, 100, 120, 120] },
o: { freqs: [430, 820, 2700, 3000, 3300], gains: [1, 0.3162, 0.0501, 0.0794, 0.01995], qs: [40, 80, 100, 120, 120] },
u: { freqs: [370, 630, 2750, 3000, 3400], gains: [1, 0.1, 0.0708, 0.0316, 0.01995], qs: [40, 60, 100, 120, 120] },
};
if (typeof GainNode !== 'undefined') {
class VowelNode extends GainNode {
constructor(ac, letter) {
super(ac);
if (!vowelFormant[letter]) {
throw new Error('vowel: unknown vowel ' + letter);
}
const { gains, qs, freqs } = vowelFormant[letter];
const makeupGain = ac.createGain();
for (let i = 0; i < 5; i++) {
const gain = ac.createGain();
gain.gain.value = gains[i];
const filter = ac.createBiquadFilter();
filter.type = 'bandpass';
filter.Q.value = qs[i];
filter.frequency.value = freqs[i];
this.connect(filter);
filter.connect(gain);
gain.connect(makeupGain);
}
makeupGain.gain.value = 8; // how much makeup gain to add?
this.connect = (target) => makeupGain.connect(target);
return this;
}
}
AudioContext.prototype.createVowelFilter = function (letter) {
return new VowelNode(this, letter);
};
}
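A hedged usage sketch: five parallel bandpass filters approximate the chosen vowel's formants, so the node drops in like any other filter (the source node is hypothetical):
const ac = new AudioContext();
const vowel = ac.createVowelFilter('a'); // throws on unknown vowels
source.connect(vowel); // hypothetical upstream node
vowel.connect(ac.destination); // connect() was rerouted to the makeup gain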

View File

@ -5,247 +5,19 @@ This program is free software: you can redistribute it and/or modify it under th
*/
import * as strudel from '@strudel.cycles/core';
import './feedbackdelay.mjs';
import './reverb.mjs';
import { superdough, getAudioContext } from 'superdough';
const { Pattern, logger } = strudel;
import './vowel.mjs';
import workletsUrl from './worklets.mjs?url';
import { getFilter, gainNode } from './helpers.mjs';
import { map } from 'nanostores';
export const soundMap = map();
export function registerSound(key, onTrigger, data = {}) {
soundMap.setKey(key, { onTrigger, data });
}
export function getSound(s) {
return soundMap.get()[s];
}
export const resetLoadedSounds = () => soundMap.set({});
let audioContext;
export const getAudioContext = () => {
if (!audioContext) {
audioContext = new AudioContext();
}
return audioContext;
};
let destination;
const getDestination = () => {
const ctx = getAudioContext();
if (!destination) {
destination = ctx.createGain();
destination.connect(ctx.destination);
}
return destination;
};
export const panic = () => {
getDestination().gain.linearRampToValueAtTime(0, getAudioContext().currentTime + 0.01);
destination = null;
};
let workletsLoading;
function loadWorklets() {
if (workletsLoading) {
return workletsLoading;
}
workletsLoading = getAudioContext().audioWorklet.addModule(workletsUrl);
return workletsLoading;
}
function getWorklet(ac, processor, params) {
const node = new AudioWorkletNode(ac, processor);
Object.entries(params).forEach(([key, value]) => {
node.parameters.get(key).value = value;
});
return node;
}
// this function should be called on first user interaction (to avoid console warning)
export async function initAudio() {
if (typeof window !== 'undefined') {
try {
await getAudioContext().resume();
await loadWorklets();
} catch (err) {
console.warn('could not load AudioWorklet effects coarse, crush and shape', err);
}
}
}
export async function initAudioOnFirstClick() {
return new Promise((resolve) => {
document.addEventListener('click', async function listener() {
await initAudio();
resolve();
document.removeEventListener('click', listener);
});
});
}
let delays = {};
const maxfeedback = 0.98;
function getDelay(orbit, delaytime, delayfeedback, t) {
if (delayfeedback > maxfeedback) {
logger(`delayfeedback was clamped to ${maxfeedback} to save your ears`);
}
delayfeedback = strudel.clamp(delayfeedback, 0, maxfeedback);
if (!delays[orbit]) {
const ac = getAudioContext();
const dly = ac.createFeedbackDelay(1, delaytime, delayfeedback);
dly.start?.(t); // for some reason, this throws when the Audion extension is installed
dly.connect(getDestination());
delays[orbit] = dly;
}
delays[orbit].delayTime.value !== delaytime && delays[orbit].delayTime.setValueAtTime(delaytime, t);
delays[orbit].feedback.value !== delayfeedback && delays[orbit].feedback.setValueAtTime(delayfeedback, t);
return delays[orbit];
}
let reverbs = {};
function getReverb(orbit, duration = 2) {
if (!reverbs[orbit]) {
const ac = getAudioContext();
const reverb = ac.createReverb(duration);
reverb.connect(getDestination());
reverbs[orbit] = reverb;
}
if (reverbs[orbit].duration !== duration) {
reverbs[orbit] = reverbs[orbit].setDuration(duration);
reverbs[orbit].duration = duration;
}
return reverbs[orbit];
}
function effectSend(input, effect, wet) {
const send = gainNode(wet);
input.connect(send);
send.connect(effect);
return send;
}
// export const webaudioOutput = async (t, hap, ct, cps) => {
export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
const ac = getAudioContext();
const hap2value = (hap) => {
hap.ensureObjectValue();
// calculate absolute time
let t = ac.currentTime + deadline;
// destructure
let {
s = 'triangle',
bank,
source,
gain = 0.8,
// low pass
cutoff,
resonance = 1,
// high pass
hcutoff,
hresonance = 1,
// band pass
bandf,
bandq = 1,
//
coarse,
crush,
shape,
pan,
vowel,
delay = 0,
delayfeedback = 0.5,
delaytime = 0.25,
orbit = 1,
room,
size = 2,
} = hap.value;
const { velocity = 1 } = hap.context;
gain *= velocity; // legacy fix for velocity
let toDisconnect = []; // audio nodes that will be disconnected when the source has ended
const onended = () => {
toDisconnect.forEach((n) => n?.disconnect());
};
if (bank && s) {
s = `${bank}_${s}`;
}
// get source AudioNode
let sourceNode;
if (source) {
sourceNode = source(t, hap.value, hapDuration);
} else if (getSound(s)) {
const { onTrigger } = getSound(s);
const soundHandle = await onTrigger(t, hap.value, onended);
if (soundHandle) {
sourceNode = soundHandle.node;
soundHandle.stop(t + hapDuration);
}
} else {
throw new Error(`sound ${s} not found! Is it loaded?`);
}
if (!sourceNode) {
// if onTrigger does not return anything, we will just silently skip
// this can be used for things like speed(0) in the sampler
return;
}
if (ac.currentTime > t) {
logger('[webaudio] skip hap: still loading', ac.currentTime - t);
return;
}
const chain = []; // audio nodes that will be connected to each other sequentially
chain.push(sourceNode);
// gain stage
chain.push(gainNode(gain));
// filters
cutoff !== undefined && chain.push(getFilter('lowpass', cutoff, resonance));
hcutoff !== undefined && chain.push(getFilter('highpass', hcutoff, hresonance));
bandf !== undefined && chain.push(getFilter('bandpass', bandf, bandq));
vowel !== undefined && chain.push(ac.createVowelFilter(vowel));
// effects
coarse !== undefined && chain.push(getWorklet(ac, 'coarse-processor', { coarse }));
crush !== undefined && chain.push(getWorklet(ac, 'crush-processor', { crush }));
shape !== undefined && chain.push(getWorklet(ac, 'shape-processor', { shape }));
// panning
if (pan !== undefined) {
const panner = ac.createStereoPanner();
panner.pan.value = 2 * pan - 1;
chain.push(panner);
}
// last gain
const post = gainNode(1);
chain.push(post);
post.connect(getDestination());
// delay
let delaySend;
if (delay > 0 && delaytime > 0 && delayfeedback > 0) {
const delyNode = getDelay(orbit, delaytime, delayfeedback, t);
delaySend = effectSend(post, delyNode, delay);
}
// reverb
let reverbSend;
if (room > 0 && size > 0) {
const reverbNode = getReverb(orbit, size);
reverbSend = effectSend(post, reverbNode, room);
}
// connect chain elements together
chain.slice(1).reduce((last, current) => last.connect(current), chain[0]);
// toDisconnect = all the node that should be disconnected in onended callback
// this is crucial for performance
toDisconnect = chain.concat([delaySend, reverbSend]);
return { ...hap.value, velocity: hap.context.velocity };
};
export const webaudioOutputTrigger = (t, hap, ct, cps) => webaudioOutput(hap, t - ct, hap.duration / cps, cps);
// TODO: bind logger
export const webaudioOutputTrigger = (t, hap, ct, cps) => superdough(hap2value(hap), t - ct, hap.duration / cps, cps);
export const webaudioOutput = (hap, deadline, hapDuration) => superdough(hap2value(hap), deadline, hapDuration);
Pattern.prototype.webaudio = function () {
// TODO: refactor (t, hap, ct, cps) to (hap, deadline, duration) ?
return this.onTrigger(webaudioOutputTrigger);
};
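A hedged sketch of driving the new superdough-backed output from user code (assuming the sequence helper and the note control from @strudel.cycles/core; controls are available as Pattern methods):
import { sequence } from '@strudel.cycles/core';
import { initAudioOnFirstClick } from '@strudel.cycles/webaudio';
initAudioOnFirstClick();
// each hap is converted via hap2value and handed to superdough for playback
sequence('c3', 'e3', 'g3').note().webaudio();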

View File

@ -1,108 +0,0 @@
// LICENSE GNU General Public License v3.0 see https://github.com/dktr0/WebDirt/blob/main/LICENSE
// all the credit goes to dktr0's webdirt: https://github.com/dktr0/WebDirt/blob/5ce3d698362c54d6e1b68acc47eb2955ac62c793/dist/AudioWorklets.js
// <3
class CoarseProcessor extends AudioWorkletProcessor {
static get parameterDescriptors() {
return [{ name: 'coarse', defaultValue: 1 }];
}
constructor() {
super();
this.notStarted = true;
}
process(inputs, outputs, parameters) {
const input = inputs[0];
const output = outputs[0];
const coarse = parameters.coarse;
const blockSize = 128;
const hasInput = !(input[0] === undefined);
if (hasInput) {
this.notStarted = false;
output[0][0] = input[0][0];
for (let n = 1; n < blockSize; n++) {
for (let o = 0; o < output.length; o++) {
output[o][n] = n % coarse == 0 ? input[0][n] : output[o][n - 1];
}
}
}
return this.notStarted || hasInput;
}
}
registerProcessor('coarse-processor', CoarseProcessor);
class CrushProcessor extends AudioWorkletProcessor {
static get parameterDescriptors() {
return [{ name: 'crush', defaultValue: 0 }];
}
constructor() {
super();
this.notStarted = true;
}
process(inputs, outputs, parameters) {
const input = inputs[0];
const output = outputs[0];
const crush = parameters.crush;
const blockSize = 128;
const hasInput = !(input[0] === undefined);
if (hasInput) {
this.notStarted = false;
if (crush.length === 1) {
const x = Math.pow(2, crush[0] - 1);
for (let n = 0; n < blockSize; n++) {
const value = Math.round(input[0][n] * x) / x;
for (let o = 0; o < output.length; o++) {
output[o][n] = value;
}
}
} else {
for (let n = 0; n < blockSize; n++) {
let x = Math.pow(2, crush[n] - 1);
const value = Math.round(input[0][n] * x) / x;
for (let o = 0; o < output.length; o++) {
output[o][n] = value;
}
}
}
}
return this.notStarted || hasInput;
}
}
registerProcessor('crush-processor', CrushProcessor);
class ShapeProcessor extends AudioWorkletProcessor {
static get parameterDescriptors() {
return [{ name: 'shape', defaultValue: 0 }];
}
constructor() {
super();
this.notStarted = true;
}
process(inputs, outputs, parameters) {
const input = inputs[0];
const output = outputs[0];
const shape0 = parameters.shape[0];
const shape1 = shape0 < 1 ? shape0 : 1.0 - 4e-10;
const shape = (2.0 * shape1) / (1.0 - shape1);
const blockSize = 128;
const hasInput = !(input[0] === undefined);
if (hasInput) {
this.notStarted = false;
for (let n = 0; n < blockSize; n++) {
const value = ((1 + shape) * input[0][n]) / (1 + shape * Math.abs(input[0][n]));
for (let o = 0; o < output.length; o++) {
output[o][n] = value;
}
}
}
return this.notStarted || hasInput;
}
}
registerProcessor('shape-processor', ShapeProcessor);
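These processors are addressed by name through the getWorklet helper in webaudio.mjs; a hedged sketch (getWorklet and getDestination are module-internal, shown here for illustration only):
const shaper = getWorklet(getAudioContext(), 'shape-processor', { shape: 0.6 });
// shape 0.6 gives k = 2*0.6/(1-0.6) = 3, so y = 4x/(1+3|x|):
// x = 0.5 maps to 0.8, x = 1 maps to 1 (soft clipping toward ±1)
source.connect(shaper); // hypothetical upstream node
shaper.connect(getDestination());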

pnpm-lock.yaml generated
View File

@ -490,9 +490,9 @@ importers:
'@strudel.cycles/core':
specifier: workspace:*
version: link:../core
nanostores:
specifier: ^0.8.1
version: 0.8.1
superdough:
specifier: workspace:*
version: link:../superdough
devDependencies:
vite:
specifier: ^4.3.3