Mirror of https://github.com/eliasstepanik/strudel-docker.git (synced 2026-01-14 23:28:30 +00:00)

Merge pull request #516 from tidalcycles/source-nodes
registerSound API + improved sounds tab + regroup soundfonts

Commit 18d62b32c2
@@ -23,6 +23,15 @@ const generic_params = [
*
*/
[['s', 'n', 'gain'], 'sound'],
/**
* Define a custom webaudio node to use as a sound source.
*
* @name source
* @param {function} getSource
* @synonyms src
*
*/
['source', 'src'],
/**
* Selects the given index from the sample map.
* Numbers too high will wrap around.
@@ -1,4 +1,6 @@
import { toMidi } from '@strudel.cycles/core';
import { getAudioContext, registerSound, getEnvelope } from '@strudel.cycles/webaudio';
import gm from './gm.mjs';

let loadCache = {};
async function loadFont(name) {
@@ -8,7 +10,6 @@ async function loadFont(name) {
const load = async () => {
// TODO: make soundfont source configurable
const url = `https://felixroos.github.io/webaudiofontdata/sound/${name}.js`;
console.log('load font', name, url);
const preset = await fetch(url).then((res) => res.text());
let [_, data] = preset.split('={');
return eval('{' + data);
@@ -114,3 +115,34 @@ async function getBuffer(zone, audioContext) {
}
}
}

export function registerSoundfonts() {
Object.entries(gm).forEach(([name, fonts]) => {
registerSound(
name,
async (time, value, onended) => {
const { note = 'c3', n = 0 } = value;
const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = value;
const font = fonts[n % fonts.length];
const ctx = getAudioContext();
const bufferSource = await getFontBufferSource(font, note, ctx);
bufferSource.start(time);
const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 0.3, time);
bufferSource.connect(envelope);
const stop = (releaseTime) => {
bufferSource.stop(releaseTime + release);
releaseEnvelope(releaseTime);
};
bufferSource.onended = () => {
bufferSource.disconnect();
envelope.disconnect();
onended();
};
return { node: envelope, stop };
},
{ type: 'soundfont', prebake: true, fonts },
);
});
}

registerSoundfonts();
packages/soundfonts/gm.mjs (new file, 1787 lines; diff suppressed because it is too large)
@@ -1,6 +1,6 @@
import { getFontBufferSource } from './fontloader.mjs';
import { getFontBufferSource, registerSoundfonts } from './fontloader.mjs';
import * as soundfontList from './list.mjs';
import { startPresetNote } from 'sfumato';
import { loadSoundfont } from './sfumato.mjs';

export { loadSoundfont, startPresetNote, getFontBufferSource, soundfontList };
export { loadSoundfont, startPresetNote, getFontBufferSource, soundfontList, registerSoundfonts };

File diff suppressed because it is too large.
@@ -1,5 +1,5 @@
import { Pattern, getPlayableNoteValue, toMidi } from '@strudel.cycles/core';
import { getAudioContext } from '@strudel.cycles/webaudio';
import { getAudioContext, registerSound } from '@strudel.cycles/webaudio';
import { loadSoundfont as _loadSoundfont, startPresetNote } from 'sfumato';

Pattern.prototype.soundfont = function (sf, n = 0) {
@@ -21,5 +21,29 @@ export function loadSoundfont(url) {
}
const sf = _loadSoundfont(url);
soundfontCache.set(url, sf);
/*sf.then((font) => {
font.presets.forEach((preset) => {
console.log('preset', preset.header.name);
registerSound(
preset.header.name.replaceAll(' ', '_'),
(time, value, onended) => {
const ctx = getAudioContext();
let { note } = value; // freq ?

const p = font.presets.find((p) => p.header.name === preset.header.name);

if (!p) {
throw new Error('preset not found');
}
const deadline = time; // - ctx.currentTime;
const args = [ctx, p, toMidi(note), deadline];
const stop = startPresetNote(...args);
return { node: undefined, stop };
},
{ type: 'soundfont' },
);
});
//console.log('f', f);
});*/
return sf;
}
packages/webaudio/helpers.mjs (new file, 69 lines)
@@ -0,0 +1,69 @@
import { getAudioContext } from './webaudio.mjs';

export function gainNode(value) {
  const node = getAudioContext().createGain();
  node.gain.value = value;
  return node;
}

export const getOscillator = ({ s, freq, t }) => {
  // make oscillator
  const o = getAudioContext().createOscillator();
  o.type = s || 'triangle';
  o.frequency.value = Number(freq);
  o.start(t);
  //o.stop(t + duration + release);
  const stop = (time) => o.stop(time);
  return { node: o, stop };
};

// alternative to getADSR returning the gain node and a stop handle to trigger the release anytime in the future
export const getEnvelope = (attack, decay, sustain, release, velocity, begin) => {
  const gainNode = getAudioContext().createGain();
  gainNode.gain.setValueAtTime(0, begin);
  gainNode.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
  gainNode.gain.linearRampToValueAtTime(sustain * velocity, begin + attack + decay); // sustain start
  // sustain end
  return {
    node: gainNode,
    stop: (t) => {
      if (typeof gainNode.gain.cancelAndHoldAtTime === 'function') {
        gainNode.gain.cancelAndHoldAtTime(t);
      } else {
        // firefox: this will glitch when the sustain has not been reached yet at the time of release
        gainNode.gain.setValueAtTime(sustain * velocity, t);
      }
      gainNode.gain.linearRampToValueAtTime(0, t + release);
    },
  };
};

export const getADSR = (attack, decay, sustain, release, velocity, begin, end) => {
  const gainNode = getAudioContext().createGain();
  gainNode.gain.setValueAtTime(0, begin);
  gainNode.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
  gainNode.gain.linearRampToValueAtTime(sustain * velocity, begin + attack + decay); // sustain start
  gainNode.gain.setValueAtTime(sustain * velocity, end); // sustain end
  gainNode.gain.linearRampToValueAtTime(0, end + release); // release
  // for some reason, using exponential ramping creates little cracklings
  /* let t = begin;
  gainNode.gain.setValueAtTime(0, t);
  gainNode.gain.exponentialRampToValueAtTime(velocity, (t += attack));
  const sustainGain = Math.max(sustain * velocity, 0.001);
  gainNode.gain.exponentialRampToValueAtTime(sustainGain, (t += decay));
  if (end - begin < attack + decay) {
    gainNode.gain.cancelAndHoldAtTime(end);
  } else {
    gainNode.gain.setValueAtTime(sustainGain, end);
  }
  gainNode.gain.exponentialRampToValueAtTime(0.001, end + release); // release */
  return gainNode;
};

export const getFilter = (type, frequency, Q) => {
  const filter = getAudioContext().createBiquadFilter();
  filter.type = type;
  filter.frequency.value = frequency;
  filter.Q.value = Q;
  return filter;
};
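Editor's note: to make the `getEnvelope` contract above concrete, here is a minimal usage sketch following the call pattern used elsewhere in this commit (synth.mjs and the soundfont loader). `someSourceNode` is a placeholder for any oscillator or buffer source.

```js
// minimal sketch: route a source through getEnvelope, release it later
const ctx = getAudioContext();
const t = ctx.currentTime + 0.1;
const { node: envelope, stop: releaseEnvelope } = getEnvelope(0.01, 0.05, 0.6, 0.2, 0.3, t);
someSourceNode.connect(envelope); // placeholder source node
envelope.connect(ctx.destination);
// later, when the hap ends:
releaseEnvelope(t + 1); // ramps the gain to 0 over the release time
someSourceNode.stop(t + 1 + 0.2); // stop the source after the release tail
```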
@@ -6,3 +6,5 @@ This program is free software: you can redistribute it and/or modify it under th

export * from './webaudio.mjs';
export * from './sampler.mjs';
export * from './helpers.mjs';
export * from './synth.mjs';
@@ -34,7 +34,8 @@
},
"homepage": "https://github.com/tidalcycles/strudel#readme",
"dependencies": {
"@strudel.cycles/core": "workspace:*"
"@strudel.cycles/core": "workspace:*",
"nanostores": "^0.7.4"
},
"devDependencies": {
"vite": "^3.2.2"
@@ -1,5 +1,6 @@
import { logger, toMidi, valueToMidi } from '@strudel.cycles/core';
import { getAudioContext } from './index.mjs';
import { getAudioContext, registerSound } from './index.mjs';
import { getEnvelope } from './helpers.mjs';

const bufferCache = {}; // string: Promise<ArrayBuffer>
const loadCache = {}; // string: Promise<ArrayBuffer>
@@ -20,7 +21,7 @@ function humanFileSize(bytes, si) {
return bytes.toFixed(1) + ' ' + units[u];
}

export const getSampleBufferSource = async (s, n, note, speed, freq) => {
export const getSampleBufferSource = async (s, n, note, speed, freq, bank) => {
let transpose = 0;
if (freq !== undefined && note !== undefined) {
logger('[sampler] hap has note and freq. ignoring note', 'warning');
@@ -29,23 +30,6 @@ export const getSampleBufferSource = async (s, n, note, speed, freq) => {
transpose = midi - 36; // C3 is middle C

const ac = getAudioContext();
// is sample from loaded samples(..)
const samples = getLoadedSamples();
if (!samples) {
throw new Error('no samples loaded');
}
const bank = samples?.[s];
if (!bank) {
throw new Error(
`sample not found: "${s}"`,
// , try one of ${Object.keys(samples)
// .map((s) => `"${s}"`)
// .join(', ')}.
);
}
if (typeof bank !== 'object') {
throw new Error('wrong format for sample bank:', s);
}
let sampleUrl;
if (Array.isArray(bank)) {
sampleUrl = bank[n % bank.length];
@@ -107,8 +91,6 @@ export const getLoadedBuffer = (url) => {
return bufferCache[url];
};

let sampleCache = { current: undefined };

/**
* Loads a collection of samples to use with `s`
* @example
@@ -123,7 +105,7 @@ let sampleCache = { current: undefined };
*
*/

export const samples = async (sampleMap, baseUrl = sampleMap._base || '') => {
export const samples = async (sampleMap, baseUrl = sampleMap._base || '', options = {}) => {
if (typeof sampleMap === 'string') {
if (sampleMap.startsWith('github:')) {
let [_, path] = sampleMap.split('github:');
@@ -141,43 +123,134 @@ export const samples = async (sampleMap, baseUrl = sampleMap._base || '') => {
}
return fetch(sampleMap)
.then((res) => res.json())
.then((json) => samples(json, baseUrl || json._base || base))
.then((json) => samples(json, baseUrl || json._base || base, options))
.catch((error) => {
console.error(error);
throw new Error(`error loading "${sampleMap}"`);
});
}
sampleCache.current = {
...sampleCache.current,
...Object.fromEntries(
Object.entries(sampleMap).map(([key, value]) => {
if (typeof value === 'string') {
value = [value];
}
if (typeof value !== 'object') {
throw new Error('wrong sample map format for ' + key);
}
baseUrl = value._base || baseUrl;
const replaceUrl = (v) => (baseUrl + v).replace('github:', 'https://raw.githubusercontent.com/');
if (Array.isArray(value)) {
return [key, value.map(replaceUrl)];
}
// must be object
return [
key,
Object.fromEntries(
Object.entries(value).map(([note, samples]) => {
return [note, (typeof samples === 'string' ? [samples] : samples).map(replaceUrl)];
}),
),
];
}),
),
const { prebake, tag } = options;
Object.entries(sampleMap).forEach(([key, value]) => {
if (typeof value === 'string') {
value = [value];
}
if (typeof value !== 'object') {
throw new Error('wrong sample map format for ' + key);
}
baseUrl = value._base || baseUrl;
const replaceUrl = (v) => (baseUrl + v).replace('github:', 'https://raw.githubusercontent.com/');
if (Array.isArray(value)) {
//return [key, value.map(replaceUrl)];
value = value.map(replaceUrl);
} else {
// must be object
value = Object.fromEntries(
Object.entries(value).map(([note, samples]) => {
return [note, (typeof samples === 'string' ? [samples] : samples).map(replaceUrl)];
}),
);
}
registerSound(key, (t, hapValue, onended) => onTriggerSample(t, hapValue, onended, value), {
type: 'sample',
samples: value,
baseUrl,
prebake,
tag,
});
});
};

const cutGroups = [];

export async function onTriggerSample(t, value, onended, bank) {
const {
s,
freq,
unit,
nudge = 0, // TODO: is this in seconds?
cut,
loop,
clip = 0, // if 1, samples will be cut off when the hap ends
n = 0,
note,
speed = 1, // sample playback speed
begin = 0,
end = 1,
} = value;
// load sample
if (speed === 0) {
// no playback
return;
}
const ac = getAudioContext();
// destructure adsr here, because the default should be different for synths and samples
const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = value;
//const soundfont = getSoundfontKey(s);
const time = t + nudge;

const bufferSource = await getSampleBufferSource(s, n, note, speed, freq, bank);

// asny stuff above took too long?
if (ac.currentTime > t) {
logger(`[sampler] still loading sound "${s}:${n}"`, 'highlight');
// console.warn('sample still loading:', s, n);
return;
}
if (!bufferSource) {
logger(`[sampler] could not load "${s}:${n}"`, 'error');
return;
}
bufferSource.playbackRate.value = Math.abs(speed) * bufferSource.playbackRate.value;
if (unit === 'c') {
// are there other units?
bufferSource.playbackRate.value = bufferSource.playbackRate.value * bufferSource.buffer.duration * 1; //cps;
}
// "The computation of the offset into the sound is performed using the sound buffer's natural sample rate,
// rather than the current playback rate, so even if the sound is playing at twice its normal speed,
// the midway point through a 10-second audio buffer is still 5."
const offset = begin * bufferSource.buffer.duration;
bufferSource.start(time, offset);
const bufferDuration = bufferSource.buffer.duration / bufferSource.playbackRate.value;
/*if (loop) {
// TODO: idea for loopBegin / loopEnd
// if one of [loopBegin,loopEnd] is <= 1, interpret it as normlized
// if [loopBegin,loopEnd] is bigger >= 1, interpret it as sample number
// this will simplify perfectly looping things, while still keeping the normalized option
// the only drawback is that looping between samples 0 and 1 is not possible (which is not real use case)
bufferSource.loop = true;
bufferSource.loopStart = offset;
bufferSource.loopEnd = offset + duration;
duration = loop * duration;
}*/
const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
bufferSource.connect(envelope);
const out = ac.createGain(); // we need a separate gain for the cutgroups because firefox...
envelope.connect(out);
bufferSource.onended = function () {
bufferSource.disconnect();
envelope.disconnect();
out.disconnect();
onended();
};
};
const stop = (endTime, playWholeBuffer = !clip) => {
let releaseTime = endTime;
if (playWholeBuffer) {
releaseTime = t + (end - begin) * bufferDuration;
}
bufferSource.stop(releaseTime + release);
releaseEnvelope(releaseTime);
};
const handle = { node: out, bufferSource, stop };

export const resetLoadedSamples = () => {
sampleCache.current = undefined;
};
// cut groups
if (cut !== undefined) {
const prev = cutGroups[cut];
if (prev) {
prev.node.gain.setValueAtTime(1, time);
prev.node.gain.linearRampToValueAtTime(0, time + 0.01);
}
cutGroups[cut] = handle;
}

export const getLoadedSamples = () => sampleCache.current;
return handle;
}
packages/webaudio/synth.mjs (new file, 44 lines)
@@ -0,0 +1,44 @@
import { fromMidi, toMidi } from '@strudel.cycles/core';
import { registerSound } from './webaudio.mjs';
import { getOscillator, gainNode, getEnvelope } from './helpers.mjs';

export function registerSynthSounds() {
  ['sine', 'square', 'triangle', 'sawtooth'].forEach((wave) => {
    registerSound(
      wave,
      (t, value, onended) => {
        // destructure adsr here, because the default should be different for synths and samples
        const { attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = value;
        let { n, note, freq } = value;
        // with synths, n and note are the same thing
        n = note || n || 36;
        if (typeof n === 'string') {
          n = toMidi(n); // e.g. c3 => 48
        }
        // get frequency
        if (!freq && typeof n === 'number') {
          freq = fromMidi(n); // + 48);
        }
        // maybe pull out the above frequency resolution?? (there is also getFrequency but it has no default)
        // make oscillator
        const { node: o, stop } = getOscillator({ t, s: wave, freq });
        const g = gainNode(0.3);
        // envelope
        const { node: envelope, stop: releaseEnvelope } = getEnvelope(attack, decay, sustain, release, 1, t);
        o.onended = () => {
          o.disconnect();
          g.disconnect();
          onended();
        };
        return {
          node: o.connect(g).connect(envelope),
          stop: (t) => {
            releaseEnvelope(t);
            stop(t + release);
          },
        };
      },
      { type: 'synth', prebake: true },
    );
  });
}
@@ -5,15 +5,22 @@ This program is free software: you can redistribute it and/or modify it under th
*/

import * as strudel from '@strudel.cycles/core';
import { fromMidi, logger, toMidi } from '@strudel.cycles/core';
import './feedbackdelay.mjs';
import './reverb.mjs';
import { getSampleBufferSource } from './sampler.mjs';
const { Pattern } = strudel;
const { Pattern, logger } = strudel;
import './vowel.mjs';
import workletsUrl from './worklets.mjs?url';
import { getFilter, gainNode } from './helpers.mjs';
import { map } from 'nanostores';

// export const getAudioContext = () => Tone.getContext().rawContext;
export const soundMap = map();
export function registerSound(key, onTrigger, data = {}) {
soundMap.setKey(key, { onTrigger, data });
}
export function getSound(s) {
return soundMap.get()[s];
}
export const resetLoadedSounds = () => soundMap.set({});

let audioContext;
export const getAudioContext = () => {
@@ -38,65 +45,6 @@ export const panic = () => {
destination = null;
};

const getFilter = (type, frequency, Q) => {
const filter = getAudioContext().createBiquadFilter();
filter.type = type;
filter.frequency.value = frequency;
filter.Q.value = Q;
return filter;
};

const getADSR = (attack, decay, sustain, release, velocity, begin, end) => {
const gainNode = getAudioContext().createGain();
gainNode.gain.setValueAtTime(0, begin);
gainNode.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
gainNode.gain.linearRampToValueAtTime(sustain * velocity, begin + attack + decay); // sustain start
gainNode.gain.setValueAtTime(sustain * velocity, end); // sustain end
gainNode.gain.linearRampToValueAtTime(0, end + release); // release
// for some reason, using exponential ramping creates little cracklings
/* let t = begin;
gainNode.gain.setValueAtTime(0, t);
gainNode.gain.exponentialRampToValueAtTime(velocity, (t += attack));
const sustainGain = Math.max(sustain * velocity, 0.001);
gainNode.gain.exponentialRampToValueAtTime(sustainGain, (t += decay));
if (end - begin < attack + decay) {
gainNode.gain.cancelAndHoldAtTime(end);
} else {
gainNode.gain.setValueAtTime(sustainGain, end);
}
gainNode.gain.exponentialRampToValueAtTime(0.001, end + release); // release */
return gainNode;
};

const getOscillator = ({ s, freq, t, duration, release }) => {
// make oscillator
const o = getAudioContext().createOscillator();
o.type = s || 'triangle';
o.frequency.value = Number(freq);
o.start(t);
o.stop(t + duration + release);
return o;
};

const getSoundfontKey = (s) => {
if (!globalThis.soundfontList) {
// soundfont package not loaded
return false;
}
if (globalThis.soundfontList?.instruments?.includes(s)) {
return s;
}
// check if s is one of the soundfonts, which are loaded into globalThis, to avoid coupling both packages
const nameIndex = globalThis.soundfontList?.instrumentNames?.indexOf(s);
// convert number nameIndex (0-128) to 3 digit string (001-128)
const name = nameIndex < 10 ? `00${nameIndex}` : nameIndex < 100 ? `0${nameIndex}` : nameIndex;
if (nameIndex !== -1) {
// TODO: indices of instrumentNames do not seem to match instruments
return globalThis.soundfontList.instruments.find((instrument) => instrument.startsWith(name));
}
return;
};

let workletsLoading;
function loadWorklets() {
if (workletsLoading) {
@@ -136,13 +84,6 @@ export async function initAudioOnFirstClick() {
});
}

function gainNode(value) {
const node = getAudioContext().createGain();
node.gain.value = value;
return node;
}
const cutGroups = [];

let delays = {};
function getDelay(orbit, delaytime, delayfeedback, t) {
if (!delays[orbit]) {
@@ -186,15 +127,11 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {

// calculate absolute time
let t = ac.currentTime + deadline;
// destructure value
// destructure
let {
freq,
s,
s = 'triangle',
bank,
sf,
clip = 0, // if 1, samples will be cut off when the hap ends
n = 0,
note,
source,
gain = 0.8,
// low pass
cutoff,
@@ -210,110 +147,48 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
crush,
shape,
pan,
speed = 1, // sample playback speed
begin = 0,
end = 1,
vowel,
delay = 0,
delayfeedback = 0.5,
delaytime = 0.25,
unit,
nudge = 0, // TODO: is this in seconds?
cut,
loop,
orbit = 1,
room,
size = 2,
} = hap.value;
const { velocity = 1 } = hap.context;
gain *= velocity; // legacy fix for velocity
// the chain will hold all audio nodes that connect to each other
const chain = [];
let toDisconnect = []; // audio nodes that will be disconnected when the source has ended
const onended = () => {
toDisconnect.forEach((n) => n?.disconnect());
};
if (bank && s) {
s = `${bank}_${s}`;
}
if (!s || ['sine', 'square', 'triangle', 'sawtooth'].includes(s)) {
// destructure adsr here, because the default should be different for synths and samples
const { attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = hap.value;
// with synths, n and note are the same thing
n = note || n || 36;
if (typeof n === 'string') {
n = toMidi(n); // e.g. c3 => 48
// get source AudioNode
let sourceNode;
if (source) {
sourceNode = source(t, hap.value, hapDuration);
} else if (getSound(s)) {
const { onTrigger } = getSound(s);
const soundHandle = await onTrigger(t, hap.value, onended);
if (soundHandle) {
sourceNode = soundHandle.node;
soundHandle.stop(t + hapDuration);
}
// get frequency
if (!freq && typeof n === 'number') {
freq = fromMidi(n); // + 48);
}
// make oscillator
const o = getOscillator({ t, s, freq, duration: hapDuration, release });
chain.push(o);
// level down oscillators as they are really loud compared to samples i've tested
chain.push(gainNode(0.3));
// TODO: make adsr work with samples without pops
// envelope
const adsr = getADSR(attack, decay, sustain, release, 1, t, t + hapDuration);
chain.push(adsr);
} else {
// destructure adsr here, because the default should be different for synths and samples
const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = hap.value;
// load sample
if (speed === 0) {
// no playback
return;
}
if (!s) {
console.warn('no sample specified');
return;
}
const soundfont = getSoundfontKey(s);
let bufferSource;

if (soundfont) {
// is soundfont
bufferSource = await globalThis.getFontBufferSource(soundfont, note || n, ac, freq);
} else {
// is sample from loaded samples(..)
bufferSource = await getSampleBufferSource(s, n, note, speed, freq);
}
// asny stuff above took too long?
if (ac.currentTime > t) {
logger(`[sampler] still loading sound "${s}:${n}"`, 'highlight');
// console.warn('sample still loading:', s, n);
return;
}
if (!bufferSource) {
console.warn('no buffer source');
return;
}
bufferSource.playbackRate.value = Math.abs(speed) * bufferSource.playbackRate.value;
if (unit === 'c') {
// are there other units?
bufferSource.playbackRate.value = bufferSource.playbackRate.value * bufferSource.buffer.duration * cps;
}
let duration = soundfont || clip ? hapDuration : bufferSource.buffer.duration / bufferSource.playbackRate.value;
// "The computation of the offset into the sound is performed using the sound buffer's natural sample rate,
// rather than the current playback rate, so even if the sound is playing at twice its normal speed,
// the midway point through a 10-second audio buffer is still 5."
const offset = begin * duration * bufferSource.playbackRate.value;
duration = (end - begin) * duration;
if (loop) {
bufferSource.loop = true;
bufferSource.loopStart = offset;
bufferSource.loopEnd = offset + duration;
duration = loop * duration;
}
t += nudge;

bufferSource.start(t, offset);
if (cut !== undefined) {
cutGroups[cut]?.stop(t); // fade out?
cutGroups[cut] = bufferSource;
}
chain.push(bufferSource);
bufferSource.stop(t + duration + release);
const adsr = getADSR(attack, decay, sustain, release, 1, t, t + duration);
chain.push(adsr);
throw new Error(`sound ${s} not found! Is it loaded?`);
}
if (!sourceNode) {
// if onTrigger does not return anything, we will just silently skip
// this can be used for things like speed(0) in the sampler
return;
}
if (ac.currentTime > t) {
logger('[webaudio] skip hap: still loading', ac.currentTime - t);
return;
}
const chain = []; // audio nodes that will be connected to each other sequentially
chain.push(sourceNode);

// gain stage
chain.push(gainNode(gain));
@@ -357,8 +232,9 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
// connect chain elements together
chain.slice(1).reduce((last, current) => last.connect(current), chain[0]);

// disconnect all nodes when source node has ended:
chain[0].onended = () => chain.concat([delaySend, reverbSend]).forEach((n) => n?.disconnect());
// toDisconnect = all the node that should be disconnected in onended callback
// this is crucial for performance
toDisconnect = chain.concat([delaySend, reverbSend]);
};

export const webaudioOutputTrigger = (t, hap, ct, cps) => webaudioOutput(hap, t - ct, hap.duration / cps, cps);
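Editor's note: the new `source`/`src` control registered in the first hunk pairs with the `sourceNode = source(t, hap.value, hapDuration)` call above. A rough, hypothetical usage sketch follows; the control name and call signature come from this diff, but the pattern-side form is an assumption, not confirmed API:

```js
// hypothetical sketch: supply a function that returns a custom AudioNode as the sound source
note('a3').source((t, value, duration) => {
  const ctx = getAudioContext();
  // a plain oscillator stands in for any custom source node
  const o = new OscillatorNode(ctx, { type: 'square', frequency: 220 });
  o.start(t);
  o.stop(t + duration); // stop when the hap ends
  return o; // used as the first node of the effects chain
});
```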
@@ -1,7 +1,7 @@
import * as strudel from '@strudel.cycles/core';
const { Pattern } = strudel;
import * as WebDirt from 'WebDirt';
import { getLoadedSamples, loadBuffer, getLoadedBuffer } from '@strudel.cycles/webaudio';
//import { loadBuffer, getLoadedBuffer } from '@strudel.cycles/webaudio';

let webDirt;

@@ -62,8 +62,9 @@ export function loadWebDirt(config) {
* @noAutocomplete
*/
Pattern.prototype.webdirt = function () {
throw new Error('webdirt support has been dropped..');
// create a WebDirt object and initialize Web Audio context
return this.onTrigger(async (time, e, currentTime) => {
/* return this.onTrigger(async (time, e, currentTime) => {
if (!webDirt) {
throw new Error('WebDirt not initialized!');
}
@@ -92,5 +93,5 @@ Pattern.prototype.webdirt = function () {
webDirt.playSample(msg, deadline);
}
}
});
}); */
};
pnpm-lock.yaml (generated, 2 lines changed)
@@ -321,9 +321,11 @@ importers:
packages/webaudio:
specifiers:
'@strudel.cycles/core': workspace:*
nanostores: ^0.7.4
vite: ^3.2.2
dependencies:
'@strudel.cycles/core': link:../core
nanostores: 0.7.4
devDependencies:
vite: 3.2.5
@@ -8193,11 +8193,11 @@ exports[`renders tunes > tune: orbit 1`] = `

exports[`renders tunes > tune: outroMusic 1`] = `
[
"[ (0/1 → 1/1) ⇝ 3/1 | note:E3 s:0040_FluidR3_GM_sf2_file attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:G3 s:0040_FluidR3_GM_sf2_file attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:B3 s:0040_FluidR3_GM_sf2_file attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:D4 s:0040_FluidR3_GM_sf2_file attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 9/2 | note:C5 s:0040_FluidR3_GM_sf2_file attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:E3 s:gm_epiano1 n:1 attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:G3 s:gm_epiano1 n:1 attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:B3 s:gm_epiano1 n:1 attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 3/1 | note:D4 s:gm_epiano1 n:1 attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ (0/1 → 1/1) ⇝ 9/2 | note:C5 s:gm_epiano1 n:1 attack:0.05 decay:0.1 sustain:0.7 cutoff:1111.7252990603447 gain:0.3 ]",
"[ 0/1 → 3/4 | note:C2 s:sawtooth attack:0.05 decay:0.1 sustain:0.7 cutoff:864.536878321087 gain:0.3 ]",
"[ 0/1 → 3/4 | s:bd speed:0.9107561463868479 n:3 ]",
"[ (3/4 → 1/1) ⇝ 3/2 | s:sd speed:0.9931522866332672 n:3 ]",
@@ -1986,7 +1986,7 @@
"Idiophones/Struck%20Idiophones/Tambourine%202/Tamb2_Shake_rr3_Mid.wav",
"Idiophones/Struck%20Idiophones/Tambourine%202/Tamb2_Shake_rr4_Mid.wav"
],
"triangle": [
"triangles": [
"Idiophones/Struck%20Idiophones/Triangles/Triangle1_HitFM_v1_rr1_Mid.wav",
"Idiophones/Struck%20Idiophones/Triangles/Triangle1_HitFM_v1_rr2_Mid.wav",
"Idiophones/Struck%20Idiophones/Triangles/Triangle1_HitM_v1_rr2_Mid.wav",
@@ -73,6 +73,7 @@ export const SIDEBAR: Sidebar = {
],
Development: [
{ text: 'REPL', link: 'technical-manual/repl' },
{ text: 'Sounds', link: 'technical-manual/sounds' },
{ text: 'Packages', link: 'technical-manual/packages' },
{ text: 'Docs', link: 'technical-manual/docs' },
{ text: 'Testing', link: 'technical-manual/testing' },
@@ -18,6 +18,7 @@ if (typeof window !== 'undefined') {
import('@strudel.cycles/webaudio'),
import('@strudel.cycles/osc'),
import('@strudel.cycles/csound'),
import('@strudel.cycles/soundfonts'),
);
}

website/src/pages/technical-manual/sounds.mdx (new file, 75 lines)
@@ -0,0 +1,75 @@
---
title: Sounds
layout: ../../layouts/MainLayout.astro
---

import { MiniRepl } from '../../docs/MiniRepl';

# Sounds

Let's take a closer look at how sounds are implemented in the webaudio output.

## Registering a sound

All sounds are registered in the sound map, using the `registerSound` function:

```ts
function registerSound(
  name: string, // The name of the sound that should be given to `s`, e.g. `mysaw`
  // The function called by the scheduler to trigger the sound:
  (
    time: number, // The audio context time the sound should start
    value: object, // The value of the `Hap`
    onended: () => void // A callback that should be fired when the sound has ended
  ) => {
    node: AudioNode, // node to connect to rest of the effects chain
    stop: (time: number) => void // a function that will stop the sound
  },
  data: object // meta data, only for ui logic in sounds tab
);
```

When `registerSound` is called, it registers `{ onTrigger, data }` under the given `name` in a [nanostore map](https://github.com/nanostores/nanostores#maps).

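Editor's note: internally (see `packages/webaudio/webaudio.mjs` in this commit), registration and lookup are just reads and writes on that map; a slightly condensed excerpt:

```js
import { map } from 'nanostores';

export const soundMap = map(); // name -> { onTrigger, data }
export function registerSound(key, onTrigger, data = {}) {
  soundMap.setKey(key, { onTrigger, data });
}
export function getSound(s) {
  return soundMap.get()[s];
}
```
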
### Example

This might be a bit abstract, so here is a minimal example:

```js
registerSound(
  'mysaw',
  (time, value, onended) => {
    let { freq } = value; // destructure control params
    const ctx = getAudioContext();
    // create oscillator
    const o = new OscillatorNode(ctx, { type: 'sawtooth', frequency: Number(freq) });
    o.start(time);
    // add gain node to level down osc
    const g = new GainNode(ctx, { gain: 0.3 });
    // connect osc to gain
    const node = o.connect(g);
    // this function can be called from outside to stop the sound
    const stop = (time) => o.stop(time);
    // ended will be fired when stop has been fired
    o.addEventListener('ended', () => {
      o.disconnect();
      g.disconnect();
      onended();
    });
    return { node, stop };
  },
  { type: 'synth' },
);
// use the sound
freq(220, 440, 330).s('mysaw');
```

You can actually use this code in the [REPL](https://strudel.tidalcycles.org/) and it'll work.
After evaluating the code, you should see `mysaw` listed in the sounds tab.

## Playing sounds

Now here is what happens when a sound is played:
When the webaudio output plays a `Hap`, it will look up and call the `onTrigger` function for the given `s`.
The returned `node` can then be connected to the rest of the standard effects chain.
Keeping the stop function separate also allows playing sounds via MIDI, where you don't know in advance how long the note-on will last.
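Editor's note: a condensed sketch of that flow, based on the `webaudioOutput` changes in this commit (error handling and effects-chain setup omitted):

```js
// inside webaudioOutput, roughly:
const { onTrigger } = getSound(s); // look up the sound registered for `s`
const soundHandle = await onTrigger(t, hap.value, onended); // { node, stop }
if (soundHandle) {
  chain.push(soundHandle.node); // becomes the source of the effects chain
  soundHandle.stop(t + hapDuration); // schedule the release at the hap's end
}
```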
@@ -3,11 +3,12 @@ import { logger } from '@strudel.cycles/core';
import { useEvent, cx } from '@strudel.cycles/react';
// import { cx } from '@strudel.cycles/react';
import { nanoid } from 'nanoid';
import React, { useCallback, useLayoutEffect, useRef, useState } from 'react';
import { loadedSamples } from './Repl';
import React, { useMemo, useCallback, useLayoutEffect, useRef, useState } from 'react';
import { Reference } from './Reference';
import { themes } from './themes.mjs';
import { useSettings, settingsMap, setActiveFooter, defaultSettings } from '../settings.mjs';
import { getAudioContext, soundMap } from '@strudel.cycles/webaudio';
import { useStore } from '@nanostores/react';

export function Footer({ context }) {
const footerContent = useRef();
@@ -72,7 +73,7 @@ export function Footer({ context }) {
<div className="flex justify-between px-2">
<div className={cx('flex select-none max-w-full overflow-auto', activeFooter && 'pb-2')}>
<FooterTab name="intro" label="welcome" />
<FooterTab name="samples" />
<FooterTab name="sounds" />
<FooterTab name="console" />
<FooterTab name="reference" />
<FooterTab name="settings" />
@@ -84,13 +85,10 @@ export function Footer({ context }) {
)}
</div>
{activeFooter !== '' && (
<div
className="text-white font-mono text-sm h-[360px] flex-none overflow-auto max-w-full relative"
ref={footerContent}
>
<div className="text-white flex-none h-[360px] overflow-auto max-w-full relative" ref={footerContent}>
{activeFooter === 'intro' && <WelcomeTab />}
{activeFooter === 'console' && <ConsoleTab log={log} />}
{activeFooter === 'samples' && <SamplesTab />}
{activeFooter === 'sounds' && <SoundsTab />}
{activeFooter === 'reference' && <Reference />}
{activeFooter === 'settings' && <SettingsTab scheduler={context.scheduler} />}
</div>
@@ -173,7 +171,7 @@ function WelcomeTab() {

function ConsoleTab({ log }) {
return (
<div id="console-tab" className="break-all px-4 dark:text-white text-stone-900">
<div id="console-tab" className="break-all px-4 dark:text-white text-stone-900 text-sm">
<pre>{`███████╗████████╗██████╗ ██╗ ██╗██████╗ ███████╗██╗
██╔════╝╚══██╔══╝██╔══██╗██║ ██║██╔══██╗██╔════╝██║
███████╗ ██║ ██████╔╝██║ ██║██║ ██║█████╗ ██║
@@ -193,36 +191,104 @@ function ConsoleTab({ log }) {
);
}

function SamplesTab() {
const getSamples = (samples) =>
Array.isArray(samples) ? samples.length : typeof samples === 'object' ? Object.values(samples).length : 1;

function SoundsTab() {
const sounds = useStore(soundMap);
const { soundsFilter } = useSettings();
const soundEntries = useMemo(() => {
let filtered = Object.entries(sounds).filter(([key]) => !key.startsWith('_'));
if (!sounds) {
return [];
}
if (soundsFilter === 'user') {
return filtered.filter(([key, { data }]) => !data.prebake);
}
if (soundsFilter === 'drums') {
return filtered.filter(([_, { data }]) => data.type === 'sample' && data.tag === 'drum-machines');
}
if (soundsFilter === 'samples') {
return filtered.filter(([_, { data }]) => data.type === 'sample' && data.tag !== 'drum-machines');
}
if (soundsFilter === 'synths') {
return filtered.filter(([_, { data }]) => ['synth', 'soundfont'].includes(data.type));
}
return filtered;
}, [sounds, soundsFilter]);
// holds mutable ref to current triggered sound
const trigRef = useRef();
// stop current sound on mouseup
useEvent('mouseup', () => {
const t = trigRef.current;
trigRef.current = undefined;
t?.then((ref) => {
ref?.stop(getAudioContext().currentTime + 0.01);
});
});
return (
<div id="samples-tab" className="break-normal w-full px-4 dark:text-white text-stone-900">
<span>{loadedSamples.length} banks loaded:</span>
{loadedSamples.map(([name, samples]) => (
<span key={name} className="cursor-pointer hover:opacity-50" onClick={() => {}}>
{' '}
{name}(
{Array.isArray(samples) ? samples.length : typeof samples === 'object' ? Object.values(samples).length : 1}){' '}
</span>
))}
<div id="sounds-tab" className="flex flex-col w-full h-full dark:text-white text-stone-900">
<div className="px-2 pb-2 flex-none">
<ButtonGroup
value={soundsFilter}
onChange={(value) => settingsMap.setKey('soundsFilter', value)}
items={{
samples: 'samples',
drums: 'drum-machines',
synths: 'Synths',
user: 'User',
}}
></ButtonGroup>
</div>
<div className="p-2 min-h-0 max-h-full grow overflow-auto font-mono text-sm break-normal">
{soundEntries.map(([name, { data, onTrigger }]) => (
<span
key={name}
className="cursor-pointer hover:opacity-50"
onMouseDown={async () => {
const ctx = getAudioContext();
const params = {
note: ['synth', 'soundfont'].includes(data.type) ? 'a3' : undefined,
s: name,
clip: 1,
release: 0.5,
};
const time = ctx.currentTime + 0.05;
const onended = () => trigRef.current?.node?.disconnect();
trigRef.current = Promise.resolve(onTrigger(time, params, onended));
trigRef.current.then((ref) => {
ref?.node.connect(ctx.destination);
});
}}
>
{' '}
{name}
{data?.type === 'sample' ? `(${getSamples(data.samples)})` : ''}
{data?.type === 'soundfont' ? `(${data.fonts.length})` : ''}
</span>
))}
{!soundEntries.length ? 'No custom sounds loaded in this pattern (yet).' : ''}
</div>
</div>
);
}

function ButtonGroup({ value, onChange, items }) {
return (
<div className="flex grow border border-foreground rounded-md">
<div className="flex max-w-lg">
{Object.entries(items).map(([key, label], i, arr) => (
<button
key={key}
onClick={() => onChange(key)}
className={cx(
'p-2 grow',
i === 0 && 'rounded-l-md',
i === arr.length - 1 && 'rounded-r-md',
value === key ? 'bg-background' : 'bg-lineHighlight',
'px-2 border-b h-8',
// i === 0 && 'rounded-l-md',
// i === arr.length - 1 && 'rounded-r-md',
// value === key ? 'bg-background' : 'bg-lineHighlight',
value === key ? 'border-foreground' : 'border-transparent',
)}
>
{label}
{label.toLowerCase()}
</button>
))}
</div>

@@ -6,7 +6,7 @@ const visibleFunctions = jsdocJson.docs
export function Reference() {
return (
<div className="flex h-full w-full pt-2 text-foreground">
<div className="w-64 flex-none h-full overflow-y-auto overflow-x-hidden pr-4">
<div className="w-42 flex-none h-full overflow-y-auto overflow-x-hidden pr-4">
{visibleFunctions.map((entry, i) => (
<a key={i} className="cursor-pointer block hover:bg-lineHighlight py-1 px-4" href={`#doc-${i}`}>
{entry.name} {/* <span className="text-gray-600">{entry.meta.filename}</span> */}
@@ -6,13 +6,7 @@ This program is free software: you can redistribute it and/or modify it under th

import { cleanupDraw, cleanupUi, controls, evalScope, getDrawContext, logger } from '@strudel.cycles/core';
import { CodeMirror, cx, flash, useHighlighting, useStrudel, useKeydown } from '@strudel.cycles/react';
import {
getAudioContext,
getLoadedSamples,
initAudioOnFirstClick,
resetLoadedSamples,
webaudioOutput,
} from '@strudel.cycles/webaudio';
import { getAudioContext, initAudioOnFirstClick, resetLoadedSounds, webaudioOutput } from '@strudel.cycles/webaudio';
import { createClient } from '@supabase/supabase-js';
import { nanoid } from 'nanoid';
import React, { createContext, useCallback, useEffect, useState } from 'react';
@@ -53,7 +47,6 @@ evalScope(
...modules,
);

export let loadedSamples = [];
const presets = prebake();

let drawContext, clearCanvas;
@@ -62,11 +55,6 @@ if (typeof window !== 'undefined') {
clearCanvas = () => drawContext.clearRect(0, 0, drawContext.canvas.height, drawContext.canvas.width);
}

Promise.all([...modules, presets]).then((data) => {
// console.log('modules and sample registry loade', data);
loadedSamples = Object.entries(getLoadedSamples() || {});
});

const getTime = () => getAudioContext().currentTime;

async function initCode() {
@@ -211,7 +199,7 @@ export function Repl({ embedded = false }) {
const { code, name } = getRandomTune();
logger(`[repl] ✨ loading random tune "${name}"`);
clearCanvas();
resetLoadedSamples();
resetLoadedSounds();
await prebake(); // declare default samples
await evaluate(code, false);
};
@@ -1,17 +1,23 @@
import { Pattern, toMidi, valueToMidi } from '@strudel.cycles/core';
import { samples } from '@strudel.cycles/webaudio';
//import { registerSoundfonts } from '@strudel.cycles/soundfonts';
import { registerSynthSounds, samples } from '@strudel.cycles/webaudio';

export async function prebake() {
// https://archive.org/details/SalamanderGrandPianoV3
// License: CC-by http://creativecommons.org/licenses/by/3.0/ Author: Alexander Holm
return await Promise.all([
samples(`./piano.json`, `./piano/`),
registerSynthSounds();
//registerSoundfonts();
await Promise.all([
samples(`./piano.json`, `./piano/`, { prebake: true }),
// https://github.com/sgossner/VCSL/
// https://api.github.com/repositories/126427031/contents/
// LICENSE: CC0 general-purpose
samples(`./vcsl.json`, 'github:sgossner/VCSL/master/'),
samples(`./tidal-drum-machines.json`, 'github:ritchse/tidal-drum-machines/main/machines/'),
samples(`./EmuSP12.json`, `./EmuSP12/`),
samples(`./vcsl.json`, 'github:sgossner/VCSL/master/', { prebake: true }),
samples(`./tidal-drum-machines.json`, 'github:ritchse/tidal-drum-machines/main/machines/', {
prebake: true,
tag: 'drum-machines',
}),
samples(`./EmuSP12.json`, `./EmuSP12/`, { prebake: true, tag: 'drum-machines' }),
// samples('github:tidalcycles/Dirt-Samples/master'),
]);
}
@@ -566,7 +566,7 @@ samples({
"C^7 Am7 Dm7 G7".slow(2).voicings('lefthand')
.stack("0@6 [<1 2> <2 0> 1]@2".scale('C5 major'))
.note().slow(4)
.s('0040_FluidR3_GM_sf2_file')
.s("gm_epiano1:1")
.color('steelblue')
.stack(
"<-7 ~@2 [~@2 -7] -9 ~@2 [~@2 -9] -10!2 ~ [~@2 -10] -5 ~ [-3 -2 -10]@2>*2".scale('C3 major')
@@ -9,6 +9,7 @@ export const defaultSettings = {
fontSize: 18,
latestCode: '',
isZen: false,
soundsFilter: 'all',
};

export const settingsMap = persistentMap('strudel-settings', defaultSettings);
@@ -30,7 +30,6 @@
--app-height: 100vh;
}

#console-tab,
#samples-tab {
#console-tab {
font-family: BigBlueTerminal, monospace;
}