mirror of
https://github.com/eliasstepanik/strudel-docker.git
synced 2026-01-11 13:48:34 +00:00
first draft of soundMap to register keys for s
+ refactor sampler to use it + refactor synth to use it + add 'source' control + wip: samples tab + wip: webdirt? + wip: soundfonts
This commit is contained in:
parent
da11069f75
commit
6059c69995
@ -23,6 +23,15 @@ const generic_params = [
|
||||
*
|
||||
*/
|
||||
[['s', 'n', 'gain'], 'sound'],
|
||||
/**
|
||||
* Define a custom webaudio node to use as a sound source.
|
||||
*
|
||||
* @name source
|
||||
* @param {function} getSource
|
||||
* @synonyms src
|
||||
*
|
||||
*/
|
||||
['source', 'src'],
|
||||
/**
|
||||
* Selects the given index from the sample map.
|
||||
* Numbers too high will wrap around.
|
||||
|
||||
47
packages/webaudio/helpers.mjs
Normal file
47
packages/webaudio/helpers.mjs
Normal file
@ -0,0 +1,47 @@
|
||||
import { getAudioContext } from './webaudio.mjs';
|
||||
|
||||
// Convenience: create a GainNode whose gain is preset to the given value.
export function gainNode(value) {
  const gain = getAudioContext().createGain();
  gain.gain.value = value;
  return gain;
}
|
||||
|
||||
/**
 * Create and schedule an OscillatorNode for a single event.
 *
 * @param {string} s waveform type ('sine' | 'square' | 'triangle' | 'sawtooth'); falls back to 'triangle'
 * @param {number|string} freq frequency in Hz, coerced with Number
 *   (NOTE(review): assumes callers always resolve a finite frequency first — confirm;
 *   Number(undefined) is NaN and would be rejected by the AudioParam)
 * @param {number} t audio-context time at which the oscillator starts
 * @param {number} duration event duration in seconds
 * @param {number} release release tail in seconds; the oscillator stops at t + duration + release
 * @returns {OscillatorNode} the started oscillator, not yet connected to anything
 */
export const getOscillator = ({ s, freq, t, duration, release }) => {
  const osc = getAudioContext().createOscillator();
  osc.type = s || 'triangle';
  osc.frequency.value = Number(freq);
  osc.start(t);
  osc.stop(t + duration + release);
  return osc;
};
|
||||
|
||||
/**
 * Build a GainNode scheduled as a linear ADSR amplitude envelope.
 *
 * @param {number} attack attack time in seconds
 * @param {number} decay decay time in seconds
 * @param {number} sustain sustain level as a fraction of velocity (0..1)
 * @param {number} release release time in seconds, applied after `end`
 * @param {number} velocity peak gain reached at the end of the attack
 * @param {number} begin audio-context time at which the envelope starts
 * @param {number} end audio-context time at which the sustain phase ends
 * @returns {GainNode} gain node carrying the scheduled envelope
 */
export const getADSR = (attack, decay, sustain, release, velocity, begin, end) => {
  const env = getAudioContext().createGain();
  const sustainLevel = sustain * velocity;
  env.gain.setValueAtTime(0, begin);
  env.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
  env.gain.linearRampToValueAtTime(sustainLevel, begin + attack + decay); // decay -> sustain
  env.gain.setValueAtTime(sustainLevel, end); // hold sustain until the event ends
  env.gain.linearRampToValueAtTime(0, end + release); // release
  // Linear ramps are used on purpose: exponential ramping produced audible cracklings
  // (an exponential variant with cancelAndHoldAtTime was tried and abandoned).
  // NOTE(review): if end < begin + attack + decay, the sustain anchor is scheduled
  // before the ramps complete — confirm whether very short haps need a clamp here.
  return env;
};
|
||||
|
||||
/**
 * Create a configured BiquadFilterNode.
 *
 * @param {string} type biquad filter type, e.g. 'lowpass' | 'highpass' | 'bandpass'
 * @param {number} frequency cutoff / center frequency in Hz
 * @param {number} Q filter resonance
 * @returns {BiquadFilterNode}
 */
export const getFilter = (type, frequency, Q) => {
  const biquad = getAudioContext().createBiquadFilter();
  biquad.type = type;
  biquad.frequency.value = frequency;
  biquad.Q.value = Q;
  return biquad;
};
|
||||
@ -6,3 +6,5 @@ This program is free software: you can redistribute it and/or modify it under th
|
||||
|
||||
export * from './webaudio.mjs';
|
||||
export * from './sampler.mjs';
|
||||
export * from './helpers.mjs';
|
||||
export * from './synth.mjs';
|
||||
|
||||
@ -1,5 +1,6 @@
|
||||
import { logger, toMidi, valueToMidi } from '@strudel.cycles/core';
|
||||
import { getAudioContext } from './index.mjs';
|
||||
import { getAudioContext, setSound } from './index.mjs';
|
||||
import { getADSR } from './helpers.mjs';
|
||||
|
||||
const bufferCache = {}; // string: Promise<ArrayBuffer>
|
||||
const loadCache = {}; // string: Promise<ArrayBuffer>
|
||||
@ -20,7 +21,7 @@ function humanFileSize(bytes, si) {
|
||||
return bytes.toFixed(1) + ' ' + units[u];
|
||||
}
|
||||
|
||||
export const getSampleBufferSource = async (s, n, note, speed, freq) => {
|
||||
export const getSampleBufferSource = async (s, n, note, speed, freq, bank) => {
|
||||
let transpose = 0;
|
||||
if (freq !== undefined && note !== undefined) {
|
||||
logger('[sampler] hap has note and freq. ignoring note', 'warning');
|
||||
@ -29,23 +30,6 @@ export const getSampleBufferSource = async (s, n, note, speed, freq) => {
|
||||
transpose = midi - 36; // C3 is middle C
|
||||
|
||||
const ac = getAudioContext();
|
||||
// is sample from loaded samples(..)
|
||||
const samples = getLoadedSamples();
|
||||
if (!samples) {
|
||||
throw new Error('no samples loaded');
|
||||
}
|
||||
const bank = samples?.[s];
|
||||
if (!bank) {
|
||||
throw new Error(
|
||||
`sample not found: "${s}"`,
|
||||
// , try one of ${Object.keys(samples)
|
||||
// .map((s) => `"${s}"`)
|
||||
// .join(', ')}.
|
||||
);
|
||||
}
|
||||
if (typeof bank !== 'object') {
|
||||
throw new Error('wrong format for sample bank:', s);
|
||||
}
|
||||
let sampleUrl;
|
||||
if (Array.isArray(bank)) {
|
||||
sampleUrl = bank[n % bank.length];
|
||||
@ -107,8 +91,6 @@ export const getLoadedBuffer = (url) => {
|
||||
return bufferCache[url];
|
||||
};
|
||||
|
||||
let sampleCache = { current: undefined };
|
||||
|
||||
/**
|
||||
* Loads a collection of samples to use with `s`
|
||||
* @example
|
||||
@ -147,37 +129,129 @@ export const samples = async (sampleMap, baseUrl = sampleMap._base || '') => {
|
||||
throw new Error(`error loading "${sampleMap}"`);
|
||||
});
|
||||
}
|
||||
sampleCache.current = {
|
||||
...sampleCache.current,
|
||||
...Object.fromEntries(
|
||||
Object.entries(sampleMap).map(([key, value]) => {
|
||||
if (typeof value === 'string') {
|
||||
value = [value];
|
||||
}
|
||||
if (typeof value !== 'object') {
|
||||
throw new Error('wrong sample map format for ' + key);
|
||||
}
|
||||
baseUrl = value._base || baseUrl;
|
||||
const replaceUrl = (v) => (baseUrl + v).replace('github:', 'https://raw.githubusercontent.com/');
|
||||
if (Array.isArray(value)) {
|
||||
return [key, value.map(replaceUrl)];
|
||||
}
|
||||
// must be object
|
||||
return [
|
||||
key,
|
||||
Object.fromEntries(
|
||||
Object.entries(value).map(([note, samples]) => {
|
||||
return [note, (typeof samples === 'string' ? [samples] : samples).map(replaceUrl)];
|
||||
}),
|
||||
),
|
||||
];
|
||||
}),
|
||||
),
|
||||
};
|
||||
Object.entries(sampleMap).forEach(([key, value]) => {
|
||||
if (typeof value === 'string') {
|
||||
value = [value];
|
||||
}
|
||||
if (typeof value !== 'object') {
|
||||
throw new Error('wrong sample map format for ' + key);
|
||||
}
|
||||
baseUrl = value._base || baseUrl;
|
||||
const replaceUrl = (v) => (baseUrl + v).replace('github:', 'https://raw.githubusercontent.com/');
|
||||
if (Array.isArray(value)) {
|
||||
//return [key, value.map(replaceUrl)];
|
||||
value = value.map(replaceUrl);
|
||||
} else {
|
||||
// must be object
|
||||
value = Object.fromEntries(
|
||||
Object.entries(value).map(([note, samples]) => {
|
||||
return [note, (typeof samples === 'string' ? [samples] : samples).map(replaceUrl)];
|
||||
}),
|
||||
);
|
||||
}
|
||||
setSound(key, (options) => onTriggerSample(options, value));
|
||||
});
|
||||
};
|
||||
|
||||
export const resetLoadedSamples = () => {
|
||||
sampleCache.current = undefined;
|
||||
};
|
||||
const cutGroups = [];
|
||||
|
||||
export const getLoadedSamples = () => sampleCache.current;
|
||||
/**
 * Trigger playback of one sample event: resolves a buffer source from the given
 * bank, applies speed / begin / end / loop / cut, and wraps it in an ADSR gain.
 *
 * @param {object} options { hap, duration, t, cps } as passed by the scheduler
 * @param {object|Array} bank the sample bank registered for this sound key
 * @returns {GainNode|undefined} the ADSR gain node the source is connected into,
 *   or undefined when playback is skipped (speed 0, missing sample, late load)
 */
export async function onTriggerSample(options, bank) {
  const { hap, duration: hapDuration, t, cps } = options;
  const {
    s,
    freq,
    unit,
    nudge = 0, // TODO: is this in seconds?
    cut,
    loop,
    clip = 0, // if 1, samples will be cut off when the hap ends
    n = 0,
    note,
    speed = 1, // sample playback speed
    begin = 0,
    end = 1,
  } = hap.value;
  const ac = getAudioContext();
  // destructure adsr here, because the default should be different for synths and samples
  const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = hap.value;
  // load sample
  if (speed === 0) {
    // no playback
    return;
  }
  if (!s) {
    // is this check really needed?
    console.warn('no sample specified');
    return;
  }
  //const soundfont = getSoundfontKey(s);
  let bufferSource;

  //if (soundfont) {
  // is soundfont
  //bufferSource = await globalThis.getFontBufferSource(soundfont, note || n, ac, freq);
  //} else {
  // is sample from loaded samples(..)
  bufferSource = await getSampleBufferSource(s, n, note, speed, freq, bank);
  //}
  // async stuff above took too long? then skip the event instead of playing late
  if (ac.currentTime > t) {
    logger(`[sampler] still loading sound "${s}:${n}"`, 'highlight');
    // console.warn('sample still loading:', s, n);
    return;
  }
  if (!bufferSource) {
    console.warn('no buffer source');
    return;
  }
  // speed scales whatever rate the source already has (e.g. note transposition)
  bufferSource.playbackRate.value = Math.abs(speed) * bufferSource.playbackRate.value;
  if (unit === 'c') {
    // unit 'c': playback rate is expressed in cycles — are there other units?
    bufferSource.playbackRate.value = bufferSource.playbackRate.value * bufferSource.buffer.duration * cps;
  }
  const shouldClip = /* soundfont || */ clip;
  let duration = shouldClip ? hapDuration : bufferSource.buffer.duration / bufferSource.playbackRate.value;
  // "The computation of the offset into the sound is performed using the sound buffer's natural sample rate,
  // rather than the current playback rate, so even if the sound is playing at twice its normal speed,
  // the midway point through a 10-second audio buffer is still 5."
  const offset = begin * duration * bufferSource.playbackRate.value;
  duration = (end - begin) * duration;
  if (loop) {
    // loop the begin..end slice; numeric loop values stretch the total duration
    bufferSource.loop = true;
    bufferSource.loopStart = offset;
    bufferSource.loopEnd = offset + duration;
    duration = loop * duration;
  }
  const time = t + nudge;

  bufferSource.start(time, offset);
  if (cut !== undefined) {
    // cut groups: starting this source stops whatever was playing in the same group
    cutGroups[cut]?.stop(time); // fade out?
    cutGroups[cut] = bufferSource;
  }
  //chain.push(bufferSource);
  // NOTE(review): stop is scheduled relative to t, while start/env use time (= t + nudge) — confirm intended
  bufferSource.stop(t + duration + release);
  const adsr = getADSR(attack, decay, sustain, release, 1, time, time + duration);
  bufferSource.connect(adsr);
  //chain.push(adsr);
  return adsr;
}
|
||||
|
||||
/*const getSoundfontKey = (s) => {
|
||||
if (!globalThis.soundfontList) {
|
||||
// soundfont package not loaded
|
||||
return false;
|
||||
}
|
||||
if (globalThis.soundfontList?.instruments?.includes(s)) {
|
||||
return s;
|
||||
}
|
||||
// check if s is one of the soundfonts, which are loaded into globalThis, to avoid coupling both packages
|
||||
const nameIndex = globalThis.soundfontList?.instrumentNames?.indexOf(s);
|
||||
// convert number nameIndex (0-128) to 3 digit string (001-128)
|
||||
const name = nameIndex < 10 ? `00${nameIndex}` : nameIndex < 100 ? `0${nameIndex}` : nameIndex;
|
||||
if (nameIndex !== -1) {
|
||||
// TODO: indices of instrumentNames do not seem to match instruments
|
||||
return globalThis.soundfontList.instruments.find((instrument) => instrument.startsWith(name));
|
||||
}
|
||||
return;
|
||||
};*/
|
||||
|
||||
34
packages/webaudio/synth.mjs
Normal file
34
packages/webaudio/synth.mjs
Normal file
@ -0,0 +1,34 @@
|
||||
import { fromMidi, toMidi } from '@strudel.cycles/core';
|
||||
import { setSound } from './webaudio.mjs';
|
||||
import { getOscillator, gainNode, getADSR } from './helpers.mjs';
|
||||
|
||||
/**
 * Register the four basic waveforms ('sine', 'square', 'triangle', 'sawtooth')
 * in the sound map, so they are playable via `s` using a plain OscillatorNode.
 */
export function loadSynthSounds() {
  const waveforms = ['sine', 'square', 'triangle', 'sawtooth'];
  for (const waveform of waveforms) {
    setSound(waveform, ({ hap, duration, t }) => {
      // destructure adsr here, because the default should be different for synths and samples
      const { attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = hap.value;
      let { n, note, freq } = hap.value;
      // with synths, n and note are the same thing; default pitch is midi 36
      // NOTE(review): `||` means n = 0 also falls back to 36 — matches the file's convention
      n = note || n || 36;
      if (typeof n === 'string') {
        // note names become midi numbers, e.g. c3 => 48
        n = toMidi(n);
      }
      if (!freq && typeof n === 'number') {
        // derive frequency from the midi number unless given explicitly
        freq = fromMidi(n); // + 48);
      }
      // maybe pull out the above frequency resolution?? (there is also getFrequency but it has no default)
      const osc = getOscillator({ t, s: waveform, freq, duration, release });
      // level down oscillators as they are really loud compared to samples i've tested
      const leveled = gainNode(0.3);
      // envelope — TODO: make adsr work with samples without pops
      const adsr = getADSR(attack, decay, sustain, release, 1, t, t + duration);
      return osc.connect(leveled).connect(adsr);
    });
  }
}
|
||||
@ -5,16 +5,22 @@ This program is free software: you can redistribute it and/or modify it under th
|
||||
*/
|
||||
|
||||
import * as strudel from '@strudel.cycles/core';
|
||||
import { fromMidi, logger, toMidi } from '@strudel.cycles/core';
|
||||
import './feedbackdelay.mjs';
|
||||
import './reverb.mjs';
|
||||
import { getSampleBufferSource } from './sampler.mjs';
|
||||
const { Pattern } = strudel;
|
||||
import './vowel.mjs';
|
||||
import workletsUrl from './worklets.mjs?url';
|
||||
import { getFilter, gainNode } from './helpers.mjs';
|
||||
|
||||
// export const getAudioContext = () => Tone.getContext().rawContext;
|
||||
|
||||
export const soundMap = new Map();
|
||||
// onTrigger = ({ hap: Hap, t: number, deadline: number, duration: number, cps: number }) => AudioNode
|
||||
export function setSound(key, onTrigger) {
|
||||
soundMap.set(key, onTrigger);
|
||||
}
|
||||
export const resetLoadedSounds = () => soundMap.clear();
|
||||
|
||||
let audioContext;
|
||||
export const getAudioContext = () => {
|
||||
if (!audioContext) {
|
||||
@ -38,65 +44,6 @@ export const panic = () => {
|
||||
destination = null;
|
||||
};
|
||||
|
||||
const getFilter = (type, frequency, Q) => {
|
||||
const filter = getAudioContext().createBiquadFilter();
|
||||
filter.type = type;
|
||||
filter.frequency.value = frequency;
|
||||
filter.Q.value = Q;
|
||||
return filter;
|
||||
};
|
||||
|
||||
const getADSR = (attack, decay, sustain, release, velocity, begin, end) => {
|
||||
const gainNode = getAudioContext().createGain();
|
||||
gainNode.gain.setValueAtTime(0, begin);
|
||||
gainNode.gain.linearRampToValueAtTime(velocity, begin + attack); // attack
|
||||
gainNode.gain.linearRampToValueAtTime(sustain * velocity, begin + attack + decay); // sustain start
|
||||
gainNode.gain.setValueAtTime(sustain * velocity, end); // sustain end
|
||||
gainNode.gain.linearRampToValueAtTime(0, end + release); // release
|
||||
// for some reason, using exponential ramping creates little cracklings
|
||||
/* let t = begin;
|
||||
gainNode.gain.setValueAtTime(0, t);
|
||||
gainNode.gain.exponentialRampToValueAtTime(velocity, (t += attack));
|
||||
const sustainGain = Math.max(sustain * velocity, 0.001);
|
||||
gainNode.gain.exponentialRampToValueAtTime(sustainGain, (t += decay));
|
||||
if (end - begin < attack + decay) {
|
||||
gainNode.gain.cancelAndHoldAtTime(end);
|
||||
} else {
|
||||
gainNode.gain.setValueAtTime(sustainGain, end);
|
||||
}
|
||||
gainNode.gain.exponentialRampToValueAtTime(0.001, end + release); // release */
|
||||
return gainNode;
|
||||
};
|
||||
|
||||
const getOscillator = ({ s, freq, t, duration, release }) => {
|
||||
// make oscillator
|
||||
const o = getAudioContext().createOscillator();
|
||||
o.type = s || 'triangle';
|
||||
o.frequency.value = Number(freq);
|
||||
o.start(t);
|
||||
o.stop(t + duration + release);
|
||||
return o;
|
||||
};
|
||||
|
||||
const getSoundfontKey = (s) => {
|
||||
if (!globalThis.soundfontList) {
|
||||
// soundfont package not loaded
|
||||
return false;
|
||||
}
|
||||
if (globalThis.soundfontList?.instruments?.includes(s)) {
|
||||
return s;
|
||||
}
|
||||
// check if s is one of the soundfonts, which are loaded into globalThis, to avoid coupling both packages
|
||||
const nameIndex = globalThis.soundfontList?.instrumentNames?.indexOf(s);
|
||||
// convert number nameIndex (0-128) to 3 digit string (001-128)
|
||||
const name = nameIndex < 10 ? `00${nameIndex}` : nameIndex < 100 ? `0${nameIndex}` : nameIndex;
|
||||
if (nameIndex !== -1) {
|
||||
// TODO: indices of instrumentNames do not seem to match instruments
|
||||
return globalThis.soundfontList.instruments.find((instrument) => instrument.startsWith(name));
|
||||
}
|
||||
return;
|
||||
};
|
||||
|
||||
let workletsLoading;
|
||||
function loadWorklets() {
|
||||
if (workletsLoading) {
|
||||
@ -136,13 +83,6 @@ export async function initAudioOnFirstClick() {
|
||||
});
|
||||
}
|
||||
|
||||
function gainNode(value) {
|
||||
const node = getAudioContext().createGain();
|
||||
node.gain.value = value;
|
||||
return node;
|
||||
}
|
||||
const cutGroups = [];
|
||||
|
||||
let delays = {};
|
||||
function getDelay(orbit, delaytime, delayfeedback, t) {
|
||||
if (!delays[orbit]) {
|
||||
@ -188,13 +128,9 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
|
||||
let t = ac.currentTime + deadline;
|
||||
// destructure value
|
||||
let {
|
||||
freq,
|
||||
s,
|
||||
s = 'triangle',
|
||||
bank,
|
||||
sf,
|
||||
clip = 0, // if 1, samples will be cut off when the hap ends
|
||||
n = 0,
|
||||
note,
|
||||
source,
|
||||
gain = 0.8,
|
||||
// low pass
|
||||
cutoff,
|
||||
@ -210,17 +146,10 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
|
||||
crush,
|
||||
shape,
|
||||
pan,
|
||||
speed = 1, // sample playback speed
|
||||
begin = 0,
|
||||
end = 1,
|
||||
vowel,
|
||||
delay = 0,
|
||||
delayfeedback = 0.5,
|
||||
delaytime = 0.25,
|
||||
unit,
|
||||
nudge = 0, // TODO: is this in seconds?
|
||||
cut,
|
||||
loop,
|
||||
orbit = 1,
|
||||
room,
|
||||
size = 2,
|
||||
@ -232,87 +161,13 @@ export const webaudioOutput = async (hap, deadline, hapDuration, cps) => {
|
||||
if (bank && s) {
|
||||
s = `${bank}_${s}`;
|
||||
}
|
||||
if (!s || ['sine', 'square', 'triangle', 'sawtooth'].includes(s)) {
|
||||
// destructure adsr here, because the default should be different for synths and samples
|
||||
const { attack = 0.001, decay = 0.05, sustain = 0.6, release = 0.01 } = hap.value;
|
||||
// with synths, n and note are the same thing
|
||||
n = note || n || 36;
|
||||
if (typeof n === 'string') {
|
||||
n = toMidi(n); // e.g. c3 => 48
|
||||
}
|
||||
// get frequency
|
||||
if (!freq && typeof n === 'number') {
|
||||
freq = fromMidi(n); // + 48);
|
||||
}
|
||||
// make oscillator
|
||||
const o = getOscillator({ t, s, freq, duration: hapDuration, release });
|
||||
chain.push(o);
|
||||
// level down oscillators as they are really loud compared to samples i've tested
|
||||
chain.push(gainNode(0.3));
|
||||
// TODO: make adsr work with samples without pops
|
||||
// envelope
|
||||
const adsr = getADSR(attack, decay, sustain, release, 1, t, t + hapDuration);
|
||||
chain.push(adsr);
|
||||
if (soundMap.has(s)) {
|
||||
const node = await soundMap.get(s)({ hap, t, deadline, duration: hapDuration, cps });
|
||||
chain.push(node);
|
||||
} else if (source) {
|
||||
chain.push(source({ hap, t, deadline, duration: hapDuration, cps }));
|
||||
} else {
|
||||
// destructure adsr here, because the default should be different for synths and samples
|
||||
const { attack = 0.001, decay = 0.001, sustain = 1, release = 0.001 } = hap.value;
|
||||
// load sample
|
||||
if (speed === 0) {
|
||||
// no playback
|
||||
return;
|
||||
}
|
||||
if (!s) {
|
||||
console.warn('no sample specified');
|
||||
return;
|
||||
}
|
||||
const soundfont = getSoundfontKey(s);
|
||||
let bufferSource;
|
||||
|
||||
if (soundfont) {
|
||||
// is soundfont
|
||||
bufferSource = await globalThis.getFontBufferSource(soundfont, note || n, ac, freq);
|
||||
} else {
|
||||
// is sample from loaded samples(..)
|
||||
bufferSource = await getSampleBufferSource(s, n, note, speed, freq);
|
||||
}
|
||||
// asny stuff above took too long?
|
||||
if (ac.currentTime > t) {
|
||||
logger(`[sampler] still loading sound "${s}:${n}"`, 'highlight');
|
||||
// console.warn('sample still loading:', s, n);
|
||||
return;
|
||||
}
|
||||
if (!bufferSource) {
|
||||
console.warn('no buffer source');
|
||||
return;
|
||||
}
|
||||
bufferSource.playbackRate.value = Math.abs(speed) * bufferSource.playbackRate.value;
|
||||
if (unit === 'c') {
|
||||
// are there other units?
|
||||
bufferSource.playbackRate.value = bufferSource.playbackRate.value * bufferSource.buffer.duration * cps;
|
||||
}
|
||||
let duration = soundfont || clip ? hapDuration : bufferSource.buffer.duration / bufferSource.playbackRate.value;
|
||||
// "The computation of the offset into the sound is performed using the sound buffer's natural sample rate,
|
||||
// rather than the current playback rate, so even if the sound is playing at twice its normal speed,
|
||||
// the midway point through a 10-second audio buffer is still 5."
|
||||
const offset = begin * duration * bufferSource.playbackRate.value;
|
||||
duration = (end - begin) * duration;
|
||||
if (loop) {
|
||||
bufferSource.loop = true;
|
||||
bufferSource.loopStart = offset;
|
||||
bufferSource.loopEnd = offset + duration;
|
||||
duration = loop * duration;
|
||||
}
|
||||
t += nudge;
|
||||
|
||||
bufferSource.start(t, offset);
|
||||
if (cut !== undefined) {
|
||||
cutGroups[cut]?.stop(t); // fade out?
|
||||
cutGroups[cut] = bufferSource;
|
||||
}
|
||||
chain.push(bufferSource);
|
||||
bufferSource.stop(t + duration + release);
|
||||
const adsr = getADSR(attack, decay, sustain, release, 1, t, t + duration);
|
||||
chain.push(adsr);
|
||||
throw new Error(`sound ${s} not found! Is it loaded?`);
|
||||
}
|
||||
|
||||
// gain stage
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import * as strudel from '@strudel.cycles/core';
|
||||
const { Pattern } = strudel;
|
||||
import * as WebDirt from 'WebDirt';
|
||||
import { getLoadedSamples, loadBuffer, getLoadedBuffer } from '@strudel.cycles/webaudio';
|
||||
//import { loadBuffer, getLoadedBuffer } from '@strudel.cycles/webaudio';
|
||||
|
||||
let webDirt;
|
||||
|
||||
@ -63,7 +63,7 @@ export function loadWebDirt(config) {
|
||||
*/
|
||||
Pattern.prototype.webdirt = function () {
|
||||
// create a WebDirt object and initialize Web Audio context
|
||||
return this.onTrigger(async (time, e, currentTime) => {
|
||||
/* return this.onTrigger(async (time, e, currentTime) => {
|
||||
if (!webDirt) {
|
||||
throw new Error('WebDirt not initialized!');
|
||||
}
|
||||
@ -92,5 +92,5 @@ Pattern.prototype.webdirt = function () {
|
||||
webDirt.playSample(msg, deadline);
|
||||
}
|
||||
}
|
||||
});
|
||||
}); */
|
||||
};
|
||||
|
||||
@ -4,7 +4,6 @@ import { useEvent, cx } from '@strudel.cycles/react';
|
||||
// import { cx } from '@strudel.cycles/react';
|
||||
import { nanoid } from 'nanoid';
|
||||
import React, { useCallback, useLayoutEffect, useRef, useState } from 'react';
|
||||
import { loadedSamples } from './Repl';
|
||||
import { Reference } from './Reference';
|
||||
import { themes } from './themes.mjs';
|
||||
import { useSettings, settingsMap, setActiveFooter, defaultSettings } from '../settings.mjs';
|
||||
@ -196,14 +195,15 @@ function ConsoleTab({ log }) {
|
||||
function SamplesTab() {
|
||||
return (
|
||||
<div id="samples-tab" className="break-normal w-full px-4 dark:text-white text-stone-900">
|
||||
<span>{loadedSamples.length} banks loaded:</span>
|
||||
TODO: use nanostore with sampleMap
|
||||
{/* <span>{loadedSamples.length} banks loaded:</span>
|
||||
{loadedSamples.map(([name, samples]) => (
|
||||
<span key={name} className="cursor-pointer hover:opacity-50" onClick={() => {}}>
|
||||
{' '}
|
||||
{name}(
|
||||
{Array.isArray(samples) ? samples.length : typeof samples === 'object' ? Object.values(samples).length : 1}){' '}
|
||||
</span>
|
||||
))}
|
||||
))} */}
|
||||
</div>
|
||||
);
|
||||
}
|
||||
|
||||
@ -6,13 +6,7 @@ This program is free software: you can redistribute it and/or modify it under th
|
||||
|
||||
import { cleanupDraw, cleanupUi, controls, evalScope, getDrawContext, logger } from '@strudel.cycles/core';
|
||||
import { CodeMirror, cx, flash, useHighlighting, useStrudel, useKeydown } from '@strudel.cycles/react';
|
||||
import {
|
||||
getAudioContext,
|
||||
getLoadedSamples,
|
||||
initAudioOnFirstClick,
|
||||
resetLoadedSamples,
|
||||
webaudioOutput,
|
||||
} from '@strudel.cycles/webaudio';
|
||||
import { getAudioContext, initAudioOnFirstClick, resetLoadedSounds, webaudioOutput } from '@strudel.cycles/webaudio';
|
||||
import { createClient } from '@supabase/supabase-js';
|
||||
import { nanoid } from 'nanoid';
|
||||
import React, { createContext, useCallback, useEffect, useState } from 'react';
|
||||
@ -53,7 +47,6 @@ evalScope(
|
||||
...modules,
|
||||
);
|
||||
|
||||
export let loadedSamples = [];
|
||||
const presets = prebake();
|
||||
|
||||
let drawContext, clearCanvas;
|
||||
@ -62,11 +55,6 @@ if (typeof window !== 'undefined') {
|
||||
clearCanvas = () => drawContext.clearRect(0, 0, drawContext.canvas.height, drawContext.canvas.width);
|
||||
}
|
||||
|
||||
Promise.all([...modules, presets]).then((data) => {
|
||||
// console.log('modules and sample registry loade', data);
|
||||
loadedSamples = Object.entries(getLoadedSamples() || {});
|
||||
});
|
||||
|
||||
const getTime = () => getAudioContext().currentTime;
|
||||
|
||||
async function initCode() {
|
||||
@ -211,7 +199,7 @@ export function Repl({ embedded = false }) {
|
||||
const { code, name } = getRandomTune();
|
||||
logger(`[repl] ✨ loading random tune "${name}"`);
|
||||
clearCanvas();
|
||||
resetLoadedSamples();
|
||||
resetLoadedSounds();
|
||||
await prebake(); // declare default samples
|
||||
await evaluate(code, false);
|
||||
};
|
||||
|
||||
@ -1,9 +1,10 @@
|
||||
import { Pattern, toMidi, valueToMidi } from '@strudel.cycles/core';
|
||||
import { samples } from '@strudel.cycles/webaudio';
|
||||
import { loadSynthSounds, samples } from '@strudel.cycles/webaudio';
|
||||
|
||||
export async function prebake() {
|
||||
// https://archive.org/details/SalamanderGrandPianoV3
|
||||
// License: CC-by http://creativecommons.org/licenses/by/3.0/ Author: Alexander Holm
|
||||
loadSynthSounds();
|
||||
return await Promise.all([
|
||||
samples(`./piano.json`, `./piano/`),
|
||||
// https://github.com/sgossner/VCSL/
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user