note * .octaves and ramps to note
+ * over the duration of .pitchDecay.
+ * @example
+ * const synth = new Tone.MembraneSynth().toDestination();
+ * synth.triggerAttackRelease("C2", "8n");
+ * @category Instrument
+ */
+class MembraneSynth extends Synth {
+ constructor() {
+ super(optionsFromArguments(MembraneSynth.getDefaults(), arguments));
+ this.name = "MembraneSynth";
+ /**
+ * Portamento is ignored in this synth. Use pitchDecay instead.
+ */
+ this.portamento = 0;
+ const options = optionsFromArguments(MembraneSynth.getDefaults(), arguments);
+ this.pitchDecay = options.pitchDecay;
+ this.octaves = options.octaves;
+ readOnly(this, ["oscillator", "envelope"]);
+ }
+ static getDefaults() {
+ return deepMerge(Monophonic.getDefaults(), Synth.getDefaults(), {
+ envelope: {
+ attack: 0.001,
+ attackCurve: "exponential",
+ decay: 0.4,
+ release: 1.4,
+ sustain: 0.01,
+ },
+ octaves: 10,
+ oscillator: {
+ type: "sine",
+ },
+ pitchDecay: 0.05,
+ });
+ }
+ setNote(note, time) {
+ const seconds = this.toSeconds(time);
+ const hertz = this.toFrequency(note instanceof FrequencyClass ? note.toFrequency() : note);
+ const maxNote = hertz * this.octaves;
+ this.oscillator.frequency.setValueAtTime(maxNote, seconds);
+ this.oscillator.frequency.exponentialRampToValueAtTime(hertz, seconds + this.toSeconds(this.pitchDecay));
+ return this;
+ }
+ dispose() {
+ super.dispose();
+ return this;
+ }
+}
+__decorate([
+ range(0)
+], MembraneSynth.prototype, "octaves", void 0);
+__decorate([
+ timeRange(0)
+], MembraneSynth.prototype, "pitchDecay", void 0);
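+
+// A worked example of the sweep in setNote above, using the defaults
+// (octaves = 10, pitchDecay = 0.05): triggerAttack("C2") resolves to ~65.4 Hz,
+// so the oscillator jumps to ~654 Hz and ramps exponentially back down to
+// ~65.4 Hz over 50ms, the sweep that gives the drum sound its knock.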
+
+/**
+ * All of the classes or functions which are loaded into the AudioWorkletGlobalScope
+ */
+const workletContext = new Set();
+/**
+ * Add a class to the AudioWorkletGlobalScope
+ */
+function addToWorklet(classOrFunction) {
+ workletContext.add(classOrFunction);
+}
+/**
+ * Register a processor in the AudioWorkletGlobalScope with the given name
+ */
+function registerProcessor(name, classDesc) {
+ const processor = /* javascript */ `registerProcessor("${name}", ${classDesc})`;
+ workletContext.add(processor);
+}
+/**
+ * Get all of the modules which have been registered to the AudioWorkletGlobalScope
+ */
+function getWorkletGlobalScope() {
+ return Array.from(workletContext).join("\n");
+}
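+
+// A minimal sketch of how the accumulated source can be loaded ("rawContext"
+// is an assumed name for any BaseAudioContext; the library performs an
+// equivalent step internally before instantiating its worklet nodes):
+//
+//   const src = getWorkletGlobalScope();
+//   const url = URL.createObjectURL(new Blob([src], { type: "text/javascript" }));
+//   await rawContext.audioWorklet.addModule(url);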
+
+const toneAudioWorkletProcessor = /* javascript */ `
+ /**
+ * The base AudioWorkletProcessor for use in Tone.js. Works with the [[ToneAudioWorklet]].
+ */
+ class ToneAudioWorkletProcessor extends AudioWorkletProcessor {
+
+ constructor(options) {
+
+ super(options);
+ /**
+ * Whether the processor has been disposed. The process loop keeps the node alive until then.
+ */
+ this.disposed = false;
+ /**
+ * The number of samples in the processing block
+ */
+ this.blockSize = 128;
+ /**
+ * The sample rate of the context
+ */
+ this.sampleRate = sampleRate;
+
+ this.port.onmessage = (event) => {
+ // when it receives a dispose message, end the process loop
+ if (event.data === "dispose") {
+ this.disposed = true;
+ }
+ };
+ }
+ }
+`;
+addToWorklet(toneAudioWorkletProcessor);
+
+const singleIOProcess = /* javascript */ `
+ /**
+ * Abstract class for a single input/output processor.
+ * Has a 'generate' function which processes one sample at a time.
+ */
+ class SingleIOProcessor extends ToneAudioWorkletProcessor {
+
+ constructor(options) {
+ super(Object.assign(options, {
+ numberOfInputs: 1,
+ numberOfOutputs: 1
+ }));
+ /**
+ * Holds the name of the parameter and a single value of that
+ * parameter at the current sample
+ * @type { [name: string]: number }
+ */
+ this.params = {}
+ }
+
+ /**
+ * Generate an output sample from the input sample and parameters
+ * @abstract
+ * @param input number
+ * @param channel number
+ * @param parameters { [name: string]: number }
+ * @returns number
+ */
+ generate(){}
+
+ /**
+ * Update the private params object with the
+ * values of the parameters at the given index
+ * @param parameters { [name: string]: Float32Array },
+ * @param index number
+ */
+ updateParams(parameters, index) {
+ for (const paramName in parameters) {
+ const param = parameters[paramName];
+ if (param.length > 1) {
+ this.params[paramName] = parameters[paramName][index];
+ } else {
+ this.params[paramName] = parameters[paramName][0];
+ }
+ }
+ }
+
+ /**
+ * Process a block of audio, calling 'generate' once per sample per channel
+ * @param inputs Float32Array[][]
+ * @param outputs Float32Array[][]
+ */
+ process(inputs, outputs, parameters) {
+ const input = inputs[0];
+ const output = outputs[0];
+ // get the parameter values
+ const channelCount = Math.max(input && input.length || 0, output.length);
+ for (let sample = 0; sample < this.blockSize; sample++) {
+ this.updateParams(parameters, sample);
+ for (let channel = 0; channel < channelCount; channel++) {
+ const inputSample = input && input.length ? input[channel][sample] : 0;
+ output[channel][sample] = this.generate(inputSample, channel, this.params);
+ }
+ }
+ return !this.disposed;
+ }
+ };
+`;
+addToWorklet(singleIOProcess);
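+
+// Note on updateParams above: it handles both automation rates. An a-rate
+// parameter arrives as a 128-sample Float32Array and is read per-sample at
+// [index]; a k-rate parameter arrives as a single-element array and is read
+// once at [0]. The worklets below all declare "k-rate" parameters.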
+
+const delayLine = /* javascript */ `
+ /**
+ * A multichannel buffer for use within an AudioWorkletProcessor as a delay line
+ */
+ class DelayLine {
+
+ constructor(size, channels) {
+ this.buffer = [];
+ this.writeHead = []
+ this.size = size;
+
+ // create the empty channels
+ for (let i = 0; i < channels; i++) {
+ this.buffer[i] = new Float32Array(this.size);
+ this.writeHead[i] = 0;
+ }
+ }
+
+ /**
+ * Push a value onto the end
+ * @param channel number
+ * @param value number
+ */
+ push(channel, value) {
+ this.writeHead[channel] += 1;
+ if (this.writeHead[channel] >= this.size) {
+ this.writeHead[channel] = 0;
+ }
+ this.buffer[channel][this.writeHead[channel]] = value;
+ }
+
+ /**
+ * Get the recorded value of the channel given the delay
+ * @param channel number
+ * @param delay number delay samples
+ */
+ get(channel, delay) {
+ let readHead = this.writeHead[channel] - Math.floor(delay);
+ if (readHead < 0) {
+ readHead += this.size;
+ }
+ return this.buffer[channel][readHead];
+ }
+ }
+`;
+addToWorklet(delayLine);
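+
+// DelayLine is a per-channel ring buffer: get(channel, n) reads the value
+// written n pushes earlier. A hedged sketch of the intended use (names and
+// numbers are illustrative):
+//
+//   const line = new DelayLine(sampleRate, 1);     // 1 channel, 1s capacity
+//   line.push(0, inputSample);                     // write the current sample
+//   const past = line.get(0, 0.25 * sampleRate);   // read 250ms into the past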
+
+const workletName = "feedback-comb-filter";
+const feedbackCombFilter = /* javascript */ `
+ class FeedbackCombFilterWorklet extends SingleIOProcessor {
+
+ constructor(options) {
+ super(options);
+ this.delayLine = new DelayLine(this.sampleRate, options.channelCount || 2);
+ }
+
+ static get parameterDescriptors() {
+ return [{
+ name: "delayTime",
+ defaultValue: 0.1,
+ minValue: 0,
+ maxValue: 1,
+ automationRate: "k-rate"
+ }, {
+ name: "feedback",
+ defaultValue: 0.5,
+ minValue: 0,
+ maxValue: 0.9999,
+ automationRate: "k-rate"
+ }];
+ }
+
+ generate(input, channel, parameters) {
+ const delayedSample = this.delayLine.get(channel, parameters.delayTime * this.sampleRate);
+ this.delayLine.push(channel, input + delayedSample * parameters.feedback);
+ return delayedSample;
+ }
+ }
+`;
+registerProcessor(workletName, feedbackCombFilter);
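+
+// The generate method above reads the delayed sample *before* pushing the new
+// one in, which realizes the classic feedback comb difference equation
+//   y[n] = x[n - M] + feedback * y[n - M],  where M = delayTime * sampleRate.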
+
+/**
+ * Pass in an object which maps the note's pitch or midi value to the url,
+ * then you can trigger the attack and release of that note like other instruments.
+ * By automatically repitching the samples, it is possible to play pitches which
+ * were not explicitly included which can save loading time.
+ *
+ * For sample or buffer playback where repitching is not necessary,
+ * use [[Player]].
+ * @example
+ * const sampler = new Tone.Sampler({
+ * urls: {
+ * A1: "A1.mp3",
+ * A2: "A2.mp3",
+ * },
+ * baseUrl: "https://tonejs.github.io/audio/casio/",
+ * onload: () => {
+ * sampler.triggerAttackRelease(["C1", "E1", "G1", "B1"], 0.5);
+ * }
+ * }).toDestination();
+ * @category Instrument
+ */
+class Sampler extends Instrument {
+ constructor() {
+ super(optionsFromArguments(Sampler.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls"));
+ this.name = "Sampler";
+ /**
+ * A Map of the currently playing BufferSources, keyed by midi note
+ */
+ this._activeSources = new Map();
+ const options = optionsFromArguments(Sampler.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls");
+ const urlMap = {};
+ Object.keys(options.urls).forEach((note) => {
+ const noteNumber = parseInt(note, 10);
+ assert(isNote(note)
+ || (isNumber(noteNumber) && isFinite(noteNumber)), `url key is neither a note nor a midi pitch: ${note}`);
+ if (isNote(note)) {
+ // convert the note name to MIDI
+ const mid = new FrequencyClass(this.context, note).toMidi();
+ urlMap[mid] = options.urls[note];
+ }
+ else if (isNumber(noteNumber) && isFinite(noteNumber)) {
+ // otherwise, if it's a number, assume it's a midi pitch
+ urlMap[noteNumber] = options.urls[noteNumber];
+ }
+ });
+ this._buffers = new ToneAudioBuffers({
+ urls: urlMap,
+ onload: options.onload,
+ baseUrl: options.baseUrl,
+ onerror: options.onerror,
+ });
+ this.attack = options.attack;
+ this.release = options.release;
+ this.curve = options.curve;
+ // invoke the callback if it's already loaded
+ if (this._buffers.loaded) {
+ // invoke onload deferred
+ Promise.resolve().then(options.onload);
+ }
+ }
+ static getDefaults() {
+ return Object.assign(Instrument.getDefaults(), {
+ attack: 0,
+ baseUrl: "",
+ curve: "exponential",
+ onload: noOp,
+ onerror: noOp,
+ release: 0.1,
+ urls: {},
+ });
+ }
+ /**
+ * Returns the difference in steps between the given midi note and the closest sample.
+ */
+ _findClosest(midi) {
+ // searches within 8 octaves of the given midi note
+ const MAX_INTERVAL = 96;
+ let interval = 0;
+ while (interval < MAX_INTERVAL) {
+ // check above and below
+ if (this._buffers.has(midi + interval)) {
+ return -interval;
+ }
+ else if (this._buffers.has(midi - interval)) {
+ return interval;
+ }
+ interval++;
+ }
+ throw new Error(`No available buffers for note: ${midi}`);
+ }
+ /**
+ * @param notes The note to play, or an array of notes.
+ * @param time When to play the note
+ * @param velocity The velocity to play the sample back.
+ */
+ triggerAttack(notes, time, velocity = 1) {
+ this.log("triggerAttack", notes, time, velocity);
+ if (!Array.isArray(notes)) {
+ notes = [notes];
+ }
+ notes.forEach(note => {
+ const midiFloat = ftomf(new FrequencyClass(this.context, note).toFrequency());
+ const midi = Math.round(midiFloat);
+ const remainder = midiFloat - midi;
+ // find the closest note pitch
+ const difference = this._findClosest(midi);
+ const closestNote = midi - difference;
+ const buffer = this._buffers.get(closestNote);
+ const playbackRate = intervalToFrequencyRatio(difference + remainder);
+ // play that note
+ const source = new ToneBufferSource({
+ url: buffer,
+ context: this.context,
+ curve: this.curve,
+ fadeIn: this.attack,
+ fadeOut: this.release,
+ playbackRate,
+ }).connect(this.output);
+ source.start(time, 0, buffer.duration / playbackRate, velocity);
+ // add it to the active sources
+ if (!isArray(this._activeSources.get(midi))) {
+ this._activeSources.set(midi, []);
+ }
+ this._activeSources.get(midi).push(source);
+ // remove it when it's done
+ source.onended = () => {
+ if (this._activeSources && this._activeSources.has(midi)) {
+ const sources = this._activeSources.get(midi);
+ const index = sources.indexOf(source);
+ if (index !== -1) {
+ sources.splice(index, 1);
+ }
+ }
+ };
+ });
+ return this;
+ }
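+ // A worked repitching example for triggerAttack above (values illustrative):
+ // with a single sample at C4 (midi 60), triggerAttack("D4") rounds to midi 62,
+ // _findClosest(62) returns +2, closestNote = 60, and the buffer plays back at
+ // intervalToFrequencyRatio(2) = 2^(2/12) ≈ 1.122, a whole step up.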
+ /**
+ * @param notes The note to release, or an array of notes.
+ * @param time When to release the note.
+ */
+ triggerRelease(notes, time) {
+ this.log("triggerRelease", notes, time);
+ if (!Array.isArray(notes)) {
+ notes = [notes];
+ }
+ notes.forEach(note => {
+ const midi = new FrequencyClass(this.context, note).toMidi();
+ // find the note
+ if (this._activeSources.has(midi) && this._activeSources.get(midi).length) {
+ const sources = this._activeSources.get(midi);
+ time = this.toSeconds(time);
+ sources.forEach(source => {
+ source.stop(time);
+ });
+ this._activeSources.set(midi, []);
+ }
+ });
+ return this;
+ }
+ /**
+ * Release all currently active notes.
+ * @param time When to release the notes.
+ */
+ releaseAll(time) {
+ const computedTime = this.toSeconds(time);
+ this._activeSources.forEach(sources => {
+ while (sources.length) {
+ const source = sources.shift();
+ source.stop(computedTime);
+ }
+ });
+ return this;
+ }
+ sync() {
+ if (this._syncState()) {
+ this._syncMethod("triggerAttack", 1);
+ this._syncMethod("triggerRelease", 1);
+ }
+ return this;
+ }
+ /**
+ * Invoke the attack phase, then after the duration, invoke the release.
+ * @param notes The note to play and release, or an array of notes.
+ * @param duration The time the note should be held
+ * @param time When to start the attack
+ * @param velocity The velocity of the attack
+ */
+ triggerAttackRelease(notes, duration, time, velocity = 1) {
+ const computedTime = this.toSeconds(time);
+ this.triggerAttack(notes, computedTime, velocity);
+ if (isArray(duration)) {
+ assert(isArray(notes), "notes must be an array when duration is array");
+ notes.forEach((note, index) => {
+ const d = duration[Math.min(index, duration.length - 1)];
+ this.triggerRelease(note, computedTime + this.toSeconds(d));
+ });
+ }
+ else {
+ this.triggerRelease(notes, computedTime + this.toSeconds(duration));
+ }
+ return this;
+ }
+ /**
+ * Add a note to the sampler.
+ * @param note The buffer's pitch.
+ * @param url Either the url of the buffer, or a buffer which will be added with the given name.
+ * @param callback The callback to invoke when the url is loaded.
+ */
+ add(note, url, callback) {
+ assert(isNote(note) || isFinite(note), `note must be a pitch or midi: ${note}`);
+ if (isNote(note)) {
+ // convert the note name to MIDI
+ const mid = new FrequencyClass(this.context, note).toMidi();
+ this._buffers.add(mid, url, callback);
+ }
+ else {
+ // otherwise, if it's a number, assume it's a midi pitch
+ this._buffers.add(note, url, callback);
+ }
+ return this;
+ }
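+ // For example, sampler.add("C3", "path/to/C3.mp3") and sampler.add(48, "C3.mp3")
+ // target the same slot, since note names are converted to midi keys
+ // (the file paths here are hypothetical).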
+ /**
+ * If the buffers are loaded or not
+ */
+ get loaded() {
+ return this._buffers.loaded;
+ }
+ /**
+ * Clean up
+ */
+ dispose() {
+ super.dispose();
+ this._buffers.dispose();
+ this._activeSources.forEach(sources => {
+ sources.forEach(source => source.dispose());
+ });
+ this._activeSources.clear();
+ return this;
+ }
+}
+__decorate([
+ timeRange(0)
+], Sampler.prototype, "attack", void 0);
+__decorate([
+ timeRange(0)
+], Sampler.prototype, "release", void 0);
+
+/**
+ * Panner is an equal power Left/Right Panner. It is a wrapper around the StereoPannerNode.
+ * @example
+ * return Tone.Offline(() => {
+ * // move the input signal from right to left
+ * const panner = new Tone.Panner(1).toDestination();
+ * panner.pan.rampTo(-1, 0.5);
+ * const osc = new Tone.Oscillator(100).connect(panner).start();
+ * }, 0.5, 2);
+ * @category Component
+ */
+class Panner extends ToneAudioNode {
+ constructor() {
+ super(Object.assign(optionsFromArguments(Panner.getDefaults(), arguments, ["pan"])));
+ this.name = "Panner";
+ /**
+ * the panner node
+ */
+ this._panner = this.context.createStereoPanner();
+ this.input = this._panner;
+ this.output = this._panner;
+ const options = optionsFromArguments(Panner.getDefaults(), arguments, ["pan"]);
+ this.pan = new Param({
+ context: this.context,
+ param: this._panner.pan,
+ value: options.pan,
+ minValue: -1,
+ maxValue: 1,
+ });
+ // this is necessary for standardized-audio-context
+ // doesn't make any difference for the native AudioContext
+ // https://github.com/chrisguttandin/standardized-audio-context/issues/647
+ this._panner.channelCount = options.channelCount;
+ this._panner.channelCountMode = "explicit";
+ // initial value
+ readOnly(this, "pan");
+ }
+ static getDefaults() {
+ return Object.assign(ToneAudioNode.getDefaults(), {
+ pan: 0,
+ channelCount: 1,
+ });
+ }
+ dispose() {
+ super.dispose();
+ this._panner.disconnect();
+ this.pan.dispose();
+ return this;
+ }
+}
+
+const workletName$1 = "bit-crusher";
+const bitCrusherWorklet = /* javascript */ `
+ class BitCrusherWorklet extends SingleIOProcessor {
+
+ static get parameterDescriptors() {
+ return [{
+ name: "bits",
+ defaultValue: 12,
+ minValue: 1,
+ maxValue: 16,
+ automationRate: 'k-rate'
+ }];
+ }
+
+ generate(input, _channel, parameters) {
+ const step = Math.pow(0.5, parameters.bits - 1);
+ const val = step * Math.floor(input / step + 0.5);
+ return val;
+ }
+ }
+`;
+registerProcessor(workletName$1, bitCrusherWorklet);
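+
+// The generate method above rounds each sample to a grid of step = 0.5^(bits - 1).
+// For example, with bits = 4 the step is 0.125, so an input of 0.3 becomes
+// 0.125 * Math.floor(0.3 / 0.125 + 0.5) = 0.25; the "+ 0.5" makes the
+// quantization round-to-nearest, keeping the error within half a step.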
+
+/**
+ * Solo lets you isolate a specific audio stream. When an instance is set to `solo=true`,
+ * it will mute all other instances of Solo.
+ * @example
+ * const soloA = new Tone.Solo().toDestination();
+ * const oscA = new Tone.Oscillator("C4", "sawtooth").connect(soloA);
+ * const soloB = new Tone.Solo().toDestination();
+ * const oscB = new Tone.Oscillator("E4", "square").connect(soloB);
+ * soloA.solo = true;
+ * // no audio will pass through soloB
+ * @category Component
+ */
+class Solo extends ToneAudioNode {
+ constructor() {
+ super(optionsFromArguments(Solo.getDefaults(), arguments, ["solo"]));
+ this.name = "Solo";
+ const options = optionsFromArguments(Solo.getDefaults(), arguments, ["solo"]);
+ this.input = this.output = new Gain({
+ context: this.context,
+ });
+ if (!Solo._allSolos.has(this.context)) {
+ Solo._allSolos.set(this.context, new Set());
+ }
+ Solo._allSolos.get(this.context).add(this);
+ // set initially
+ this.solo = options.solo;
+ }
+ static getDefaults() {
+ return Object.assign(ToneAudioNode.getDefaults(), {
+ solo: false,
+ });
+ }
+ /**
+ * Isolates this instance and mutes all other instances of Solo.
+ * Only one instance can be soloed at a time. A soloed
+ * instance will report `solo=false` when another instance is soloed.
+ */
+ get solo() {
+ return this._isSoloed();
+ }
+ set solo(solo) {
+ if (solo) {
+ this._addSolo();
+ }
+ else {
+ this._removeSolo();
+ }
+ Solo._allSolos.get(this.context).forEach(instance => instance._updateSolo());
+ }
+ /**
+ * If the current instance is muted, i.e. another instance is soloed
+ */
+ get muted() {
+ return this.input.gain.value === 0;
+ }
+ /**
+ * Add this to the soloed array
+ */
+ _addSolo() {
+ if (!Solo._soloed.has(this.context)) {
+ Solo._soloed.set(this.context, new Set());
+ }
+ Solo._soloed.get(this.context).add(this);
+ }
+ /**
+ * Remove this from the soloed array
+ */
+ _removeSolo() {
+ if (Solo._soloed.has(this.context)) {
+ Solo._soloed.get(this.context).delete(this);
+ }
+ }
+ /**
+ * Is this on the soloed array
+ */
+ _isSoloed() {
+ return Solo._soloed.has(this.context) && Solo._soloed.get(this.context).has(this);
+ }
+ /**
+ * Returns true if no one is soloed
+ */
+ _noSolos() {
+ // either does not have any soloed added
+ return !Solo._soloed.has(this.context) ||
+ // or has a solo set but doesn't include any items
+ (Solo._soloed.has(this.context) && Solo._soloed.get(this.context).size === 0);
+ }
+ /**
+ * Solo the current instance and unsolo all other instances.
+ */
+ _updateSolo() {
+ if (this._isSoloed()) {
+ this.input.gain.value = 1;
+ }
+ else if (this._noSolos()) {
+ // no one is soloed
+ this.input.gain.value = 1;
+ }
+ else {
+ this.input.gain.value = 0;
+ }
+ }
+ dispose() {
+ super.dispose();
+ Solo._allSolos.get(this.context).delete(this);
+ this._removeSolo();
+ return this;
+ }
+}
+/**
+ * Hold all of the solo'ed tracks belonging to a specific context
+ */
+Solo._allSolos = new Map();
+/**
+ * Hold the currently solo'ed instance(s)
+ */
+Solo._soloed = new Map();
+
+/**
+ * PanVol is a Tone.Panner and Tone.Volume in one.
+ * @example
+ * // pan the incoming signal left and drop the volume
+ * const panVol = new Tone.PanVol(-0.25, -12).toDestination();
+ * const osc = new Tone.Oscillator().connect(panVol).start();
+ * @category Component
+ */
+class PanVol extends ToneAudioNode {
+ constructor() {
+ super(optionsFromArguments(PanVol.getDefaults(), arguments, ["pan", "volume"]));
+ this.name = "PanVol";
+ const options = optionsFromArguments(PanVol.getDefaults(), arguments, ["pan", "volume"]);
+ this._panner = this.input = new Panner({
+ context: this.context,
+ pan: options.pan,
+ channelCount: options.channelCount,
+ });
+ this.pan = this._panner.pan;
+ this._volume = this.output = new Volume({
+ context: this.context,
+ volume: options.volume,
+ });
+ this.volume = this._volume.volume;
+ // connections
+ this._panner.connect(this._volume);
+ this.mute = options.mute;
+ readOnly(this, ["pan", "volume"]);
+ }
+ static getDefaults() {
+ return Object.assign(ToneAudioNode.getDefaults(), {
+ mute: false,
+ pan: 0,
+ volume: 0,
+ channelCount: 1,
+ });
+ }
+ /**
+ * Mute/unmute the volume
+ */
+ get mute() {
+ return this._volume.mute;
+ }
+ set mute(mute) {
+ this._volume.mute = mute;
+ }
+ dispose() {
+ super.dispose();
+ this._panner.dispose();
+ this.pan.dispose();
+ this._volume.dispose();
+ this.volume.dispose();
+ return this;
+ }
+}
+
+/**
+ * Channel provides a channel strip interface with volume, pan, solo and mute controls.
+ * See [[PanVol]] and [[Solo]]
+ * @example
+ * // pan the incoming signal left and drop the volume 12db
+ * const channel = new Tone.Channel(-0.25, -12);
+ * @category Component
+ */
+class Channel extends ToneAudioNode {
+ constructor() {
+ super(optionsFromArguments(Channel.getDefaults(), arguments, ["volume", "pan"]));
+ this.name = "Channel";
+ const options = optionsFromArguments(Channel.getDefaults(), arguments, ["volume", "pan"]);
+ this._solo = this.input = new Solo({
+ solo: options.solo,
+ context: this.context,
+ });
+ this._panVol = this.output = new PanVol({
+ context: this.context,
+ pan: options.pan,
+ volume: options.volume,
+ mute: options.mute,
+ channelCount: options.channelCount
+ });
+ this.pan = this._panVol.pan;
+ this.volume = this._panVol.volume;
+ this._solo.connect(this._panVol);
+ readOnly(this, ["pan", "volume"]);
+ }
+ static getDefaults() {
+ return Object.assign(ToneAudioNode.getDefaults(), {
+ pan: 0,
+ volume: 0,
+ mute: false,
+ solo: false,
+ channelCount: 1,
+ });
+ }
+ /**
+ * Solo/unsolo the channel. Soloing is only relative to other [[Channel]]s and [[Solo]] instances
+ */
+ get solo() {
+ return this._solo.solo;
+ }
+ set solo(solo) {
+ this._solo.solo = solo;
+ }
+ /**
+ * If the current instance is muted, i.e. another instance is soloed,
+ * or the channel is muted
+ */
+ get muted() {
+ return this._solo.muted || this.mute;
+ }
+ /**
+ * Mute/unmute the volume
+ */
+ get mute() {
+ return this._panVol.mute;
+ }
+ set mute(mute) {
+ this._panVol.mute = mute;
+ }
+ /**
+ * Get the gain node belonging to the bus name. Create it if
+ * it doesn't exist
+ * @param name The bus name
+ */
+ _getBus(name) {
+ if (!Channel.buses.has(name)) {
+ Channel.buses.set(name, new Gain({ context: this.context }));
+ }
+ return Channel.buses.get(name);
+ }
+ /**
+ * Send audio to another channel using a string. `send` is a lot like
+ * [[connect]], except it uses a string instead of an object. This can
+ * be useful in large applications to decouple sections since [[send]]
+ * and [[receive]] can be invoked in separate places to connect two objects
+ * @param name The channel name to send the audio
+ * @param volume The amount of the signal to send.
+ * Defaults to 0db, i.e. send the entire signal
+ * @returns Returns the gain node of this connection.
+ */
+ send(name, volume = 0) {
+ const bus = this._getBus(name);
+ const sendKnob = new Gain({
+ context: this.context,
+ units: "decibels",
+ gain: volume,
+ });
+ this.connect(sendKnob);
+ sendKnob.connect(bus);
+ return sendKnob;
+ }
+ /**
+ * Receive audio from a channel which was connected with [[send]].
+ * @param name The channel name to receive audio from.
+ */
+ receive(name) {
+ const bus = this._getBus(name);
+ bus.connect(this);
+ return this;
+ }
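+ // A hedged send/receive sketch (the bus name and level are made up); the two
+ // channels never reference each other directly, only the named bus:
+ //
+ //   const chA = new Channel().toDestination();
+ //   chA.send("reverbBus", -12);   // tap chA's signal at -12db
+ //   const chB = new Channel().toDestination();
+ //   chB.receive("reverbBus");     // pick it up elsewhere in the app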
+ dispose() {
+ super.dispose();
+ this._panVol.dispose();
+ this.pan.dispose();
+ this.volume.dispose();
+ this._solo.dispose();
+ return this;
+ }
+}
+/**
+ * Store the send/receive channels by name.
+ */
+Channel.buses = new Map();
+
+/**
+ * Tone.Listener is a thin wrapper around the AudioListener. Listener combined
+ * with [[Panner3D]] makes up the Web Audio API's 3D panning system. Panner3D allows you
+ * to place sounds in 3D and Listener allows you to navigate the 3D sound environment from
+ * a first-person perspective. There is only one listener per audio context.
+ */
+class Listener extends ToneAudioNode {
+ constructor() {
+ super(...arguments);
+ this.name = "Listener";
+ this.positionX = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.positionX,
+ });
+ this.positionY = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.positionY,
+ });
+ this.positionZ = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.positionZ,
+ });
+ this.forwardX = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.forwardX,
+ });
+ this.forwardY = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.forwardY,
+ });
+ this.forwardZ = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.forwardZ,
+ });
+ this.upX = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.upX,
+ });
+ this.upY = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.upY,
+ });
+ this.upZ = new Param({
+ context: this.context,
+ param: this.context.rawContext.listener.upZ,
+ });
+ }
+ static getDefaults() {
+ return Object.assign(ToneAudioNode.getDefaults(), {
+ positionX: 0,
+ positionY: 0,
+ positionZ: 0,
+ forwardX: 0,
+ forwardY: 0,
+ forwardZ: -1,
+ upX: 0,
+ upY: 1,
+ upZ: 0,
+ });
+ }
+ dispose() {
+ super.dispose();
+ this.positionX.dispose();
+ this.positionY.dispose();
+ this.positionZ.dispose();
+ this.forwardX.dispose();
+ this.forwardY.dispose();
+ this.forwardZ.dispose();
+ this.upX.dispose();
+ this.upY.dispose();
+ this.upZ.dispose();
+ return this;
+ }
+}
+//-------------------------------------
+// INITIALIZATION
+//-------------------------------------
+onContextInit(context => {
+ context.listener = new Listener({ context });
+});
+onContextClose(context => {
+ context.listener.dispose();
+});
+
+/**
+ * The current audio context time of the global [[Context]].
+ * See [[Context.now]]
+ * @category Core
+ */
+function now() {
+ return getContext().now();
+}
+/**
+ * The current audio context time of the global [[Context]] without the [[Context.lookAhead]]
+ * See [[Context.immediate]]
+ * @category Core
+ */
+function immediate() {
+ return getContext().immediate();
+}
+/**
+ * The Transport object belonging to the global Tone.js Context.
+ * See [[Transport]]
+ * @category Core
+ */
+const Transport$1 = getContext().transport;
+/**
+ * The Transport object belonging to the global Tone.js Context.
+ * See [[Transport]]
+ * @category Core
+ */
+function getTransport() {
+ return getContext().transport;
+}
+/**
+ * The Destination (output) belonging to the global Tone.js Context.
+ * See [[Destination]]
+ * @category Core
+ */
+const Destination$1 = getContext().destination;
+/**
+ * @deprecated Use [[Destination]]
+ */
+const Master = getContext().destination;
+/**
+ * The Destination (output) belonging to the global Tone.js Context.
+ * See [[Destination]]
+ * @category Core
+ */
+function getDestination() {
+ return getContext().destination;
+}
+/**
+ * The [[Listener]] belonging to the global Tone.js Context.
+ * @category Core
+ */
+const Listener$1 = getContext().listener;
+/**
+ * The [[Listener]] belonging to the global Tone.js Context.
+ * @category Core
+ */
+function getListener() {
+ return getContext().listener;
+}
+/**
+ * Draw is used to synchronize the draw frame with the Transport's callbacks.
+ * See [[Draw]]
+ * @category Core
+ */
+const Draw$1 = getContext().draw;
+/**
+ * Get the singleton attached to the global context.
+ * Draw is used to synchronize the draw frame with the Transport's callbacks.
+ * See [[Draw]]
+ * @category Core
+ */
+function getDraw() {
+ return getContext().draw;
+}
+/**
+ * A reference to the global context
+ * See [[Context]]
+ */
+const context = getContext();
+/**
+ * Promise which resolves when all of the loading promises are resolved.
+ * Alias for static [[ToneAudioBuffer.loaded]] method.
+ * @category Core
+ */
+function loaded() {
+ return ToneAudioBuffer.loaded();
+}
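+
+// Typical use: construct buffer-backed nodes first, then `await loaded()`
+// before starting playback so no source fires with an empty buffer.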
+const Buffer = ToneAudioBuffer;
+const Buffers = ToneAudioBuffers;
+const BufferSource = ToneBufferSource;
+
+export { isArray as $, AudioToGain as A, TransportTimeClass as B, Clock as C, Monophonic as D, Synth as E, Frequency as F, Gain as G, omitFromObject as H, OmniOscillator as I, Envelope as J, writable as K, AmplitudeEnvelope as L, Midi as M, deepMerge as N, OfflineContext as O, Param as P, FMOscillator as Q, Instrument as R, Sampler as S, ToneAudioNode as T, getWorkletGlobalScope as U, Volume as V, WaveShaper as W, workletName as X, warn as Y, MidiClass as Z, __awaiter as _, ToneAudioBuffers as a, ToneWithContext as a0, StateTimeline as a1, TicksClass as a2, isBoolean as a3, isObject as a4, isUndef as a5, clamp as a6, Panner as a7, gainToDb as a8, dbToGain as a9, Context as aA, BaseContext as aB, FrequencyClass as aC, TimeClass as aD, Time as aE, Ticks as aF, TransportTime as aG, Emitter as aH, IntervalTimeline as aI, Timeline as aJ, isFunction as aK, AMOscillator as aL, PulseOscillator as aM, FatOscillator as aN, PWMOscillator as aO, Channel as aP, PanVol as aQ, Solo as aR, version as aS, workletName$1 as aa, ToneOscillatorNode as ab, theWindow as ac, isNote as ad, MembraneSynth as ae, getDestination as af, now as ag, immediate as ah, Transport$1 as ai, getTransport as aj, Destination$1 as ak, Master as al, Listener$1 as am, getListener as an, Draw$1 as ao, getDraw as ap, context as aq, loaded as ar, Buffer as as, Buffers as at, BufferSource as au, start as av, isSupported as aw, Debug as ax, ftom as ay, mtof as az, ToneBufferSource as b, ToneAudioBuffer as c, Source as d, assert as e, isNumber as f, getContext as g, isDefined as h, isString as i, connect as j, Signal as k, connectSeries as l, SignalOperator as m, Multiply as n, optionsFromArguments as o, disconnect as p, Oscillator as q, readOnly as r, setContext as s, connectSignal as t, noOp as u, Player as v, intervalToFrequencyRatio as w, assertRange as x, defaultArg as y, ToneConstantSource as z };
diff --git a/docs/_snowpack/pkg/common/webmidi.min-97732fd4.js b/docs/_snowpack/pkg/common/webmidi.min-97732fd4.js
new file mode 100644
index 00000000..f58b4b13
--- /dev/null
+++ b/docs/_snowpack/pkg/common/webmidi.min-97732fd4.js
@@ -0,0 +1,37 @@
+import { c as createCommonjsModule, a as commonjsGlobal } from './_commonjsHelpers-8c19dec8.js';
+
+var webmidi_min = createCommonjsModule(function (module) {
+/*
+
+WebMidi v2.5.3
+
+WebMidi.js helps you tame the Web MIDI API. Send and receive MIDI messages with ease. Control instruments with user-friendly functions (playNote, sendPitchBend, etc.). React to MIDI input with simple event listeners (noteon, pitchbend, controlchange, etc.).
+https://github.com/djipco/webmidi
+
+
+The MIT License (MIT)
+
+Copyright (c) 2015-2019, Jean-Philippe Côté
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
+associated documentation files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
+NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
+OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/
+
+
+!function(scope){function WebMidi(){if(WebMidi.prototype._singleton)throw new Error("WebMidi is a singleton, it cannot be instantiated directly.");(WebMidi.prototype._singleton=this)._inputs=[],this._outputs=[],this._userHandlers={},this._stateChangeQueue=[],this._processingStateChange=!1,this._midiInterfaceEvents=["connected","disconnected"],this._nrpnBuffer=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]],this._nrpnEventsEnabled=!0,this._nrpnTypes=["entry","increment","decrement"],this._notes=["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"],this._semitones={C:0,D:2,E:4,F:5,G:7,A:9,B:11},Object.defineProperties(this,{MIDI_SYSTEM_MESSAGES:{value:{sysex:240,timecode:241,songposition:242,songselect:243,tuningrequest:246,sysexend:247,clock:248,start:250,continue:251,stop:252,activesensing:254,reset:255,midimessage:0,unknownsystemmessage:-1},writable:!1,enumerable:!0,configurable:!1},MIDI_CHANNEL_MESSAGES:{value:{noteoff:8,noteon:9,keyaftertouch:10,controlchange:11,channelmode:11,nrpn:11,programchange:12,channelaftertouch:13,pitchbend:14},writable:!1,enumerable:!0,configurable:!1},MIDI_REGISTERED_PARAMETER:{value:{pitchbendrange:[0,0],channelfinetuning:[0,1],channelcoarsetuning:[0,2],tuningprogram:[0,3],tuningbank:[0,4],modulationrange:[0,5],azimuthangle:[61,0],elevationangle:[61,1],gain:[61,2],distanceratio:[61,3],maximumdistance:[61,4],maximumdistancegain:[61,5],referencedistanceratio:[61,6],panspreadangle:[61,7],rollangle:[61,8]},writable:!1,enumerable:!0,configurable:!1},MIDI_CONTROL_CHANGE_MESSAGES:{value:{bankselectcoarse:0,modulationwheelcoarse:1,breathcontrollercoarse:2,footcontrollercoarse:4,portamentotimecoarse:5,dataentrycoarse:6,volumecoarse:7,balancecoarse:8,pancoarse:10,expressioncoarse:11,effectcontrol1coarse:12,effectcontrol2coarse:13,generalpurposeslider1:16,generalpurposeslider2:17,generalpurposeslider3:18,generalpurposeslider4:19,bankselectfine:32,modulationwheelfine:33,breathcontrollerfine:34,footcontrollerfine:36,portamentotimefine:37,dataentryfine:38,volumefine:39,balancefine:40,panfine:42,expressionfine:43,effectcontrol1fine:44,effectcontrol2fine:45,holdpedal:64,portamento:65,sustenutopedal:66,softpedal:67,legatopedal:68,hold2pedal:69,soundvariation:70,resonance:71,soundreleasetime:72,soundattacktime:73,brightness:74,soundcontrol6:75,soundcontrol7:76,soundcontrol8:77,soundcontrol9:78,soundcontrol10:79,generalpurposebutton1:80,generalpurposebutton2:81,generalpurposebutton3:82,generalpurposebutton4:83,reverblevel:91,tremololevel:92,choruslevel:93,celestelevel:94,phaserlevel:95,databuttonincrement:96,databuttondecrement:97,nonregisteredparametercoarse:98,nonregisteredparameterfine:99,registeredparametercoarse:100,registeredparameterfine:101},writable:!1,enumerable:!0,configurable:!1},MIDI_NRPN_MESSAGES:{value:{entrymsb:6,entrylsb:38,increment:96,decrement:97,paramlsb:98,parammsb:99,nullactiveparameter:127},writable:!1,enumerable:!0,configurable:!1},MIDI_CHANNEL_MODE_MESSAGES:{value:{allsoundoff:120,resetallcontrollers:121,localcontrol:122,allnotesoff:123,omnimodeoff:124,omnimodeon:125,monomodeon:126,polymodeon:127},writable:!1,enumerable:!0,configurable:!1},octaveOffset:{value:0,writable:!0,enumerable:!0,configurable:!1}}),Object.defineProperties(this,{supported:{enumerable:!0,get:function(){return "requestMIDIAccess"in navigator}},enabled:{enumerable:!0,get:function(){return void 0!==this.interface}.bind(this)},inputs:{enumerable:!0,get:function(){return this._inputs}.bind(this)},outputs:{enumerable:!0,get:function(){return 
this._outputs}.bind(this)},sysexEnabled:{enumerable:!0,get:function(){return !(!this.interface||!this.interface.sysexEnabled)}.bind(this)},nrpnEventsEnabled:{enumerable:!0,get:function(){return !!this._nrpnEventsEnabled}.bind(this),set:function(enabled){return this._nrpnEventsEnabled=enabled,this._nrpnEventsEnabled}},nrpnTypes:{enumerable:!0,get:function(){return this._nrpnTypes}.bind(this)},time:{enumerable:!0,get:function(){return performance.now()}}});}var wm=new WebMidi;function Input(midiInput){var that=this;this._userHandlers={channel:{},system:{}},this._midiInput=midiInput,Object.defineProperties(this,{connection:{enumerable:!0,get:function(){return that._midiInput.connection}},id:{enumerable:!0,get:function(){return that._midiInput.id}},manufacturer:{enumerable:!0,get:function(){return that._midiInput.manufacturer}},name:{enumerable:!0,get:function(){return that._midiInput.name}},state:{enumerable:!0,get:function(){return that._midiInput.state}},type:{enumerable:!0,get:function(){return that._midiInput.type}}}),this._initializeUserHandlers(),this._midiInput.onmidimessage=this._onMidiMessage.bind(this);}function Output(midiOutput){var that=this;this._midiOutput=midiOutput,Object.defineProperties(this,{connection:{enumerable:!0,get:function(){return that._midiOutput.connection}},id:{enumerable:!0,get:function(){return that._midiOutput.id}},manufacturer:{enumerable:!0,get:function(){return that._midiOutput.manufacturer}},name:{enumerable:!0,get:function(){return that._midiOutput.name}},state:{enumerable:!0,get:function(){return that._midiOutput.state}},type:{enumerable:!0,get:function(){return that._midiOutput.type}}});}WebMidi.prototype.enable=function(callback,sysex){this.enabled||(this.supported?navigator.requestMIDIAccess({sysex:sysex}).then(function(midiAccess){var promiseTimeout,events=[],promises=[];this.interface=midiAccess,this._resetInterfaceUserHandlers(),this.interface.onstatechange=function(e){events.push(e);};for(var inputs=midiAccess.inputs.values(),input=inputs.next();input&&!input.done;input=inputs.next())promises.push(input.value.open());for(var outputs=midiAccess.outputs.values(),output=outputs.next();output&&!output.done;output=outputs.next())promises.push(output.value.open());function onPortsOpen(){clearTimeout(promiseTimeout),this._updateInputsAndOutputs(),this.interface.onstatechange=this._onInterfaceStateChange.bind(this),"function"==typeof callback&&callback.call(this),events.forEach(function(event){this._onInterfaceStateChange(event);}.bind(this));}promiseTimeout=setTimeout(onPortsOpen.bind(this),200),Promise&&Promise.all(promises).catch(function(err){}).then(onPortsOpen.bind(this));}.bind(this),function(err){"function"==typeof callback&&callback.call(this,err);}.bind(this)):"function"==typeof callback&&callback(new Error("The Web MIDI API is not supported by your browser.")));},WebMidi.prototype.disable=function(){if(!this.supported)throw new Error("The Web MIDI API is not supported by your browser.");this.enabled&&(this.removeListener(),this.inputs.forEach(function(input){input.removeListener();})),this.interface&&(this.interface.onstatechange=void 0),this.interface=void 0,this._inputs=[],this._outputs=[],this._nrpnEventsEnabled=!0,this._resetInterfaceUserHandlers();},WebMidi.prototype.addListener=function(type,listener){if(!this.enabled)throw new Error("WebMidi must be enabled before adding event listeners.");if("function"!=typeof listener)throw new TypeError("The 'listener' parameter must be a 
function.");if(!(0<=this._midiInterfaceEvents.indexOf(type)))throw new TypeError("The specified event type is not supported.");return this._userHandlers[type].push(listener),this},WebMidi.prototype.hasListener=function(type,listener){if(!this.enabled)throw new Error("WebMidi must be enabled before checking event listeners.");if("function"!=typeof listener)throw new TypeError("The 'listener' parameter must be a function.");if(!(0<=this._midiInterfaceEvents.indexOf(type)))throw new TypeError("The specified event type is not supported.");for(var o=0;onote * .octaves and ramps to note
- * over the duration of .pitchDecay.
- * @example
- * const synth = new Tone.MembraneSynth().toDestination();
- * synth.triggerAttackRelease("C2", "8n");
- * @category Instrument
- */
-class MembraneSynth extends Synth {
- constructor() {
- super(optionsFromArguments(MembraneSynth.getDefaults(), arguments));
- this.name = "MembraneSynth";
- /**
- * Portamento is ignored in this synth. use pitch decay instead.
- */
- this.portamento = 0;
- const options = optionsFromArguments(MembraneSynth.getDefaults(), arguments);
- this.pitchDecay = options.pitchDecay;
- this.octaves = options.octaves;
- readOnly(this, ["oscillator", "envelope"]);
- }
- static getDefaults() {
- return deepMerge(Monophonic.getDefaults(), Synth.getDefaults(), {
- envelope: {
- attack: 0.001,
- attackCurve: "exponential",
- decay: 0.4,
- release: 1.4,
- sustain: 0.01,
- },
- octaves: 10,
- oscillator: {
- type: "sine",
- },
- pitchDecay: 0.05,
- });
- }
- setNote(note, time) {
- const seconds = this.toSeconds(time);
- const hertz = this.toFrequency(note instanceof FrequencyClass ? note.toFrequency() : note);
- const maxNote = hertz * this.octaves;
- this.oscillator.frequency.setValueAtTime(maxNote, seconds);
- this.oscillator.frequency.exponentialRampToValueAtTime(hertz, seconds + this.toSeconds(this.pitchDecay));
- return this;
- }
- dispose() {
- super.dispose();
- return this;
- }
-}
-__decorate([
- range(0)
-], MembraneSynth.prototype, "octaves", void 0);
-__decorate([
- timeRange(0)
-], MembraneSynth.prototype, "pitchDecay", void 0);
-
/**
* Tone.NoiseSynth is composed of [[Noise]] through an [[AmplitudeEnvelope]].
* ```
@@ -21427,30 +2778,6 @@ class NoiseSynth extends Instrument {
}
}
-/**
- * All of the classes or functions which are loaded into the AudioWorkletGlobalScope
- */
-const workletContext = new Set();
-/**
- * Add a class to the AudioWorkletGlobalScope
- */
-function addToWorklet(classOrFunction) {
- workletContext.add(classOrFunction);
-}
-/**
- * Register a processor in the AudioWorkletGlobalScope with the given name
- */
-function registerProcessor(name, classDesc) {
- const processor = /* javascript */ `registerProcessor("${name}", ${classDesc})`;
- workletContext.add(processor);
-}
-/**
- * Get all of the modules which have been registered to the AudioWorkletGlobalScope
- */
-function getWorkletGlobalScope() {
- return Array.from(workletContext).join("\n");
-}
-
class ToneAudioWorklet extends ToneAudioNode {
constructor(options) {
super(options);
@@ -21488,190 +2815,6 @@ class ToneAudioWorklet extends ToneAudioNode {
}
}
-const toneAudioWorkletProcessor = /* javascript */ `
- /**
- * The base AudioWorkletProcessor for use in Tone.js. Works with the [[ToneAudioWorklet]].
- */
- class ToneAudioWorkletProcessor extends AudioWorkletProcessor {
-
- constructor(options) {
-
- super(options);
- /**
- * If the processor was disposed or not. Keep alive until it's disposed.
- */
- this.disposed = false;
- /**
- * The number of samples in the processing block
- */
- this.blockSize = 128;
- /**
- * the sample rate
- */
- this.sampleRate = sampleRate;
-
- this.port.onmessage = (event) => {
- // when it receives a dispose
- if (event.data === "dispose") {
- this.disposed = true;
- }
- };
- }
- }
-`;
-addToWorklet(toneAudioWorkletProcessor);
-
-const singleIOProcess = /* javascript */ `
- /**
- * Abstract class for a single input/output processor.
- * has a 'generate' function which processes one sample at a time
- */
- class SingleIOProcessor extends ToneAudioWorkletProcessor {
-
- constructor(options) {
- super(Object.assign(options, {
- numberOfInputs: 1,
- numberOfOutputs: 1
- }));
- /**
- * Holds the name of the parameter and a single value of that
- * parameter at the current sample
- * @type { [name: string]: number }
- */
- this.params = {}
- }
-
- /**
- * Generate an output sample from the input sample and parameters
- * @abstract
- * @param input number
- * @param channel number
- * @param parameters { [name: string]: number }
- * @returns number
- */
- generate(){}
-
- /**
- * Update the private params object with the
- * values of the parameters at the given index
- * @param parameters { [name: string]: Float32Array },
- * @param index number
- */
- updateParams(parameters, index) {
- for (const paramName in parameters) {
- const param = parameters[paramName];
- if (param.length > 1) {
- this.params[paramName] = parameters[paramName][index];
- } else {
- this.params[paramName] = parameters[paramName][0];
- }
- }
- }
-
- /**
- * Process a single frame of the audio
- * @param inputs Float32Array[][]
- * @param outputs Float32Array[][]
- */
- process(inputs, outputs, parameters) {
- const input = inputs[0];
- const output = outputs[0];
- // get the parameter values
- const channelCount = Math.max(input && input.length || 0, output.length);
- for (let sample = 0; sample < this.blockSize; sample++) {
- this.updateParams(parameters, sample);
- for (let channel = 0; channel < channelCount; channel++) {
- const inputSample = input && input.length ? input[channel][sample] : 0;
- output[channel][sample] = this.generate(inputSample, channel, this.params);
- }
- }
- return !this.disposed;
- }
- };
-`;
-addToWorklet(singleIOProcess);
-
-const delayLine = /* javascript */ `
- /**
- * A multichannel buffer for use within an AudioWorkletProcessor as a delay line
- */
- class DelayLine {
-
- constructor(size, channels) {
- this.buffer = [];
- this.writeHead = []
- this.size = size;
-
- // create the empty channels
- for (let i = 0; i < channels; i++) {
- this.buffer[i] = new Float32Array(this.size);
- this.writeHead[i] = 0;
- }
- }
-
- /**
- * Push a value onto the end
- * @param channel number
- * @param value number
- */
- push(channel, value) {
- this.writeHead[channel] += 1;
- if (this.writeHead[channel] > this.size) {
- this.writeHead[channel] = 0;
- }
- this.buffer[channel][this.writeHead[channel]] = value;
- }
-
- /**
- * Get the recorded value of the channel given the delay
- * @param channel number
- * @param delay number delay samples
- */
- get(channel, delay) {
- let readHead = this.writeHead[channel] - Math.floor(delay);
- if (readHead < 0) {
- readHead += this.size;
- }
- return this.buffer[channel][readHead];
- }
- }
-`;
-addToWorklet(delayLine);
-
-const workletName = "feedback-comb-filter";
-const feedbackCombFilter = /* javascript */ `
- class FeedbackCombFilterWorklet extends SingleIOProcessor {
-
- constructor(options) {
- super(options);
- this.delayLine = new DelayLine(this.sampleRate, options.channelCount || 2);
- }
-
- static get parameterDescriptors() {
- return [{
- name: "delayTime",
- defaultValue: 0.1,
- minValue: 0,
- maxValue: 1,
- automationRate: "k-rate"
- }, {
- name: "feedback",
- defaultValue: 0.5,
- minValue: 0,
- maxValue: 0.9999,
- automationRate: "k-rate"
- }];
- }
-
- generate(input, channel, parameters) {
- const delayedSample = this.delayLine.get(channel, parameters.delayTime * this.sampleRate);
- this.delayLine.push(channel, input + delayedSample * parameters.feedback);
- return delayedSample;
- }
- }
-`;
-registerProcessor(workletName, feedbackCombFilter);
-
/**
* Comb filters are basic building blocks for physical modeling. Read more
* about comb filters on [CCRMA's website](https://ccrma.stanford.edu/~jos/pasp/Feedback_Comb_Filters.html).
@@ -22254,254 +3397,6 @@ class PolySynth extends Instrument {
}
}
-/**
- * Pass in an object which maps the note's pitch or midi value to the url,
- * then you can trigger the attack and release of that note like other instruments.
- * By automatically repitching the samples, it is possible to play pitches which
- * were not explicitly included which can save loading time.
- *
- * For sample or buffer playback where repitching is not necessary,
- * use [[Player]].
- * @example
- * const sampler = new Tone.Sampler({
- * urls: {
- * A1: "A1.mp3",
- * A2: "A2.mp3",
- * },
- * baseUrl: "https://tonejs.github.io/audio/casio/",
- * onload: () => {
- * sampler.triggerAttackRelease(["C1", "E1", "G1", "B1"], 0.5);
- * }
- * }).toDestination();
- * @category Instrument
- */
-class Sampler extends Instrument {
- constructor() {
- super(optionsFromArguments(Sampler.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls"));
- this.name = "Sampler";
- /**
- * The object of all currently playing BufferSources
- */
- this._activeSources = new Map();
- const options = optionsFromArguments(Sampler.getDefaults(), arguments, ["urls", "onload", "baseUrl"], "urls");
- const urlMap = {};
- Object.keys(options.urls).forEach((note) => {
- const noteNumber = parseInt(note, 10);
- assert(isNote(note)
- || (isNumber(noteNumber) && isFinite(noteNumber)), `url key is neither a note or midi pitch: ${note}`);
- if (isNote(note)) {
- // convert the note name to MIDI
- const mid = new FrequencyClass(this.context, note).toMidi();
- urlMap[mid] = options.urls[note];
- }
- else if (isNumber(noteNumber) && isFinite(noteNumber)) {
- // otherwise if it's numbers assume it's midi
- urlMap[noteNumber] = options.urls[noteNumber];
- }
- });
- this._buffers = new ToneAudioBuffers({
- urls: urlMap,
- onload: options.onload,
- baseUrl: options.baseUrl,
- onerror: options.onerror,
- });
- this.attack = options.attack;
- this.release = options.release;
- this.curve = options.curve;
- // invoke the callback if it's already loaded
- if (this._buffers.loaded) {
- // invoke onload deferred
- Promise.resolve().then(options.onload);
- }
- }
- static getDefaults() {
- return Object.assign(Instrument.getDefaults(), {
- attack: 0,
- baseUrl: "",
- curve: "exponential",
- onload: noOp,
- onerror: noOp,
- release: 0.1,
- urls: {},
- });
- }
- /**
- * Returns the difference in steps between the given midi note at the closets sample.
- */
- _findClosest(midi) {
- // searches within 8 octaves of the given midi note
- const MAX_INTERVAL = 96;
- let interval = 0;
- while (interval < MAX_INTERVAL) {
- // check above and below
- if (this._buffers.has(midi + interval)) {
- return -interval;
- }
- else if (this._buffers.has(midi - interval)) {
- return interval;
- }
- interval++;
- }
- throw new Error(`No available buffers for note: ${midi}`);
- }
- /**
- * @param notes The note to play, or an array of notes.
- * @param time When to play the note
- * @param velocity The velocity to play the sample back.
- */
- triggerAttack(notes, time, velocity = 1) {
- this.log("triggerAttack", notes, time, velocity);
- if (!Array.isArray(notes)) {
- notes = [notes];
- }
- notes.forEach(note => {
- const midiFloat = ftomf(new FrequencyClass(this.context, note).toFrequency());
- const midi = Math.round(midiFloat);
- const remainder = midiFloat - midi;
- // find the closest note pitch
- const difference = this._findClosest(midi);
- const closestNote = midi - difference;
- const buffer = this._buffers.get(closestNote);
- const playbackRate = intervalToFrequencyRatio(difference + remainder);
- // play that note
- const source = new ToneBufferSource({
- url: buffer,
- context: this.context,
- curve: this.curve,
- fadeIn: this.attack,
- fadeOut: this.release,
- playbackRate,
- }).connect(this.output);
- source.start(time, 0, buffer.duration / playbackRate, velocity);
- // add it to the active sources
- if (!isArray(this._activeSources.get(midi))) {
- this._activeSources.set(midi, []);
- }
- this._activeSources.get(midi).push(source);
- // remove it when it's done
- source.onended = () => {
- if (this._activeSources && this._activeSources.has(midi)) {
- const sources = this._activeSources.get(midi);
- const index = sources.indexOf(source);
- if (index !== -1) {
- sources.splice(index, 1);
- }
- }
- };
- });
- return this;
- }
- /**
- * @param notes The note to release, or an array of notes.
- * @param time When to release the note.
- */
- triggerRelease(notes, time) {
- this.log("triggerRelease", notes, time);
- if (!Array.isArray(notes)) {
- notes = [notes];
- }
- notes.forEach(note => {
- const midi = new FrequencyClass(this.context, note).toMidi();
- // find the note
- if (this._activeSources.has(midi) && this._activeSources.get(midi).length) {
- const sources = this._activeSources.get(midi);
- time = this.toSeconds(time);
- sources.forEach(source => {
- source.stop(time);
- });
- this._activeSources.set(midi, []);
- }
- });
- return this;
- }
- /**
- * Release all currently active notes.
- * @param time When to release the notes.
- */
- releaseAll(time) {
- const computedTime = this.toSeconds(time);
- this._activeSources.forEach(sources => {
- while (sources.length) {
- const source = sources.shift();
- source.stop(computedTime);
- }
- });
- return this;
- }
- sync() {
- if (this._syncState()) {
- this._syncMethod("triggerAttack", 1);
- this._syncMethod("triggerRelease", 1);
- }
- return this;
- }
- /**
- * Invoke the attack phase, then after the duration, invoke the release.
- * @param notes The note to play and release, or an array of notes.
- * @param duration The time the note should be held
- * @param time When to start the attack
- * @param velocity The velocity of the attack
- */
- triggerAttackRelease(notes, duration, time, velocity = 1) {
- const computedTime = this.toSeconds(time);
- this.triggerAttack(notes, computedTime, velocity);
- if (isArray(duration)) {
- assert(isArray(notes), "notes must be an array when duration is array");
- notes.forEach((note, index) => {
- const d = duration[Math.min(index, duration.length - 1)];
- this.triggerRelease(note, computedTime + this.toSeconds(d));
- });
- }
- else {
- this.triggerRelease(notes, computedTime + this.toSeconds(duration));
- }
- return this;
- }
- /**
- * Add a note to the sampler.
- * @param note The buffer's pitch.
- * @param url Either the url of the buffer, or a buffer which will be added with the given name.
- * @param callback The callback to invoke when the url is loaded.
- */
- add(note, url, callback) {
- assert(isNote(note) || isFinite(note), `note must be a pitch or midi: ${note}`);
- if (isNote(note)) {
- // convert the note name to MIDI
- const mid = new FrequencyClass(this.context, note).toMidi();
- this._buffers.add(mid, url, callback);
- }
- else {
- // otherwise if it's numbers assume it's midi
- this._buffers.add(note, url, callback);
- }
- return this;
- }
- /**
- * If the buffers are loaded or not
- */
- get loaded() {
- return this._buffers.loaded;
- }
- /**
- * Clean up
- */
- dispose() {
- super.dispose();
- this._buffers.dispose();
- this._activeSources.forEach(sources => {
- sources.forEach(source => source.dispose());
- });
- this._activeSources.clear();
- return this;
- }
-}
-__decorate([
- timeRange(0)
-], Sampler.prototype, "attack", void 0);
-__decorate([
- timeRange(0)
-], Sampler.prototype, "release", void 0);
-
/**
* ToneEvent abstracts away this.context.transport.schedule and provides a schedulable
* callback for a single or repeatable events along the timeline.
@@ -24124,57 +5019,6 @@ class AutoFilter extends LFOEffect {
}
}
-/**
- * Panner is an equal power Left/Right Panner. It is a wrapper around the StereoPannerNode.
- * @example
- * return Tone.Offline(() => {
- * // move the input signal from right to left
- * const panner = new Tone.Panner(1).toDestination();
- * panner.pan.rampTo(-1, 0.5);
- * const osc = new Tone.Oscillator(100).connect(panner).start();
- * }, 0.5, 2);
- * @category Component
- */
-class Panner extends ToneAudioNode {
- constructor() {
- super(Object.assign(optionsFromArguments(Panner.getDefaults(), arguments, ["pan"])));
- this.name = "Panner";
- /**
- * the panner node
- */
- this._panner = this.context.createStereoPanner();
- this.input = this._panner;
- this.output = this._panner;
- const options = optionsFromArguments(Panner.getDefaults(), arguments, ["pan"]);
- this.pan = new Param({
- context: this.context,
- param: this._panner.pan,
- value: options.pan,
- minValue: -1,
- maxValue: 1,
- });
- // this is necessary for standardized-audio-context
- // doesn't make any difference for the native AudioContext
- // https://github.com/chrisguttandin/standardized-audio-context/issues/647
- this._panner.channelCount = options.channelCount;
- this._panner.channelCountMode = "explicit";
- // initial value
- readOnly(this, "pan");
- }
- static getDefaults() {
- return Object.assign(ToneAudioNode.getDefaults(), {
- pan: 0,
- channelCount: 1,
- });
- }
- dispose() {
- super.dispose();
- this._panner.disconnect();
- this.pan.dispose();
- return this;
- }
-}
-
/**
* AutoPanner is a [[Panner]] with an [[LFO]] connected to the pan amount.
* [Related Reading](https://www.ableton.com/en/blog/autopan-chopper-effect-and-more-liveschool/).
@@ -24383,29 +5227,6 @@ class AutoWah extends Effect {
}
}
-const workletName$1 = "bit-crusher";
-const bitCrusherWorklet = /* javascript */ `
- class BitCrusherWorklet extends SingleIOProcessor {
-
- static get parameterDescriptors() {
- return [{
- name: "bits",
- defaultValue: 12,
- minValue: 1,
- maxValue: 16,
- automationRate: 'k-rate'
- }];
- }
-
- generate(input, _channel, parameters) {
- const step = Math.pow(0.5, parameters.bits - 1);
- const val = step * Math.floor(input / step + 0.5);
- return val;
- }
- }
-`;
-registerProcessor(workletName$1, bitCrusherWorklet);
-
/**
* BitCrusher down-samples the incoming signal to a different bit depth.
* Lowering the bit depth of the signal creates distortion. Read more about BitCrushing
@@ -26501,298 +7322,6 @@ class Waveform extends MeterBase {
}
}
-/**
- * Solo lets you isolate a specific audio stream. When an instance is set to `solo=true`,
- * it will mute all other instances of Solo.
- * @example
- * const soloA = new Tone.Solo().toDestination();
- * const oscA = new Tone.Oscillator("C4", "sawtooth").connect(soloA);
- * const soloB = new Tone.Solo().toDestination();
- * const oscB = new Tone.Oscillator("E4", "square").connect(soloB);
- * soloA.solo = true;
- * // no audio will pass through soloB
- * @category Component
- */
-class Solo extends ToneAudioNode {
- constructor() {
- super(optionsFromArguments(Solo.getDefaults(), arguments, ["solo"]));
- this.name = "Solo";
- const options = optionsFromArguments(Solo.getDefaults(), arguments, ["solo"]);
- this.input = this.output = new Gain({
- context: this.context,
- });
- if (!Solo._allSolos.has(this.context)) {
- Solo._allSolos.set(this.context, new Set());
- }
- Solo._allSolos.get(this.context).add(this);
- // set initially
- this.solo = options.solo;
- }
- static getDefaults() {
- return Object.assign(ToneAudioNode.getDefaults(), {
- solo: false,
- });
- }
- /**
- * Isolates this instance and mutes all other instances of Solo.
- * Multiple instances can be soloed at the same time; while the set of
- * soloed instances is non-empty, every non-soloed instance is muted.
- */
- get solo() {
- return this._isSoloed();
- }
- set solo(solo) {
- if (solo) {
- this._addSolo();
- }
- else {
- this._removeSolo();
- }
- Solo._allSolos.get(this.context).forEach(instance => instance._updateSolo());
- }
- /**
- * If the current instance is muted, i.e. another instance is soloed
- */
- get muted() {
- return this.input.gain.value === 0;
- }
- /**
- * Add this instance to the set of soloed instances
- */
- _addSolo() {
- if (!Solo._soloed.has(this.context)) {
- Solo._soloed.set(this.context, new Set());
- }
- Solo._soloed.get(this.context).add(this);
- }
- /**
- * Remove this instance from the set of soloed instances
- */
- _removeSolo() {
- if (Solo._soloed.has(this.context)) {
- Solo._soloed.get(this.context).delete(this);
- }
- }
- /**
- * Whether this instance is in the soloed set
- */
- _isSoloed() {
- return Solo._soloed.has(this.context) && Solo._soloed.get(this.context).has(this);
- }
- /**
- * Returns true if no one is soloed
- */
- _noSolos() {
- // either does not have any soloed added
- return !Solo._soloed.has(this.context) ||
- // or has a solo set but doesn't include any items
- (Solo._soloed.has(this.context) && Solo._soloed.get(this.context).size === 0);
- }
- /**
- * Update this instance's gain based on the current solo state.
- */
- _updateSolo() {
- if (this._isSoloed()) {
- this.input.gain.value = 1;
- }
- else if (this._noSolos()) {
- // no one is soloed
- this.input.gain.value = 1;
- }
- else {
- this.input.gain.value = 0;
- }
- }
- dispose() {
- super.dispose();
- Solo._allSolos.get(this.context).delete(this);
- this._removeSolo();
- return this;
- }
-}
-/**
- * Hold all of the soloed tracks belonging to a specific context
- */
-Solo._allSolos = new Map();
-/**
- * Hold the currently soloed instance(s)
- */
-Solo._soloed = new Map();
-
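The two static maps above scope solo state to an audio context, and `_updateSolo` derives each instance's gain from the shared set, so several instances can be soloed at once. A minimal behavioral sketch, assuming the pre-removal API:

    const a = new Tone.Solo().toDestination();
    const b = new Tone.Solo().toDestination();
    const c = new Tone.Solo().toDestination();
    a.solo = true;
    b.solo = true;        // the soloed Set can hold several instances
    console.log(c.muted); // true: not soloed while the set is non-empty
    a.solo = false;
    b.solo = false;
    console.log(c.muted); // false: with an empty set every instance passes audio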
-/**
- * PanVol is a Tone.Panner and Tone.Volume in one.
- * @example
- * // pan the incoming signal left and drop the volume
- * const panVol = new Tone.PanVol(-0.25, -12).toDestination();
- * const osc = new Tone.Oscillator().connect(panVol).start();
- * @category Component
- */
-class PanVol extends ToneAudioNode {
- constructor() {
- super(optionsFromArguments(PanVol.getDefaults(), arguments, ["pan", "volume"]));
- this.name = "PanVol";
- const options = optionsFromArguments(PanVol.getDefaults(), arguments, ["pan", "volume"]);
- this._panner = this.input = new Panner({
- context: this.context,
- pan: options.pan,
- channelCount: options.channelCount,
- });
- this.pan = this._panner.pan;
- this._volume = this.output = new Volume({
- context: this.context,
- volume: options.volume,
- });
- this.volume = this._volume.volume;
- // connections
- this._panner.connect(this._volume);
- this.mute = options.mute;
- readOnly(this, ["pan", "volume"]);
- }
- static getDefaults() {
- return Object.assign(ToneAudioNode.getDefaults(), {
- mute: false,
- pan: 0,
- volume: 0,
- channelCount: 1,
- });
- }
- /**
- * Mute/unmute the volume
- */
- get mute() {
- return this._volume.mute;
- }
- set mute(mute) {
- this._volume.mute = mute;
- }
- dispose() {
- super.dispose();
- this._panner.dispose();
- this.pan.dispose();
- this._volume.dispose();
- this.volume.dispose();
- return this;
- }
-}
-
-/**
- * Channel provides a channel strip interface with volume, pan, solo and mute controls.
- * See [[PanVol]] and [[Solo]]
- * @example
- * // pan the incoming signal left and drop the volume 12db
- * const channel = new Tone.Channel(-0.25, -12);
- * @category Component
- */
-class Channel extends ToneAudioNode {
- constructor() {
- super(optionsFromArguments(Channel.getDefaults(), arguments, ["volume", "pan"]));
- this.name = "Channel";
- const options = optionsFromArguments(Channel.getDefaults(), arguments, ["volume", "pan"]);
- this._solo = this.input = new Solo({
- solo: options.solo,
- context: this.context,
- });
- this._panVol = this.output = new PanVol({
- context: this.context,
- pan: options.pan,
- volume: options.volume,
- mute: options.mute,
- channelCount: options.channelCount
- });
- this.pan = this._panVol.pan;
- this.volume = this._panVol.volume;
- this._solo.connect(this._panVol);
- readOnly(this, ["pan", "volume"]);
- }
- static getDefaults() {
- return Object.assign(ToneAudioNode.getDefaults(), {
- pan: 0,
- volume: 0,
- mute: false,
- solo: false,
- channelCount: 1,
- });
- }
- /**
- * Solo/unsolo the channel. Soloing is only relative to other [[Channel]] and [[Solo]] instances.
- */
- get solo() {
- return this._solo.solo;
- }
- set solo(solo) {
- this._solo.solo = solo;
- }
- /**
- * If the current instance is muted, i.e. another instance is soloed,
- * or the channel is muted
- */
- get muted() {
- return this._solo.muted || this.mute;
- }
- /**
- * Mute/unmute the volume
- */
- get mute() {
- return this._panVol.mute;
- }
- set mute(mute) {
- this._panVol.mute = mute;
- }
- /**
- * Get the gain node belonging to the bus name. Create it if
- * it doesn't exist
- * @param name The bus name
- */
- _getBus(name) {
- if (!Channel.buses.has(name)) {
- Channel.buses.set(name, new Gain({ context: this.context }));
- }
- return Channel.buses.get(name);
- }
- /**
- * Send audio to another channel using a string. `send` is a lot like
- * [[connect]], except it uses a string (the bus name) instead of an object
- * reference. This can be useful in large applications to decouple sections,
- * since [[send]] and [[receive]] can be invoked separately to connect two objects.
- * @param name The channel name to send the audio
- * @param volume The amount of the signal to send.
- * Defaults to 0db, i.e. send the entire signal
- * @returns Returns the gain node of this connection.
- */
- send(name, volume = 0) {
- const bus = this._getBus(name);
- const sendKnob = new Gain({
- context: this.context,
- units: "decibels",
- gain: volume,
- });
- this.connect(sendKnob);
- sendKnob.connect(bus);
- return sendKnob;
- }
- /**
- * Receive audio from a channel which was connected with [[send]].
- * @param name The channel name to receive audio from.
- */
- receive(name) {
- const bus = this._getBus(name);
- bus.connect(this);
- return this;
- }
- dispose() {
- super.dispose();
- this._panVol.dispose();
- this.pan.dispose();
- this.volume.dispose();
- this._solo.dispose();
- return this;
- }
-}
-/**
- * Store the send/receive channels by name.
- */
-Channel.buses = new Map();
-
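`send` and `receive` above communicate through a named Gain node lazily created in the static `Channel.buses` map, so the two ends never need a direct reference to each other. A minimal sketch, assuming the pre-removal API (the bus name is arbitrary):

    const track = new Tone.Channel().toDestination();
    track.send("fx", -6);   // tap this channel into the "fx" bus at -6 dB
    const fxReturn = new Tone.Channel().toDestination();
    fxReturn.receive("fx"); // pick the same bus up elsewhere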
/**
* Mono coerces the incoming mono or stereo signal into a mono signal
* where both left and right channels have the same value. This can be useful
@@ -26934,90 +7463,6 @@ class MultibandSplit extends ToneAudioNode {
}
}
-/**
- * Tone.Listener is a thin wrapper around the AudioListener. Listener combined
- * with [[Panner3D]] makes up the Web Audio API's 3D panning system. Panner3D allows you
- * to place sounds in 3D and Listener allows you to navigate the 3D sound environment from
- * a first-person perspective. There is only one listener per audio context.
- */
-class Listener extends ToneAudioNode {
- constructor() {
- super(...arguments);
- this.name = "Listener";
- this.positionX = new Param({
- context: this.context,
- param: this.context.rawContext.listener.positionX,
- });
- this.positionY = new Param({
- context: this.context,
- param: this.context.rawContext.listener.positionY,
- });
- this.positionZ = new Param({
- context: this.context,
- param: this.context.rawContext.listener.positionZ,
- });
- this.forwardX = new Param({
- context: this.context,
- param: this.context.rawContext.listener.forwardX,
- });
- this.forwardY = new Param({
- context: this.context,
- param: this.context.rawContext.listener.forwardY,
- });
- this.forwardZ = new Param({
- context: this.context,
- param: this.context.rawContext.listener.forwardZ,
- });
- this.upX = new Param({
- context: this.context,
- param: this.context.rawContext.listener.upX,
- });
- this.upY = new Param({
- context: this.context,
- param: this.context.rawContext.listener.upY,
- });
- this.upZ = new Param({
- context: this.context,
- param: this.context.rawContext.listener.upZ,
- });
- }
- static getDefaults() {
- return Object.assign(ToneAudioNode.getDefaults(), {
- positionX: 0,
- positionY: 0,
- positionZ: 0,
- forwardX: 0,
- forwardY: 0,
- forwardZ: -1,
- upX: 0,
- upY: 1,
- upZ: 0,
- });
- }
- dispose() {
- super.dispose();
- this.positionX.dispose();
- this.positionY.dispose();
- this.positionZ.dispose();
- this.forwardX.dispose();
- this.forwardY.dispose();
- this.forwardZ.dispose();
- this.upX.dispose();
- this.upY.dispose();
- this.upZ.dispose();
- return this;
- }
-}
-//-------------------------------------
-// INITIALIZATION
-//-------------------------------------
-onContextInit(context => {
- context.listener = new Listener({ context });
-});
-onContextClose(context => {
- context.listener.dispose();
-});
-
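Because the Listener singleton is created per context in the initialization block above, its position and orientation Params can be set at any time. A minimal sketch, assuming the pre-removal API:

    const listener = Tone.getListener();
    listener.positionX.value = 1;  // move one unit to the right
    // the defaults above face down -Z with +Y up; turn to face +X instead
    listener.forwardX.value = 1;
    listener.forwardZ.value = 0;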
/**
* A spatialized panner node which supports equalpower or HRTF panning.
* @category Component
@@ -27816,96 +8261,4 @@ class Convolver extends ToneAudioNode {
}
}
-/**
- * The current audio context time of the global [[Context]].
- * See [[Context.now]]
- * @category Core
- */
-function now() {
- return getContext().now();
-}
-/**
- * The current audio context time of the global [[Context]] without the [[Context.lookAhead]].
- * See [[Context.immediate]]
- * @category Core
- */
-function immediate() {
- return getContext().immediate();
-}
-/**
- * The Transport object belonging to the global Tone.js Context.
- * See [[Transport]]
- * @category Core
- */
-const Transport$1 = getContext().transport;
-/**
- * The Transport object belonging to the global Tone.js Context.
- * See [[Transport]]
- * @category Core
- */
-function getTransport() {
- return getContext().transport;
-}
-/**
- * The Destination (output) belonging to the global Tone.js Context.
- * See [[Destination]]
- * @category Core
- */
-const Destination$1 = getContext().destination;
-/**
- * @deprecated Use [[Destination]]
- */
-const Master = getContext().destination;
-/**
- * The Destination (output) belonging to the global Tone.js Context.
- * See [[Destination]]
- * @category Core
- */
-function getDestination() {
- return getContext().destination;
-}
-/**
- * The [[Listener]] belonging to the global Tone.js Context.
- * @category Core
- */
-const Listener$1 = getContext().listener;
-/**
- * The [[Listener]] belonging to the global Tone.js Context.
- * @category Core
- */
-function getListener() {
- return getContext().listener;
-}
-/**
- * Draw is used to synchronize the draw frame with the Transport's callbacks.
- * See [[Draw]]
- * @category Core
- */
-const Draw$1 = getContext().draw;
-/**
- * Get the singleton attached to the global context.
- * Draw is used to synchronize the draw frame with the Transport's callbacks.
- * See [[Draw]]
- * @category Core
- */
-function getDraw() {
- return getContext().draw;
-}
-/**
- * A reference to the global context
- * See [[Context]]
- */
-const context = getContext();
-/**
- * Promise which resolves when all of the loading promises are resolved.
- * Alias for static [[ToneAudioBuffer.loaded]] method.
- * @category Core
- */
-function loaded() {
- return ToneAudioBuffer.loaded();
-}
-const Buffer = ToneAudioBuffer;
-const Buffers = ToneAudioBuffers;
-const BufferSource = ToneBufferSource;
-
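Among the removed helpers, `now()` includes the context's `lookAhead` while `immediate()` does not, which matters when scheduling against the global context. A minimal sketch, assuming the pre-removal exports:

    const osc = new Tone.Oscillator(440).toDestination();
    osc.start(Tone.now());          // lookAhead included: jitter-safe scheduling
    osc.stop(Tone.immediate() + 1); // one second from the raw context time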
-export { AMOscillator, AMSynth, Abs, Add, AmplitudeEnvelope, Analyser, AudioToGain, AutoFilter, AutoPanner, AutoWah, BaseContext, BiquadFilter, BitCrusher, Buffer, BufferSource, Buffers, Channel, Chebyshev, Chorus, Clock, Compressor, Context, Convolver, CrossFade, DCMeter, Delay, Destination$1 as Destination, Distortion, Draw$1 as Draw, DuoSynth, EQ3, Emitter, Envelope, FFT, FMOscillator, FMSynth, FatOscillator, FeedbackCombFilter, FeedbackDelay, Filter, Follower, Freeverb, Frequency, FrequencyClass, FrequencyEnvelope, FrequencyShifter, Gain, GainToAudio, Gate, GrainPlayer, GreaterThan, GreaterThanZero, IntervalTimeline, JCReverb, LFO, Limiter, Listener$1 as Listener, Loop, LowpassCombFilter, Master, MembraneSynth, Merge, MetalSynth, Meter, MidSideCompressor, MidSideMerge, MidSideSplit, Midi, MidiClass, Mono, MonoSynth, MultibandCompressor, MultibandSplit, Multiply, Negate, Noise, NoiseSynth, Offline, OfflineContext, OmniOscillator, OnePoleFilter, Oscillator, PWMOscillator, PanVol, Panner, Panner3D, Param, Part, Pattern, Phaser, PingPongDelay, PitchShift, Player, Players, PluckSynth, PolySynth, Pow, PulseOscillator, Recorder, Reverb, Sampler, Scale, ScaleExp, Sequence, Signal, Solo, Split, StateTimeline, StereoWidener, Subtract, SyncedSignal, Synth, Ticks, TicksClass, Time, TimeClass, Timeline, ToneAudioBuffer, ToneAudioBuffers, ToneAudioNode, ToneBufferSource, ToneEvent, ToneOscillatorNode, Transport$1 as Transport, TransportTime, TransportTimeClass, Tremolo, Units as Unit, UserMedia, Vibrato, Volume, WaveShaper, Waveform, Zero, connect, connectSeries, connectSignal, context, dbToGain, Debug as debug, defaultArg, disconnect, ftom, gainToDb, getContext, getDestination, getDraw, getListener, getTransport, immediate, intervalToFrequencyRatio, isArray, isBoolean, isDefined, isFunction, isNote, isNumber, isObject, isString, isUndef, loaded, mtof, now, optionsFromArguments, setContext, start, isSupported as supported, version };
+export { AMSynth, Abs, Add, Analyser, AutoFilter, AutoPanner, AutoWah, BiquadFilter, BitCrusher, Chebyshev, Chorus, Compressor, Convolver, CrossFade, DCMeter, Delay, Distortion, DuoSynth, EQ3, FFT, FMSynth, FeedbackCombFilter, FeedbackDelay, Filter, Follower, Freeverb, FrequencyEnvelope, FrequencyShifter, GainToAudio, Gate, GrainPlayer, GreaterThan, GreaterThanZero, JCReverb, LFO, Limiter, Loop, LowpassCombFilter, Merge, MetalSynth, Meter, MidSideCompressor, MidSideMerge, MidSideSplit, Mono, MonoSynth, MultibandCompressor, MultibandSplit, Negate, Noise, NoiseSynth, Offline, OnePoleFilter, Panner3D, Part, Pattern, Phaser, PingPongDelay, PitchShift, Players, PluckSynth, PolySynth, Pow, Recorder, Reverb, Scale, ScaleExp, Sequence, Split, StereoWidener, Subtract, SyncedSignal, ToneEvent, Tremolo, Units as Unit, UserMedia, Vibrato, Waveform, Zero };
diff --git a/docs/_snowpack/pkg/webmidi.js b/docs/_snowpack/pkg/webmidi.js
index 6f2f04b4..92d89cc9 100644
--- a/docs/_snowpack/pkg/webmidi.js
+++ b/docs/_snowpack/pkg/webmidi.js
@@ -1,37 +1,3 @@
-import { c as createCommonjsModule, a as commonjsGlobal } from './common/_commonjsHelpers-8c19dec8.js';
-
-var webmidi_min = createCommonjsModule(function (module) {
-/*
-
-WebMidi v2.5.3
-
-WebMidi.js helps you tame the Web MIDI API. Send and receive MIDI messages with ease. Control instruments with user-friendly functions (playNote, sendPitchBend, etc.). React to MIDI input with simple event listeners (noteon, pitchbend, controlchange, etc.).
-https://github.com/djipco/webmidi
-
-
-The MIT License (MIT)
-
-Copyright (c) 2015-2019, Jean-Philippe Côté
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
-associated documentation files (the "Software"), to deal in the Software without restriction,
-including without limitation the rights to use, copy, modify, merge, publish, distribute,
-sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
-NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
-OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-*/
-
-
-!function(scope){function WebMidi(){if(WebMidi.prototype._singleton)throw new Error("WebMidi is a singleton, it cannot be instantiated directly.");(WebMidi.prototype._singleton=this)._inputs=[],this._outputs=[],this._userHandlers={},this._stateChangeQueue=[],this._processingStateChange=!1,this._midiInterfaceEvents=["connected","disconnected"],this._nrpnBuffer=[[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]],this._nrpnEventsEnabled=!0,this._nrpnTypes=["entry","increment","decrement"],this._notes=["C","C#","D","D#","E","F","F#","G","G#","A","A#","B"],this._semitones={C:0,D:2,E:4,F:5,G:7,A:9,B:11},Object.defineProperties(this,{MIDI_SYSTEM_MESSAGES:{value:{sysex:240,timecode:241,songposition:242,songselect:243,tuningrequest:246,sysexend:247,clock:248,start:250,continue:251,stop:252,activesensing:254,reset:255,midimessage:0,unknownsystemmessage:-1},writable:!1,enumerable:!0,configurable:!1},MIDI_CHANNEL_MESSAGES:{value:{noteoff:8,noteon:9,keyaftertouch:10,controlchange:11,channelmode:11,nrpn:11,programchange:12,channelaftertouch:13,pitchbend:14},writable:!1,enumerable:!0,configurable:!1},MIDI_REGISTERED_PARAMETER:{value:{pitchbendrange:[0,0],channelfinetuning:[0,1],channelcoarsetuning:[0,2],tuningprogram:[0,3],tuningbank:[0,4],modulationrange:[0,5],azimuthangle:[61,0],elevationangle:[61,1],gain:[61,2],distanceratio:[61,3],maximumdistance:[61,4],maximumdistancegain:[61,5],referencedistanceratio:[61,6],panspreadangle:[61,7],rollangle:[61,8]},writable:!1,enumerable:!0,configurable:!1},MIDI_CONTROL_CHANGE_MESSAGES:{value:{bankselectcoarse:0,modulationwheelcoarse:1,breathcontrollercoarse:2,footcontrollercoarse:4,portamentotimecoarse:5,dataentrycoarse:6,volumecoarse:7,balancecoarse:8,pancoarse:10,expressioncoarse:11,effectcontrol1coarse:12,effectcontrol2coarse:13,generalpurposeslider1:16,generalpurposeslider2:17,generalpurposeslider3:18,generalpurposeslider4:19,bankselectfine:32,modulationwheelfine:33,breathcontrollerfine:34,footcontrollerfine:36,portamentotimefine:37,dataentryfine:38,volumefine:39,balancefine:40,panfine:42,expressionfine:43,effectcontrol1fine:44,effectcontrol2fine:45,holdpedal:64,portamento:65,sustenutopedal:66,softpedal:67,legatopedal:68,hold2pedal:69,soundvariation:70,resonance:71,soundreleasetime:72,soundattacktime:73,brightness:74,soundcontrol6:75,soundcontrol7:76,soundcontrol8:77,soundcontrol9:78,soundcontrol10:79,generalpurposebutton1:80,generalpurposebutton2:81,generalpurposebutton3:82,generalpurposebutton4:83,reverblevel:91,tremololevel:92,choruslevel:93,celestelevel:94,phaserlevel:95,databuttonincrement:96,databuttondecrement:97,nonregisteredparametercoarse:98,nonregisteredparameterfine:99,registeredparametercoarse:100,registeredparameterfine:101},writable:!1,enumerable:!0,configurable:!1},MIDI_NRPN_MESSAGES:{value:{entrymsb:6,entrylsb:38,increment:96,decrement:97,paramlsb:98,parammsb:99,nullactiveparameter:127},writable:!1,enumerable:!0,configurable:!1},MIDI_CHANNEL_MODE_MESSAGES:{value:{allsoundoff:120,resetallcontrollers:121,localcontrol:122,allnotesoff:123,omnimodeoff:124,omnimodeon:125,monomodeon:126,polymodeon:127},writable:!1,enumerable:!0,configurable:!1},octaveOffset:{value:0,writable:!0,enumerable:!0,configurable:!1}}),Object.defineProperties(this,{supported:{enumerable:!0,get:function(){return "requestMIDIAccess"in navigator}},enabled:{enumerable:!0,get:function(){return void 0!==this.interface}.bind(this)},inputs:{enumerable:!0,get:function(){return this._inputs}.bind(this)},outputs:{enumerable:!0,get:function(){return 
this._outputs}.bind(this)},sysexEnabled:{enumerable:!0,get:function(){return !(!this.interface||!this.interface.sysexEnabled)}.bind(this)},nrpnEventsEnabled:{enumerable:!0,get:function(){return !!this._nrpnEventsEnabled}.bind(this),set:function(enabled){return this._nrpnEventsEnabled=enabled,this._nrpnEventsEnabled}},nrpnTypes:{enumerable:!0,get:function(){return this._nrpnTypes}.bind(this)},time:{enumerable:!0,get:function(){return performance.now()}}});}var wm=new WebMidi;function Input(midiInput){var that=this;this._userHandlers={channel:{},system:{}},this._midiInput=midiInput,Object.defineProperties(this,{connection:{enumerable:!0,get:function(){return that._midiInput.connection}},id:{enumerable:!0,get:function(){return that._midiInput.id}},manufacturer:{enumerable:!0,get:function(){return that._midiInput.manufacturer}},name:{enumerable:!0,get:function(){return that._midiInput.name}},state:{enumerable:!0,get:function(){return that._midiInput.state}},type:{enumerable:!0,get:function(){return that._midiInput.type}}}),this._initializeUserHandlers(),this._midiInput.onmidimessage=this._onMidiMessage.bind(this);}function Output(midiOutput){var that=this;this._midiOutput=midiOutput,Object.defineProperties(this,{connection:{enumerable:!0,get:function(){return that._midiOutput.connection}},id:{enumerable:!0,get:function(){return that._midiOutput.id}},manufacturer:{enumerable:!0,get:function(){return that._midiOutput.manufacturer}},name:{enumerable:!0,get:function(){return that._midiOutput.name}},state:{enumerable:!0,get:function(){return that._midiOutput.state}},type:{enumerable:!0,get:function(){return that._midiOutput.type}}});}WebMidi.prototype.enable=function(callback,sysex){this.enabled||(this.supported?navigator.requestMIDIAccess({sysex:sysex}).then(function(midiAccess){var promiseTimeout,events=[],promises=[];this.interface=midiAccess,this._resetInterfaceUserHandlers(),this.interface.onstatechange=function(e){events.push(e);};for(var inputs=midiAccess.inputs.values(),input=inputs.next();input&&!input.done;input=inputs.next())promises.push(input.value.open());for(var outputs=midiAccess.outputs.values(),output=outputs.next();output&&!output.done;output=outputs.next())promises.push(output.value.open());function onPortsOpen(){clearTimeout(promiseTimeout),this._updateInputsAndOutputs(),this.interface.onstatechange=this._onInterfaceStateChange.bind(this),"function"==typeof callback&&callback.call(this),events.forEach(function(event){this._onInterfaceStateChange(event);}.bind(this));}promiseTimeout=setTimeout(onPortsOpen.bind(this),200),Promise&&Promise.all(promises).catch(function(err){}).then(onPortsOpen.bind(this));}.bind(this),function(err){"function"==typeof callback&&callback.call(this,err);}.bind(this)):"function"==typeof callback&&callback(new Error("The Web MIDI API is not supported by your browser.")));},WebMidi.prototype.disable=function(){if(!this.supported)throw new Error("The Web MIDI API is not supported by your browser.");this.enabled&&(this.removeListener(),this.inputs.forEach(function(input){input.removeListener();})),this.interface&&(this.interface.onstatechange=void 0),this.interface=void 0,this._inputs=[],this._outputs=[],this._nrpnEventsEnabled=!0,this._resetInterfaceUserHandlers();},WebMidi.prototype.addListener=function(type,listener){if(!this.enabled)throw new Error("WebMidi must be enabled before adding event listeners.");if("function"!=typeof listener)throw new TypeError("The 'listener' parameter must be a 
function.");if(!(0<=this._midiInterfaceEvents.indexOf(type)))throw new TypeError("The specified event type is not supported.");return this._userHandlers[type].push(listener),this},WebMidi.prototype.hasListener=function(type,listener){if(!this.enabled)throw new Error("WebMidi must be enabled before checking event listeners.");if("function"!=typeof listener)throw new TypeError("The 'listener' parameter must be a function.");if(!(0<=this._midiInterfaceEvents.indexOf(type)))throw new TypeError("The specified event type is not supported.");for(var o=0;o