| 1 | // js/audio-engine.js |
| 2 | import { Effects } from './effects.js'; |
| 3 | |
/**
 * Core Web Audio engine: owns the AudioContext, a serial chain of
 * switchable effects (built from the project-local `Effects` helper),
 * and the active source (decoded buffer loop or live microphone).
 *
 * Lifecycle: construct → `init()` (lazily called by the load/start
 * methods) → `play()`/`startMicrophone()` → `stop()`/`stopMicrophone()`.
 */
export class AudioEngine {
  constructor() {
    this.audioContext = null;
    this.source = null;
    this.isPlaying = false;
    this.audioBuffer = null;

    // Microphone support
    this.microphoneStream = null;
    this.microphoneSource = null;
    this.isMicrophoneActive = false;

    this.currentEffectIndex = 0;
    this.currentParam = 0.5;
    this.rawParam = 0.5;
    this.smoothedParam = 0.5;
    // Weight given to the PREVIOUS smoothed value (0.2 => 80% new input).
    this.paramSmoothFactor = 0.2;

    // Order here defines the serial chain order in setupSerialEffectsChain().
    this.effectNames = [
      'mid_side',
      'bitcrush',
      'lowpass',
      'highpass',
      'delay',
      'reverb',
      'spectral_freeze',
      'pitch_shift'
    ];

    this.effects = null;
    this.effectNodes = [];
  }

  /**
   * Lazily create the AudioContext, load the AudioWorklet processors and
   * build the effects graph. Safe to call repeatedly; only the first call
   * does work. Must be awaited before any node-touching method.
   */
  async init() {
    if (!this.audioContext) {
      this.audioContext = new (window.AudioContext || window.webkitAudioContext)();

      // Load AudioWorklet modules (falls back to inline code on failure).
      await this.loadWorklets();

      // Setup audio graph
      this.setupAudioGraph();
    }
  }

  /**
   * Register a worklet module from already-fetched source text.
   * The temporary blob URL is revoked once addModule resolves/rejects,
   * so repeated loads do not leak object URLs.
   * @param {string} code - JavaScript source of an AudioWorkletProcessor.
   */
  async addWorkletFromSource(code) {
    const blob = new Blob([code], { type: 'application/javascript' });
    const url = URL.createObjectURL(blob);
    try {
      await this.audioContext.audioWorklet.addModule(url);
    } finally {
      URL.revokeObjectURL(url);
    }
  }

  /**
   * Fetch a worklet file and register it. Throws on HTTP errors so a 404
   * page is never handed to addModule as if it were JavaScript.
   * @param {string} path - URL of the worklet source file.
   */
  async addWorkletFromUrl(path) {
    const response = await fetch(path);
    if (!response.ok) {
      throw new Error(`Failed to fetch worklet ${path}: ${response.status}`);
    }
    const code = await response.text();
    await this.addWorkletFromSource(code);
  }

  /**
   * Load the external worklet files; on any failure, fall back to the
   * inline copies bundled in loadInlineWorklets().
   */
  async loadWorklets() {
    try {
      await this.addWorkletFromUrl('js/worklets/bitcrusher.js');
      await this.addWorkletFromUrl('js/worklets/spectral-freeze.js');
    } catch (err) {
      console.error('Error loading AudioWorklets:', err);
      console.log('Falling back to inline worklets');

      // Fallback: load worklets inline
      await this.loadInlineWorklets();
    }
  }

  /**
   * Register inline copies of the two worklet processors. Used when the
   * external files cannot be fetched (e.g. file:// deployment).
   */
  async loadInlineWorklets() {
    // Inline worklet code as fallback
    const bitcrusherProcessor = `
      class BitcrusherProcessor extends AudioWorkletProcessor {
        static get parameterDescriptors() {
          return [{
            name: 'bitDepth',
            defaultValue: 8,
            minValue: 1,
            maxValue: 16,
            automationRate: 'k-rate'
          }];
        }

        process(inputs, outputs, parameters) {
          const input = inputs[0];
          const output = outputs[0];
          const bitDepth = parameters.bitDepth[0];

          const step = 2 / Math.pow(2, bitDepth);

          for (let channel = 0; channel < input.length; channel++) {
            const inputChannel = input[channel];
            const outputChannel = output[channel];

            for (let i = 0; i < inputChannel.length; i++) {
              const sample = inputChannel[i];
              outputChannel[i] = Math.round(sample / step) * step;
            }
          }

          return true;
        }
      }

      registerProcessor('bitcrusher-processor', BitcrusherProcessor);
    `;

    const spectralFreezeProcessor = `
      class SpectralFreezeProcessor extends AudioWorkletProcessor {
        constructor() {
          super();
          this.frozenSpectrum = null;
          this.isActive = false;
          this.port.onmessage = (e) => {
            if (e.data.type === 'setActive') {
              this.isActive = e.data.value;
              if (!this.isActive) {
                this.frozenSpectrum = null;
              }
            }
          };
        }

        static get parameterDescriptors() {
          return [{
            name: 'freeze',
            defaultValue: 0,
            minValue: 0,
            maxValue: 1,
            automationRate: 'k-rate'
          }];
        }

        process(inputs, outputs, parameters) {
          const input = inputs[0];
          const output = outputs[0];
          const freeze = parameters.freeze[0];

          if (freeze > 0.5 && !this.frozenSpectrum && input[0]) {
            this.frozenSpectrum = new Float32Array(input[0].length);
            for (let i = 0; i < input[0].length; i++) {
              this.frozenSpectrum[i] = input[0][i];
            }
          } else if (freeze <= 0.5) {
            this.frozenSpectrum = null;
          }

          for (let channel = 0; channel < input.length; channel++) {
            const inputChannel = input[channel];
            const outputChannel = output[channel];

            if (this.frozenSpectrum && freeze > 0.5) {
              for (let i = 0; i < outputChannel.length; i++) {
                const phase = Math.random() * 2 * Math.PI;
                outputChannel[i] = this.frozenSpectrum[i % this.frozenSpectrum.length] *
                                   Math.cos(phase) * 0.8;
              }
            } else {
              for (let i = 0; i < outputChannel.length; i++) {
                outputChannel[i] = inputChannel ? inputChannel[i] : 0;
              }
            }
          }

          return true;
        }
      }

      registerProcessor('spectral-freeze-processor', SpectralFreezeProcessor);
    `;

    await this.addWorkletFromSource(bitcrusherProcessor);
    await this.addWorkletFromSource(spectralFreezeProcessor);
  }

  /**
   * Build the static portion of the audio graph:
   * input gain → serial effect chain → output gain → destination.
   */
  setupAudioGraph() {
    // Create main input/output nodes
    this.inputGain = this.audioContext.createGain();
    this.outputGain = this.audioContext.createGain();

    // Create effects
    this.effects = new Effects(this.audioContext);

    // Create a simple serial effects chain
    this.setupSerialEffectsChain();

    this.outputGain.connect(this.audioContext.destination);
  }

  /**
   * Wire every named effect in series. Each stage splits the signal into
   * a bypass path and an effect path, then sums them in a mixer; exactly
   * one of the two gains is non-zero at a time (see switchToEffect).
   * All stages start bypassed.
   */
  setupSerialEffectsChain() {
    // Create bypass and effect paths for each effect
    this.effectNodes = [];

    let previousNode = this.inputGain;

    this.effectNames.forEach((name, i) => {
      const bypass = this.audioContext.createGain();
      const effectInput = this.audioContext.createGain();
      const mixer = this.audioContext.createGain();

      // Split signal to bypass and effect
      previousNode.connect(bypass);
      previousNode.connect(effectInput);

      // Connect effect
      const effectOutput = this.audioContext.createGain();

      switch (name) {
        case 'mid_side':
          effectInput.connect(this.effects.midSideIn);
          this.effects.midSideOut.connect(effectOutput);
          break;

        case 'bitcrush':
          effectInput.connect(this.effects.bitcrusher);
          this.effects.bitcrusher.connect(effectOutput);
          break;

        case 'lowpass':
          effectInput.connect(this.effects.lowpass);
          this.effects.lowpass.connect(effectOutput);
          break;

        case 'highpass':
          effectInput.connect(this.effects.highpass);
          this.effects.highpass.connect(effectOutput);
          break;

        case 'delay':
          // Wet path through the delay line plus a parallel dry path,
          // both summed into the stage output.
          effectInput.connect(this.effects.delay);
          effectInput.connect(this.effects.delayDry);
          this.effects.delayMix.connect(effectOutput);
          this.effects.delayDry.connect(effectOutput);
          break;

        case 'reverb':
          effectInput.connect(this.effects.convolver);
          effectInput.connect(this.effects.reverbDry);
          this.effects.convolver.connect(this.effects.reverbMix);
          this.effects.reverbMix.connect(effectOutput);
          this.effects.reverbDry.connect(effectOutput);
          break;

        case 'spectral_freeze':
          effectInput.connect(this.effects.spectralFreeze);
          this.effects.spectralFreeze.connect(effectOutput);
          break;

        case 'pitch_shift':
          effectInput.connect(effectOutput); // Pass through, handled by playback rate
          break;
      }

      // Mix bypass and effect
      bypass.connect(mixer);
      effectOutput.connect(mixer);

      // Store node info
      this.effectNodes.push({
        name,
        bypass,
        effectInput,
        effectOutput,
        mixer,
        isActive: false
      });

      // Set initial state (all bypassed)
      bypass.gain.value = 1;
      effectInput.gain.value = 0;

      // Chain to next effect
      previousNode = mixer;
    });

    // Connect final node to output
    previousNode.connect(this.outputGain);
  }

  /**
   * Make exactly one effect audible and bypass the rest, cross-fading
   * gains over ~10 ms to avoid clicks.
   * @param {number} index - Index into this.effectNames.
   */
  switchToEffect(index) {
    const now = this.audioContext.currentTime;

    this.effectNodes.forEach((node, i) => {
      const enable = i === index;
      // Complementary gains: bypass 0/effect 1 when enabled, inverse when not.
      node.bypass.gain.setTargetAtTime(enable ? 0 : 1, now, 0.01);
      node.effectInput.gain.setTargetAtTime(enable ? 1 : 0, now, 0.01);
      node.isActive = enable;
    });

    this.currentEffectIndex = index;
  }

  /**
   * Feed a new control value (0..1) to the currently selected effect,
   * with one-pole smoothing to suppress controller jitter.
   * @param {number} value - Raw parameter in [0, 1].
   */
  updateEffectParameter(value) {
    // Don't update if not initialized
    if (!this.effects) {
      return;
    }

    this.rawParam = value;

    // One-pole smoothing: paramSmoothFactor is the weight of the old value.
    this.smoothedParam = this.paramSmoothFactor * this.smoothedParam +
                         (1 - this.paramSmoothFactor) * value;
    this.currentParam = this.smoothedParam;

    // Update the effect
    const effectName = this.effectNames[this.currentEffectIndex];
    this.effects.updateParameter(effectName, this.currentParam);

    // Special handling for pitch shift - EXTREME VERSION
    if (effectName === 'pitch_shift' && this.source && this.source.playbackRate) {
      // INSANE pitch range: -3 octaves to +3 octaves
      // Non-linear curve for more fun in the middle
      const normalized = (this.currentParam - 0.5) * 2; // -1 to 1
      const semitones = normalized * Math.abs(normalized) * 36; // -36 to +36 with curve
      this.source.playbackRate.value = Math.pow(2, semitones / 12);

      // Also detune for microtonal madness at certain positions
      if (this.source.detune) {
        this.source.detune.value = Math.sin(this.currentParam * Math.PI * 4) * 50;
      }
    }
  }

  /**
   * Decode an ArrayBuffer of encoded audio into this.audioBuffer.
   * @param {ArrayBuffer} arrayBuffer
   * @returns {Promise<boolean>} true on success, false if decoding failed.
   */
  async loadArrayBuffer(arrayBuffer) {
    await this.init();

    try {
      const audioBuffer = await this.audioContext.decodeAudioData(arrayBuffer);
      this.audioBuffer = audioBuffer;
      return true;
    } catch (err) {
      console.error('Error decoding audio:', err);
      return false;
    }
  }

  /**
   * Decode a user-supplied File (e.g. from an <input type="file">).
   * @param {File} file
   * @returns {Promise<boolean>} true on success.
   */
  async loadFile(file) {
    const arrayBuffer = await file.arrayBuffer();
    return this.loadArrayBuffer(arrayBuffer);
  }

  /**
   * Synthesize a 30-second stereo demo loop (bass, melody, kick, hat)
   * directly into this.audioBuffer, so the app works without any file.
   */
  async loadDemoAudio() {
    await this.init();

    // Create a rich demo audio
    const sampleRate = this.audioContext.sampleRate;
    const duration = 30;
    const buffer = this.audioContext.createBuffer(2, sampleRate * duration, sampleRate);

    for (let channel = 0; channel < 2; channel++) {
      const channelData = buffer.getChannelData(channel);

      for (let i = 0; i < channelData.length; i++) {
        const t = i / sampleRate;

        // Bass line: steps through 4 chromatic offsets twice per second
        const bassFreq = 110 * Math.pow(2, Math.floor(t * 2) % 4 / 12);
        const bass = Math.sin(2 * Math.PI * bassFreq * t) * 0.3;

        // Melody: 8-step semitone pattern at 4 notes per second
        const melodyPattern = [0, 3, 5, 7, 8, 7, 5, 3];
        const melodyNote = melodyPattern[Math.floor(t * 4) % 8];
        const melodyFreq = 440 * Math.pow(2, melodyNote / 12);
        const melody = Math.sin(2 * Math.PI * melodyFreq * t) * 0.2;

        // Drums: short exponentially-decaying bursts
        const kick = (t % 0.5 < 0.05) ? Math.sin(2 * Math.PI * 60 * t) * Math.exp(-t % 0.5 * 20) : 0;
        const hihat = (t % 0.125 < 0.02) ? (Math.random() * 2 - 1) * 0.1 * Math.exp(-t % 0.125 * 50) : 0;

        // Mix with stereo separation (melody panned differently per channel)
        const pan = channel === 0 ? 0.7 : 1.3;
        channelData[i] = (bass + melody * pan + kick + hihat) * 0.5;
      }
    }

    this.audioBuffer = buffer;
  }

  /**
   * Install a synthetic impulse response for the reverb convolver.
   * @param {'none'|'room'|'hall'|'plate'} preset - 'none' restores the
   *   default IR; unknown names are ignored with a warning.
   */
  async loadPresetIR(preset) {
    // Guard: effects/audioContext only exist after init().
    await this.init();

    if (preset === 'none') {
      this.effects.createDefaultIR();
      return;
    }

    // Create synthetic IRs for different spaces (seconds of decay tail)
    const lengths = { room: 1, hall: 2, plate: 3 };
    const seconds = lengths[preset];
    if (seconds === undefined) {
      // Previously an unknown preset produced a NaN-length buffer and threw.
      console.warn(`Unknown IR preset: ${preset}`);
      return;
    }

    const length = this.audioContext.sampleRate * seconds;
    const ir = this.audioContext.createBuffer(2, length, this.audioContext.sampleRate);

    for (let channel = 0; channel < 2; channel++) {
      const channelData = ir.getChannelData(channel);

      for (let i = 0; i < length; i++) {
        const decay = Math.pow(1 - i / length, 2);

        switch (preset) {
          case 'room':
            channelData[i] = (Math.random() * 2 - 1) * decay;
            break;
          case 'hall':
            channelData[i] = (Math.random() * 2 - 1) * decay *
                             (1 + 0.5 * Math.sin(i / this.audioContext.sampleRate * 100));
            break;
          case 'plate':
            channelData[i] = (Math.random() * 2 - 1) * decay *
                             Math.sin(i / this.audioContext.sampleRate * 2000);
            break;
        }
      }
    }

    this.effects.setImpulseResponse(ir);
  }

  /**
   * Decode a user-supplied impulse-response file and install it.
   * @param {File} file
   * @returns {Promise<boolean>} true on success.
   */
  async loadIRFile(file) {
    await this.init();

    try {
      const arrayBuffer = await file.arrayBuffer();
      const irBuffer = await this.audioContext.decodeAudioData(arrayBuffer);
      this.effects.setImpulseResponse(irBuffer);
      return true;
    } catch (err) {
      console.error('Error loading IR:', err);
      return false;
    }
  }

  /**
   * Route the user's microphone (raw, with browser processing disabled)
   * into the effect chain.
   * @returns {Promise<boolean>} true on success.
   * @throws Re-throws getUserMedia errors (e.g. permission denied).
   */
  async startMicrophone() {
    try {
      await this.init();

      this.microphoneStream = await navigator.mediaDevices.getUserMedia({
        audio: {
          echoCancellation: false,
          noiseSuppression: false,
          autoGainControl: false
        }
      });

      this.microphoneSource = this.audioContext.createMediaStreamSource(this.microphoneStream);
      this.microphoneSource.connect(this.inputGain);

      this.isMicrophoneActive = true;
      return true;
    } catch (err) {
      console.error('Error starting microphone:', err);
      throw err;
    }
  }

  /** Disconnect the microphone and release the capture device. */
  stopMicrophone() {
    if (this.microphoneSource) {
      this.microphoneSource.disconnect();
      this.microphoneSource = null;
    }

    if (this.microphoneStream) {
      // Stopping the tracks releases the hardware / permission indicator.
      this.microphoneStream.getTracks().forEach(track => track.stop());
      this.microphoneStream = null;
    }

    this.isMicrophoneActive = false;
  }

  /**
   * Start looped playback of the loaded buffer. Any already-playing
   * source is stopped first — previously, repeated calls stacked
   * overlapping sources whose references were lost and could never
   * be stopped.
   */
  play() {
    if (!this.audioBuffer) return;

    this.stop();

    this.source = this.audioContext.createBufferSource();
    this.source.buffer = this.audioBuffer;
    this.source.loop = true;
    this.source.connect(this.inputGain);
    this.source.start();

    this.isPlaying = true;
  }

  /** Stop and release the current buffer source, if any. */
  stop() {
    if (this.source) {
      this.source.stop();
      this.source.disconnect();
      this.source = null;
    }

    this.isPlaying = false;
  }
}