fix(audio): prevent app freeze during focus mode ticking sound

Use singleton AudioContext and cache audio buffers to prevent memory
leak and resource exhaustion. Previously, a new AudioContext was
created for every sound play (once per second with ticking enabled),
causing ~480+ instances after 8 minutes and browser GC pauses.

Also fixes race condition where source.start() was called before
audio data was loaded.

Fixes #5798
This commit is contained in:
Johannes Millan 2026-01-02 15:26:12 +01:00
parent 871ee354ca
commit 3a5cddd8e8
5 changed files with 505 additions and 71 deletions

View file

@@ -1,56 +1,47 @@
import { SoundConfig } from '../../config/global-config.model';
import { TaskLog } from '../../../core/log';
import { getAudioBuffer, getAudioContext } from '../../../util/audio-context';
const BASE = './assets/snd';
const PITCH_OFFSET = -400;
/**
* Plays the task completion sound with optional pitch variation.
* Uses a singleton AudioContext and caches audio buffers to prevent resource leaks.
*
* @param soundCfg - Sound configuration including volume and pitch settings
* @param nrOfDoneTasks - Number of completed tasks (affects pitch if enabled)
*/
export const playDoneSound = (soundCfg: SoundConfig, nrOfDoneTasks: number = 0): void => {
const speed = 1;
const BASE = './assets/snd';
const PITCH_OFFSET = -400;
const file = `${BASE}/${soundCfg.doneSound}`;
// const speed = 0.5;
// const a = new Audio('/assets/snd/done4.mp3');
// TaskLog.log(a);
// a.volume = .4;
// a.playbackRate = 1.5;
// (a as any).mozPreservesPitch = false;
// (a as any).webkitPreservesPitch = false;
// a.play();
TaskLog.log(file);
const pitchIncrement = nrOfDoneTasks * 50;
const pitchFactor = soundCfg.isIncreaseDoneSoundPitch
? // prettier-ignore
PITCH_OFFSET + (nrOfDoneTasks * 50)
? PITCH_OFFSET + pitchIncrement
: 0;
const audioCtx = new ((window as any).AudioContext ||
(window as any).webkitAudioContext)();
const source = audioCtx.createBufferSource();
const request = new XMLHttpRequest();
request.open('GET', file, true);
request.responseType = 'arraybuffer';
request.onload = () => {
const audioData = request.response;
audioCtx.decodeAudioData(
audioData,
(buffer: AudioBuffer) => {
source.buffer = buffer;
source.playbackRate.value = speed;
// source.detune.value = 100; // value in cents
source.detune.value = pitchFactor; // value in cents
getAudioBuffer(file)
.then((buffer) => {
const audioCtx = getAudioContext();
const source = audioCtx.createBufferSource();
source.buffer = buffer;
source.playbackRate.value = speed;
source.detune.value = pitchFactor;
if (soundCfg.volume !== 100) {
const gainNode = audioCtx.createGain();
gainNode.gain.value = soundCfg.volume / 100;
source.connect(gainNode);
gainNode.connect(audioCtx.destination);
} else {
source.connect(audioCtx.destination);
}
},
(e: DOMException) => {
throw new Error('Error with decoding audio data SP: ' + e.message);
},
);
};
request.send();
source.start(0);
if (soundCfg.volume !== 100) {
const gainNode = audioCtx.createGain();
gainNode.gain.value = soundCfg.volume / 100;
source.connect(gainNode);
gainNode.connect(audioCtx.destination);
} else {
source.connect(audioCtx.destination);
}
source.start(0);
})
.catch((e) => {
console.error('Error playing done sound:', e);
});
};

View file

@@ -0,0 +1,214 @@
import {
getAudioContext,
getAudioBuffer,
clearAudioBufferCache,
closeAudioContext,
} from './audio-context';
// Unit tests for the singleton AudioContext manager (audio-context.ts).
// NOTE(review): the module under test keeps singleton state at module level,
// so each beforeEach resets it via closeAudioContext() AFTER installing a
// mock constructor — the reset order matters.
describe('audio-context', () => {
let originalAudioContext: typeof AudioContext;
let mockCloseContext: jasmine.Spy;
beforeEach(() => {
// Preserve the real constructor so afterEach can restore it.
originalAudioContext = (window as any).AudioContext;
// Create a mock context that has the close method
mockCloseContext = jasmine.createSpy('close');
const mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: mockCloseContext,
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
// Reset the module state
closeAudioContext();
// Now we can set up our real test mocks
});
afterEach(() => {
(window as any).AudioContext = originalAudioContext;
});
describe('getAudioContext', () => {
it('should create an AudioContext if none exists', () => {
const mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
const ctx = getAudioContext();
expect((window as any).AudioContext).toHaveBeenCalled();
expect(ctx).toBe(mockContext as unknown as AudioContext);
});
it('should return the same AudioContext on subsequent calls', () => {
const mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
const ctx1 = getAudioContext();
const ctx2 = getAudioContext();
// Singleton contract: constructor invoked at most once per lifetime.
expect((window as any).AudioContext).toHaveBeenCalledTimes(1);
expect(ctx1).toBe(ctx2);
});
it('should resume the context if suspended', () => {
// Suspended state simulates the browser autoplay policy blocking audio.
const mockContext = {
state: 'suspended',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
getAudioContext();
expect(mockContext.resume).toHaveBeenCalled();
});
it('should not resume if context is running', () => {
const mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
getAudioContext();
expect(mockContext.resume).not.toHaveBeenCalled();
});
});
describe('getAudioBuffer', () => {
let mockContext: any;
let mockArrayBuffer: ArrayBuffer;
let mockAudioBuffer: AudioBuffer;
beforeEach(() => {
mockArrayBuffer = new ArrayBuffer(8);
mockAudioBuffer = {} as AudioBuffer;
mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
decodeAudioData: jasmine
.createSpy('decodeAudioData')
.and.returnValue(Promise.resolve(mockAudioBuffer)),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
// Stub fetch so no real network/file access happens in the suite.
spyOn(window, 'fetch').and.returnValue(
Promise.resolve({
arrayBuffer: () => Promise.resolve(mockArrayBuffer),
} as Response),
);
});
it('should fetch and decode audio on first call', async () => {
const buffer = await getAudioBuffer('./assets/snd/test.mp3');
expect(window.fetch).toHaveBeenCalledWith('./assets/snd/test.mp3');
expect(mockContext.decodeAudioData).toHaveBeenCalledWith(mockArrayBuffer);
expect(buffer).toBe(mockAudioBuffer);
});
it('should return cached buffer on subsequent calls', async () => {
await getAudioBuffer('./assets/snd/test.mp3');
const buffer = await getAudioBuffer('./assets/snd/test.mp3');
// Single fetch proves the second call was served from the cache.
expect(window.fetch).toHaveBeenCalledTimes(1);
expect(buffer).toBe(mockAudioBuffer);
});
it('should cache different files separately', async () => {
await getAudioBuffer('./assets/snd/test1.mp3');
await getAudioBuffer('./assets/snd/test2.mp3');
expect(window.fetch).toHaveBeenCalledTimes(2);
expect(window.fetch).toHaveBeenCalledWith('./assets/snd/test1.mp3');
expect(window.fetch).toHaveBeenCalledWith('./assets/snd/test2.mp3');
});
});
describe('clearAudioBufferCache', () => {
it('should clear the buffer cache', async () => {
const mockArrayBuffer = new ArrayBuffer(8);
const mockAudioBuffer = {} as AudioBuffer;
const mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
decodeAudioData: jasmine
.createSpy('decodeAudioData')
.and.returnValue(Promise.resolve(mockAudioBuffer)),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
spyOn(window, 'fetch').and.returnValue(
Promise.resolve({
arrayBuffer: () => Promise.resolve(mockArrayBuffer),
} as Response),
);
await getAudioBuffer('./assets/snd/test.mp3');
clearAudioBufferCache();
// A second fetch after clearing proves the cache entry was removed.
await getAudioBuffer('./assets/snd/test.mp3');
expect(window.fetch).toHaveBeenCalledTimes(2);
});
});
describe('closeAudioContext', () => {
it('should close the context and clear cache', () => {
const mockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockContext);
getAudioContext();
closeAudioContext();
expect(mockContext.close).toHaveBeenCalled();
// Verify a new context is created after close
const newMockContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(newMockContext);
const ctx = getAudioContext();
expect(ctx).toBe(newMockContext as unknown as AudioContext);
});
});
});

View file

@@ -0,0 +1,64 @@
/**
 * Singleton AudioContext manager to avoid creating multiple AudioContext instances.
 * This prevents memory leaks and browser resource exhaustion when playing sounds frequently.
 */
let audioContext: AudioContext | null = null;

// Cache the in-flight load promise (not just the resolved buffer) so that
// concurrent requests for the same file share one fetch + decode instead of
// each triggering its own (the per-second ticking sound hits exactly this).
const audioBufferCache = new Map<string, Promise<AudioBuffer>>();

/**
 * Returns the singleton AudioContext instance, creating it if necessary.
 * Handles the AudioContext suspended state that can occur due to browser autoplay policies.
 */
export const getAudioContext = (): AudioContext => {
  if (!audioContext) {
    audioContext = new ((window as any).AudioContext ||
      (window as any).webkitAudioContext)();
  }
  // Resume if suspended (can happen due to browser autoplay policies).
  // resume() is async; fire-and-forget is intentional here.
  if (audioContext && audioContext.state === 'suspended') {
    audioContext.resume();
  }
  return audioContext!;
};

/**
 * Retrieves a cached audio buffer or fetches and decodes it if not cached.
 * Concurrent calls for the same file share a single fetch/decode, and a
 * failed load is evicted from the cache so a later call can retry.
 *
 * @param filePath - Path to the audio file
 * @returns Promise resolving to the decoded AudioBuffer
 * @throws Error (via rejected promise) when the HTTP response is not ok
 */
export const getAudioBuffer = (filePath: string): Promise<AudioBuffer> => {
  const cached = audioBufferCache.get(filePath);
  if (cached) {
    return cached;
  }
  const loadPromise = (async (): Promise<AudioBuffer> => {
    const ctx = getAudioContext();
    const response = await fetch(filePath);
    // Surface HTTP errors explicitly instead of letting decodeAudioData
    // fail cryptically on an HTML error page body.
    if (!response.ok) {
      throw new Error(`Failed to load audio file ${filePath}: ${response.status}`);
    }
    const arrayBuffer = await response.arrayBuffer();
    return ctx.decodeAudioData(arrayBuffer);
  })();
  // Evict on failure so a transient network error doesn't poison the cache.
  loadPromise.catch(() => audioBufferCache.delete(filePath));
  audioBufferCache.set(filePath, loadPromise);
  return loadPromise;
};

/**
 * Clears the audio buffer cache. Useful for testing or memory management.
 */
export const clearAudioBufferCache = (): void => {
  audioBufferCache.clear();
};

/**
 * Closes the AudioContext and clears all caches.
 * Should only be called when audio is no longer needed (e.g., app shutdown).
 */
export const closeAudioContext = (): void => {
  if (audioContext) {
    audioContext.close();
    audioContext = null;
  }
  audioBufferCache.clear();
};

View file

@@ -0,0 +1,165 @@
import { playSound } from './play-sound';
import { closeAudioContext } from './audio-context';
// Unit tests for playSound (play-sound.ts).
// NOTE(review): playSound is fire-and-forget (returns void, resolves work in
// a promise chain), so specs flush the microtask queue with a 10ms setTimeout
// before asserting. closeAudioContext() resets the module singleton per test.
describe('playSound', () => {
let mockAudioContext: any;
let mockGainNode: any;
let mockBufferSource: any;
let mockAudioBuffer: AudioBuffer;
let originalAudioContext: typeof AudioContext;
let fetchSpy: jasmine.Spy;
beforeEach(() => {
// Preserve the real constructor so afterEach can restore it.
originalAudioContext = (window as any).AudioContext;
mockGainNode = {
connect: jasmine.createSpy('connect'),
gain: { value: 1 },
};
mockBufferSource = {
connect: jasmine.createSpy('connect'),
start: jasmine.createSpy('start'),
buffer: null,
};
mockAudioBuffer = {} as AudioBuffer;
mockAudioContext = {
state: 'running',
resume: jasmine.createSpy('resume'),
close: jasmine.createSpy('close'),
createBufferSource: jasmine
.createSpy('createBufferSource')
.and.returnValue(mockBufferSource),
createGain: jasmine.createSpy('createGain').and.returnValue(mockGainNode),
destination: {} as AudioDestinationNode,
decodeAudioData: jasmine
.createSpy('decodeAudioData')
.and.callFake(() => Promise.resolve(mockAudioBuffer)),
};
(window as any).AudioContext = jasmine
.createSpy('AudioContext')
.and.returnValue(mockAudioContext);
// Stub fetch so no real network/file access happens in the suite.
fetchSpy = spyOn(window, 'fetch').and.returnValue(
Promise.resolve({
arrayBuffer: () => Promise.resolve(new ArrayBuffer(8)),
} as Response),
);
// Reset the singleton and cache for each test
closeAudioContext();
});
afterEach(() => {
(window as any).AudioContext = originalAudioContext;
closeAudioContext();
});
it('should create an AudioContext', (done) => {
playSound('test.mp3');
setTimeout(() => {
expect((window as any).AudioContext).toHaveBeenCalled();
done();
}, 10);
});
it('should fetch the audio file', (done) => {
playSound('test.mp3');
setTimeout(() => {
// playSound prepends the assets/snd base path to the file name.
expect(fetchSpy).toHaveBeenCalledWith('./assets/snd/test.mp3');
done();
}, 10);
});
it('should create a new buffer source for each playback', (done) => {
playSound('test.mp3');
setTimeout(() => {
expect(mockAudioContext.createBufferSource).toHaveBeenCalled();
done();
}, 10);
});
it('should start playback after buffer is assigned', (done) => {
playSound('test.mp3');
setTimeout(() => {
expect(mockBufferSource.start).toHaveBeenCalledWith(0);
done();
}, 10);
});
it('should connect directly to destination at full volume', (done) => {
playSound('test.mp3', 100);
setTimeout(() => {
// At 100% volume no gain node should be inserted into the graph.
expect(mockBufferSource.connect).toHaveBeenCalledWith(mockAudioContext.destination);
expect(mockAudioContext.createGain).not.toHaveBeenCalled();
done();
}, 10);
});
it('should use gain node for volume adjustment', (done) => {
playSound('test.mp3', 50);
setTimeout(() => {
// vol 50 maps to gain 0.5; graph is source -> gain -> destination.
expect(mockAudioContext.createGain).toHaveBeenCalled();
expect(mockGainNode.gain.value).toBe(0.5);
expect(mockBufferSource.connect).toHaveBeenCalledWith(mockGainNode);
expect(mockGainNode.connect).toHaveBeenCalledWith(mockAudioContext.destination);
done();
}, 10);
});
it('should handle errors gracefully', (done) => {
const consoleErrorSpy = spyOn(console, 'error');
fetchSpy.and.returnValue(Promise.reject(new Error('Test error')));
playSound('nonexistent.mp3');
setTimeout(() => {
// Failures must be logged, never thrown out of the fire-and-forget call.
expect(consoleErrorSpy).toHaveBeenCalled();
done();
}, 10);
});
it('should reuse the same AudioContext for multiple sounds', (done) => {
playSound('test1.mp3');
setTimeout(() => {
playSound('test2.mp3');
setTimeout(() => {
// AudioContext should only be created once
expect((window as any).AudioContext).toHaveBeenCalledTimes(1);
done();
}, 10);
}, 10);
});
it('should cache audio buffers and not re-fetch', (done) => {
playSound('cached-test.mp3');
setTimeout(() => {
// Reset createBufferSource call count to verify it's called again
mockAudioContext.createBufferSource.calls.reset();
mockBufferSource.connect.calls.reset();
mockBufferSource.start.calls.reset();
playSound('cached-test.mp3');
setTimeout(() => {
// Fetch should only be called once for the same file
expect(fetchSpy).toHaveBeenCalledTimes(1);
// But we should still create a new buffer source (required by Web Audio API)
expect(mockAudioContext.createBufferSource).toHaveBeenCalled();
done();
}, 10);
}, 10);
});
});

View file

@@ -1,35 +1,35 @@
import { getAudioBuffer, getAudioContext } from './audio-context';
const BASE = './assets/snd';
/**
* Plays a sound file at the specified volume.
* Uses a singleton AudioContext and caches audio buffers to prevent resource leaks.
*
* @param filePath - Path to the sound file relative to assets/snd
* @param vol - Volume level from 0 to 100 (default: 100)
*/
export const playSound = (filePath: string, vol = 100): void => {
const file = `${BASE}/${filePath}`;
const audioCtx = new ((window as any).AudioContext ||
(window as any).webkitAudioContext)();
const source = audioCtx.createBufferSource();
const request = new XMLHttpRequest();
request.open('GET', file, true);
request.responseType = 'arraybuffer';
request.onload = () => {
const audioData = request.response;
audioCtx.decodeAudioData(
audioData,
(buffer: AudioBuffer) => {
source.buffer = buffer;
getAudioBuffer(file)
.then((buffer) => {
const audioCtx = getAudioContext();
const source = audioCtx.createBufferSource();
source.buffer = buffer;
if (vol !== 100) {
const gainNode = audioCtx.createGain();
gainNode.gain.value = vol / 100;
source.connect(gainNode);
gainNode.connect(audioCtx.destination);
} else {
source.connect(audioCtx.destination);
}
},
(e: DOMException) => {
throw new Error('Error with decoding audio data SP: ' + e.message);
},
);
};
request.send();
source.start(0);
if (vol !== 100) {
const gainNode = audioCtx.createGain();
gainNode.gain.value = vol / 100;
source.connect(gainNode);
gainNode.connect(audioCtx.destination);
} else {
source.connect(audioCtx.destination);
}
source.start(0);
})
.catch((e) => {
console.error('Error playing sound:', e);
});
};