diff --git a/ts/components/GlobalAudioContext.tsx b/ts/components/GlobalAudioContext.tsx
index efe4ab1e7a..7a3cceb36a 100644
--- a/ts/components/GlobalAudioContext.tsx
+++ b/ts/components/GlobalAudioContext.tsx
@@ -2,16 +2,22 @@
 // SPDX-License-Identifier: AGPL-3.0-only
 
 import * as React from 'react';
+import PQueue from 'p-queue';
 import LRU from 'lru-cache';
 
 import { WaveformCache } from '../types/Audio';
 
 const MAX_WAVEFORM_COUNT = 1000;
+const MAX_PARALLEL_COMPUTE = 8;
 
-type Contents = {
+export type ComputePeaksResult = {
+  duration: number;
+  peaks: ReadonlyArray<number>;
+};
+
+export type Contents = {
   audio: HTMLAudioElement;
-  audioContext: AudioContext;
-  waveformCache: WaveformCache;
+  computePeaks(url: string, barCount: number): Promise<ComputePeaksResult>;
 };
 
 // This context's value is effectively global. This is not ideal but is necessary because
@@ -19,12 +25,108 @@ type Contents = {
 // and instantiate these inside of `GlobalAudioProvider`. (We may wish to keep
 // `audioContext` global, however, as the browser limits the number that can be
 // created.)
+const audioContext = new AudioContext();
+const waveformCache: WaveformCache = new LRU({
+  max: MAX_WAVEFORM_COUNT,
+});
+
+const inProgressMap = new Map<string, Promise<ComputePeaksResult>>();
+const computeQueue = new PQueue({
+  concurrency: MAX_PARALLEL_COMPUTE,
+});
+
+/**
+ * Load audio from `url`, decode PCM data, and compute RMS peaks for displaying
+ * the waveform.
+ *
+ * The results are cached in the `waveformCache` which is shared across
+ * messages in the conversation and provided by GlobalAudioContext.
+ *
+ * The computation happens off the renderer thread by AudioContext, but it is
+ * still quite expensive, so we cache it in the `waveformCache` LRU cache.
+ */
+async function doComputePeaks(
+  url: string,
+  barCount: number
+): Promise<ComputePeaksResult> {
+  const existing = waveformCache.get(url);
+  if (existing) {
+    window.log.info('GlobalAudioContext: waveform cache hit', url);
+    return Promise.resolve(existing);
+  }
+
+  window.log.info('GlobalAudioContext: waveform cache miss', url);
+
+  // Load and decode `url` into a raw PCM
+  const response = await fetch(url);
+  const raw = await response.arrayBuffer();
+
+  const data = await audioContext.decodeAudioData(raw);
+
+  // Compute RMS peaks
+  const peaks = new Array(barCount).fill(0);
+  const norms = new Array(barCount).fill(0);
+
+  const samplesPerPeak = data.length / peaks.length;
+  for (
+    let channelNum = 0;
+    channelNum < data.numberOfChannels;
+    channelNum += 1
+  ) {
+    const channel = data.getChannelData(channelNum);
+
+    for (let sample = 0; sample < channel.length; sample += 1) {
+      const i = Math.floor(sample / samplesPerPeak);
+      peaks[i] += channel[sample] ** 2;
+      norms[i] += 1;
+    }
+  }
+
+  // Average
+  let max = 1e-23;
+  for (let i = 0; i < peaks.length; i += 1) {
+    peaks[i] = Math.sqrt(peaks[i] / Math.max(1, norms[i]));
+    max = Math.max(max, peaks[i]);
+  }
+
+  // Normalize
+  for (let i = 0; i < peaks.length; i += 1) {
+    peaks[i] /= max;
+  }
+
+  const result = { peaks, duration: data.duration };
+  waveformCache.set(url, result);
+  return result;
+}
+
+export async function computePeaks(
+  url: string,
+  barCount: number
+): Promise<ComputePeaksResult> {
+  const computeKey = `${url}:${barCount}`;
+
+  const pending = inProgressMap.get(computeKey);
+  if (pending) {
+    window.log.info(
+      'GlobalAudioContext: already computing peaks for',
+      computeKey
+    );
+    return pending;
+  }
+
+  window.log.info('GlobalAudioContext: queue computing peaks for', computeKey);
+  const promise = computeQueue.add(() => doComputePeaks(url, barCount));
+
+  inProgressMap.set(computeKey, promise);
+  const result = await promise;
+  inProgressMap.delete(computeKey);
+
+  return result;
+}
+
 const globalContents: Contents = {
   audio: new Audio(),
-  audioContext: new AudioContext(),
-  waveformCache: new LRU({
-    max: MAX_WAVEFORM_COUNT,
-  }),
+  computePeaks,
 };
 
 export const GlobalAudioContext = React.createContext(globalContents);
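How the new entry point behaves from a caller's perspective, as a minimal sketch (the attachment URL and bar count below are made up for illustration): concurrent calls sharing a `computeKey` are coalesced through `inProgressMap` into a single `doComputePeaks` run, and the `p-queue` instance caps decoding at `MAX_PARALLEL_COMPUTE` jobs across all messages.

```ts
import { computePeaks } from './GlobalAudioContext';

async function demo(): Promise<void> {
  const url = 'attachment://voice-note.aac'; // hypothetical URL

  // Both calls share the computeKey 'attachment://voice-note.aac:100', so
  // only one fetch/decode is queued; the second await reuses its promise.
  const [a, b] = await Promise.all([
    computePeaks(url, 100),
    computePeaks(url, 100),
  ]);

  // Both resolve to the same normalized result, which is now in
  // waveformCache, so later calls for this url are immediate cache hits.
  console.log(a.duration === b.duration, b.peaks.length); // true 100
}
```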
diff --git a/ts/components/conversation/Message.stories.tsx b/ts/components/conversation/Message.stories.tsx
index 4b39cbffd9..72204e92d9 100644
--- a/ts/components/conversation/Message.stories.tsx
+++ b/ts/components/conversation/Message.stories.tsx
@@ -3,14 +3,12 @@
 
 import * as React from 'react';
 import { isBoolean } from 'lodash';
-import LRU from 'lru-cache';
 
 import { action } from '@storybook/addon-actions';
 import { boolean, number, text, select } from '@storybook/addon-knobs';
 import { storiesOf } from '@storybook/react';
 
 import { Colors } from '../../types/Colors';
-import { WaveformCache } from '../../types/Audio';
 import { EmojiPicker } from '../emoji/EmojiPicker';
 import { Message, Props, AudioAttachmentProps } from './Message';
 import {
@@ -22,6 +20,7 @@ import {
   VIDEO_MP4,
 } from '../../types/MIME';
 import { MessageAudio } from './MessageAudio';
+import { computePeaks } from '../GlobalAudioContext';
 import { setup as setupI18n } from '../../../js/modules/i18n';
 import enMessages from '../../../_locales/en/messages.json';
 import { pngUrl } from '../../storybook/Fixtures';
@@ -50,16 +49,13 @@
 const MessageAudioContainer: React.FC<AudioAttachmentProps> = props => {
   const [activeAudioID, setActiveAudioID] = React.useState<string | undefined>(
     undefined
   );
   const audio = React.useMemo(() => new Audio(), []);
-  const audioContext = React.useMemo(() => new AudioContext(), []);
-  const waveformCache: WaveformCache = React.useMemo(() => new LRU(), []);
 
   return (
     <MessageAudio
       {...props}
       audio={audio}
-      audioContext={audioContext}
-      waveformCache={waveformCache}
+      computePeaks={computePeaks}
       activeAudioID={activeAudioID}
       setActiveAudioID={setActiveAudioID}
diff --git a/ts/components/conversation/MessageAudio.tsx b/ts/components/conversation/MessageAudio.tsx
index d5301e5cff..95378d4f67 100644
--- a/ts/components/conversation/MessageAudio.tsx
+++ b/ts/components/conversation/MessageAudio.tsx
@@ -7,9 +7,10 @@ import { noop } from 'lodash';
 
 import { assert } from '../../util/assert';
 import { LocalizerType } from '../../types/Util';
-import { WaveformCache } from '../../types/Audio';
 import { hasNotDownloaded, AttachmentType } from '../../types/Attachment';
 
+import { ComputePeaksResult } from '../GlobalAudioContext';
+
 export type Props = {
   direction?: 'incoming' | 'outgoing';
   id: string;
@@ -20,13 +21,12 @@ export type Props = {
 
   // See: GlobalAudioContext.tsx
   audio: HTMLAudioElement;
-  audioContext: AudioContext;
-  waveformCache: WaveformCache;
 
   buttonRef: React.RefObject<HTMLButtonElement>;
   kickOffAttachmentDownload(): void;
   onCorrupted(): void;
+  computePeaks(url: string, barCount: number): Promise<ComputePeaksResult>;
 
   activeAudioID: string | undefined;
   setActiveAudioID: (id: string | undefined) => void;
 };
@@ -40,20 +40,10 @@ type ButtonProps = {
   onClick: () => void;
 };
 
-type LoadAudioOptions = {
-  audioContext: AudioContext;
-  waveformCache: WaveformCache;
-  url: string;
-};
-
-type LoadAudioResult = {
-  duration: number;
-  peaks: ReadonlyArray<number>;
-};
-
 enum State {
   NotDownloaded = 'NotDownloaded',
   Pending = 'Pending',
+  Computing = 'Computing',
   Normal = 'Normal',
 }
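With the new `Computing` member, the component's derived state reads as the following decision chain, written out as a standalone sketch for reference (not code from the diff; the initial `attachment.pending` check sits outside the hunks shown and is assumed from context):

```ts
// Sketch of how MessageAudio derives its State after this change.
function deriveState(attachment: AttachmentType, hasPeaks: boolean): State {
  if (attachment.pending) {
    return State.Pending; // assumed: download still in progress
  }
  if (hasNotDownloaded(attachment)) {
    return State.NotDownloaded;
  }
  if (!hasPeaks) {
    return State.Computing; // new: downloaded, waiting on computePeaks()
  }
  return State.Normal;
}
```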
@@ -89,68 +79,6 @@ const timeToText = (time: number): string => {
   return hours ? `${hours}:${minutes}:${seconds}` : `${minutes}:${seconds}`;
 };
 
-/**
- * Load audio from `url`, decode PCM data, and compute RMS peaks for displaying
- * the waveform.
- *
- * The results are cached in the `waveformCache` which is shared across
- * messages in the conversation and provided by GlobalAudioContext.
- */
-// TODO(indutny): move this to GlobalAudioContext and limit the concurrency.
-// see DESKTOP-1267
-async function loadAudio(options: LoadAudioOptions): Promise<LoadAudioResult> {
-  const { audioContext, waveformCache, url } = options;
-
-  const existing = waveformCache.get(url);
-  if (existing) {
-    window.log.info('MessageAudio: waveform cache hit', url);
-    return Promise.resolve(existing);
-  }
-
-  window.log.info('MessageAudio: waveform cache miss', url);
-
-  // Load and decode `url` into a raw PCM
-  const response = await fetch(url);
-  const raw = await response.arrayBuffer();
-
-  const data = await audioContext.decodeAudioData(raw);
-
-  // Compute RMS peaks
-  const peaks = new Array(BAR_COUNT).fill(0);
-  const norms = new Array(BAR_COUNT).fill(0);
-
-  const samplesPerPeak = data.length / peaks.length;
-  for (
-    let channelNum = 0;
-    channelNum < data.numberOfChannels;
-    channelNum += 1
-  ) {
-    const channel = data.getChannelData(channelNum);
-
-    for (let sample = 0; sample < channel.length; sample += 1) {
-      const i = Math.floor(sample / samplesPerPeak);
-      peaks[i] += channel[sample] ** 2;
-      norms[i] += 1;
-    }
-  }
-
-  // Average
-  let max = 1e-23;
-  for (let i = 0; i < peaks.length; i += 1) {
-    peaks[i] = Math.sqrt(peaks[i] / Math.max(1, norms[i]));
-    max = Math.max(max, peaks[i]);
-  }
-
-  // Normalize
-  for (let i = 0; i < peaks.length; i += 1) {
-    peaks[i] /= max;
-  }
-
-  const result = { peaks, duration: data.duration };
-  waveformCache.set(url, result);
-  return result;
-}
-
 const Button: React.FC<ButtonProps> = props => {
   const { i18n, buttonRef, mod, label, onClick } = props;
   // Clicking button toggle playback
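The removed body is the same RMS math that now lives in `doComputePeaks`: square each sample, bucket the squares into `barCount` bars, take the root mean per bar, then normalize by the loudest bar. Restated standalone with a worked example (a sketch mirroring the diff's loops, not a new API):

```ts
// Standalone re-statement of the RMS-peak math (mirrors doComputePeaks).
function rmsPeaks(channel: number[], barCount: number): number[] {
  const peaks = new Array(barCount).fill(0);
  const norms = new Array(barCount).fill(0);
  const samplesPerPeak = channel.length / barCount;

  channel.forEach((sample, idx) => {
    const i = Math.floor(idx / samplesPerPeak);
    peaks[i] += sample ** 2; // accumulate energy per bar
    norms[i] += 1; // count samples per bar
  });

  // Root mean square per bar, tracking the max for normalization.
  let max = 1e-23; // avoid division by zero on silent input
  for (let i = 0; i < barCount; i += 1) {
    peaks[i] = Math.sqrt(peaks[i] / Math.max(1, norms[i]));
    max = Math.max(max, peaks[i]);
  }
  return peaks.map(p => p / max);
}

// Eight samples into two bars: RMS values 0.5 and 0.1 normalize to ≈[1, 0.2].
rmsPeaks([0.5, -0.5, 0.5, -0.5, 0.1, -0.1, 0.1, -0.1], 2);
```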
@@ -192,9 +120,6 @@
  * Display message audio attachment along with its waveform, duration, and
  * toggle Play/Pause button.
  *
- * The waveform is computed off the renderer thread by AudioContext, but it is
- * still quite expensive, so we cache it in the `waveformCache` LRU cache.
- *
  * A global audio player is used for playback and access is managed by the
  * `activeAudioID` property. Whenever `activeAudioID` property is equal to `id`
  * the instance of the `MessageAudio` assumes the ownership of the `Audio`
@@ -214,8 +139,7 @@ export const MessageAudio: React.FC<Props> = (props: Props) => {
     onCorrupted,
 
     audio,
-    audioContext,
-    waveformCache,
+    computePeaks,
 
     activeAudioID,
     setActiveAudioID,
@@ -234,6 +158,7 @@
   // NOTE: Avoid division by zero
   const [duration, setDuration] = useState(1e-23);
 
+  const [hasPeaks, setHasPeaks] = useState(false);
   const [peaks, setPeaks] = useState<ReadonlyArray<number>>(
     new Array(BAR_COUNT).fill(0)
   );
@@ -244,6 +169,8 @@
     state = State.Pending;
   } else if (hasNotDownloaded(attachment)) {
     state = State.NotDownloaded;
+  } else if (!hasPeaks) {
+    state = State.Computing;
   } else {
     state = State.Normal;
   }
@@ -251,7 +178,7 @@
 
   // This effect loads audio file and computes its RMS peak for dispalying the
   // waveform.
   useEffect(() => {
-    if (state !== State.Normal) {
+    if (state !== State.Computing) {
       return noop;
     }
@@ -268,19 +195,19 @@
         );
       }
 
-      const { peaks: newPeaks, duration: newDuration } = await loadAudio({
-        audioContext,
-        waveformCache,
-        url: attachment.url,
-      });
+      const { peaks: newPeaks, duration: newDuration } = await computePeaks(
+        attachment.url,
+        BAR_COUNT
+      );
       if (canceled) {
         return;
       }
       setPeaks(newPeaks);
+      setHasPeaks(true);
       setDuration(Math.max(newDuration, 1e-23));
     } catch (err) {
       window.log.error(
-        'MessageAudio: loadAudio error, marking as corrupted',
+        'MessageAudio: computePeaks error, marking as corrupted',
         err
       );
@@ -293,12 +220,12 @@
     };
   }, [
     attachment,
-    audioContext,
+    computePeaks,
     setDuration,
     setPeaks,
+    setHasPeaks,
     onCorrupted,
     state,
-    waveformCache,
   ]);
 
   // This effect attaches/detaches event listeners to the global
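For reference, the overall shape of the effect these hunks modify, including the `canceled` cleanup flag that makes the late `setPeaks`/`setHasPeaks` calls safe across re-renders and unmounts (an outline under assumed surrounding names; the real effect also asserts `attachment.url` and reports corruption via `onCorrupted`):

```ts
// Outline of the waveform-loading effect (sketch; error handling elided).
useEffect(() => {
  if (state !== State.Computing) {
    return noop;
  }

  let canceled = false;
  (async () => {
    const { peaks: newPeaks, duration: newDuration } = await computePeaks(
      attachment.url,
      BAR_COUNT
    );
    if (canceled) {
      return; // deps changed or component unmounted; drop the stale result
    }
    setPeaks(newPeaks);
    setHasPeaks(true);
    setDuration(Math.max(newDuration, 1e-23));
  })();

  // Cleanup runs before the next effect invocation and on unmount.
  return () => {
    canceled = true;
  };
}, [attachment, computePeaks, setDuration, setPeaks, setHasPeaks, onCorrupted, state]);
```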