最近我需要弄清楚另一件事:如何将我的 AssemblyAI 转录引擎连接到一个处于嘈杂环境中的前端。
第一步是请求访问启用了回声消除功能的麦克风。此功能内置于大多数现代浏览器中,有助于减少扬声器的反馈。
/**
 * Requests microphone access with echo cancellation, noise suppression,
 * and automatic gain control enabled (all built into modern browsers).
 *
 * NOTE: the published snippet was lowercased by the blog pipeline
 * (`navigator.mediadevices.getusermedia`, `echocancellation`, ...), which
 * breaks every camelCase Web API name — restored here.
 *
 * @returns {Promise<MediaStream|null>} The captured audio stream, or null
 *   when the user denies permission or no capture device is available.
 */
async function getMicrophoneStream() {
  const constraints = {
    audio: {
      echoCancellation: true,
      noiseSuppression: true,
      autoGainControl: true,
    },
  };
  try {
    return await navigator.mediaDevices.getUserMedia(constraints);
  } catch (err) {
    // Permission denied / no device — callers must handle the null result.
    console.error('error accessing the microphone', err);
    return null;
  }
}
接下来,我们设置 web audio api 来处理音频流。这涉及创建 audiocontext 并连接各种节点,包括 dynamicscompressornode。
/**
 * Wires the microphone stream into a Web Audio graph with a
 * DynamicsCompressorNode to tame loud peaks before output.
 *
 * Compressor settings follow the MDN DynamicsCompressorNode example:
 * threshold -50 dB, knee 40 dB, ratio 12:1, attack 0 s, release 0.25 s.
 * The original snippet passed `null` for knee/ratio/attack, which throws
 * a TypeError — AudioParam.setValueAtTime requires a finite number.
 *
 * @param {MediaStream} stream - Audio stream obtained from getUserMedia.
 * @returns {Promise<{audioContext: AudioContext, source: MediaStreamAudioSourceNode, compressor: DynamicsCompressorNode}>}
 */
async function setupAudioProcessing(stream) {
  const audioContext = new AudioContext();
  const source = audioContext.createMediaStreamSource(stream);

  // DynamicsCompressorNode for additional processing of the raw mic signal.
  const compressor = audioContext.createDynamicsCompressor();
  const now = audioContext.currentTime;
  compressor.threshold.setValueAtTime(-50, now);
  compressor.knee.setValueAtTime(40, now);
  compressor.ratio.setValueAtTime(12, now);
  compressor.attack.setValueAtTime(0, now);
  compressor.release.setValueAtTime(0.25, now);

  // stream -> compressor -> speakers
  source.connect(compressor);
  compressor.connect(audioContext.destination);

  return { audioContext, source, compressor };
}
最后,我们将音频处理设置与 web speech api 集成以执行语音识别。
/**
 * End-to-end pipeline: capture the microphone, route it through the
 * compressor graph, then run continuous speech recognition via the
 * Web Speech API.
 *
 * @returns {Promise<SpeechRecognition|undefined>} The active recognizer,
 *   or undefined when microphone access was refused.
 */
async function startSpeechRecognition() {
  const stream = await getMicrophoneStream();
  if (!stream) return;

  const { audioContext } = await setupAudioProcessing(stream);

  // Safari still ships only the webkit-prefixed constructor.
  const SpeechRecognitionCtor =
    window.SpeechRecognition || window.webkitSpeechRecognition;
  const recognition = new SpeechRecognitionCtor();
  recognition.continuous = true;
  recognition.interimResults = true;

  recognition.onresult = (event) => {
    // The published source was truncated here ("for (let i = event.resultIndex; i {")
    // — reconstructed as the canonical result loop from the Web Speech API docs.
    for (let i = event.resultIndex; i < event.results.length; i++) {
      const transcript = event.results[i][0].transcript;
      if (event.results[i].isFinal) {
        console.log('Final:', transcript);
      } else {
        console.log('Interim:', transcript);
      }
    }
  };

  recognition.onerror = (event) => {
    console.error('Speech recognition error', event.error);
  };

  recognition.start();

  // Autoplay policies may leave the context suspended until a user gesture.
  if (audioContext.state === 'suspended') {
    audioContext.resume();
  }

  return recognition;
}

// Start the speech recognition process; catch instead of leaving the
// promise rejection floating.
startSpeechRecognition().catch((err) => {
  console.error('Failed to start speech recognition', err);
});
希望您发现这很有用。
快乐编码!
蒂姆.