import { onUnmounted, reactive, ref } from 'vue'
import { ElMessage } from 'element-plus'

/**
 * Composable that records microphone input and reports the current volume
 * level and elapsed duration while recording.
 *
 * @param onEnd   called with the recorded audio Blob when recording stops (or null on error)
 * @param onSpeak called once per second with the current recording duration
 * @param timeout seconds of continuous silence after which recording stops automatically (0 disables)
 */
export default function useSpeechToAudio({
  onEnd = (audio: Blob | null) => {},
  onSpeak = ({ duration }: { duration: number }) => {},
  timeout = 0,
}) {
  const state: any = reactive({
    duration: 0,
    mediaRecorder: null,
    analyser: null,
    timer: null,
    dataArray: null,
    animationId: null,
    timestamp: 0,
  })
  const volume = ref(0)
  const speak = async () => {
    try {
      // Request microphone access
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
      const audioContext = new (window.AudioContext ||
        (window as any).webkitAudioContext)()
      state.analyser = audioContext.createAnalyser()
      state.analyser.fftSize = 256
      const microphone = audioContext.createMediaStreamSource(stream)
      microphone.connect(state.analyser)
      state.dataArray = new Uint8Array(state.analyser.frequencyBinCount)

      state.mediaRecorder = new MediaRecorder(stream)
      const audioChunks: Blob[] = []
      state.mediaRecorder.ondataavailable = (event) => {
        audioChunks.push(event.data)
      }
      state.mediaRecorder.onstop = async () => {
        cancelAnimationFrame(state.animationId)
        clearInterval(state.timer)
        state.timer = null
        state.timestamp = 0
        // MediaRecorder outputs webm/ogg depending on the browser, not mp3;
        // use the recorder's actual mimeType for the Blob
        const audioBlob = new Blob(audioChunks, {
          type: state.mediaRecorder?.mimeType || 'audio/webm',
        })
        stream.getTracks().forEach((track) => track.stop())
        if (microphone) {
          microphone.disconnect()
        }
        if (audioContext && audioContext.state !== 'closed') {
          audioContext.close()
        }
        if (audioBlob.size === 0) {
          ElMessage.error('No recording available to upload')
          return
        }
        try {
          onEnd(audioBlob)
        } catch (err) {
          onEnd(null)
          ElMessage.error('Upload error: ' + err)
        }
      }
      state.mediaRecorder.start()
      updateVolume()

      const startTime = Date.now()
      state.duration = 0
      volume.value = 0
      onSpeak({ duration: state.duration })
      // Report the recording duration once per second
      state.timer = setInterval(() => {
        state.duration = Math.floor((Date.now() - startTime) / 1000)
        onSpeak({ duration: state.duration })
      }, 1000)
    } catch (err: any) {
      ElMessage.error('Could not access the microphone: ' + err.message)
      console.error('Recording error:', err)
    }
  }
  const stop = () => {
    // Calling stop() on an inactive recorder throws, so guard the state
    if (state.mediaRecorder && state.mediaRecorder.state !== 'inactive') {
      state.mediaRecorder.stop()
    }
  }
  // Sample the analyser and update the volume indicator
  const updateVolume = () => {
    if (!state.analyser) return
    state.analyser.getByteFrequencyData(state.dataArray)
    // Average the frequency bins
    let sum = 0
    for (let i = 0; i < state.dataArray.length; i++) {
      sum += state.dataArray[i]
    }
    const average = sum / state.dataArray.length
    // Map the average (0-255) onto a 0-100 range
    volume.value = Math.min(100, Math.max(0, Math.round(average / 2.55)))
    // Stop automatically after `timeout` seconds of silence
    if (timeout > 0) {
      if (volume.value > 10) {
        state.timestamp = 0
      } else if (state.timestamp > 0) {
        if (Date.now() - state.timestamp > timeout * 1000) {
          stop()
        }
      } else {
        state.timestamp = Date.now()
      }
    }
    // Keep sampling on the next animation frame
    state.animationId = requestAnimationFrame(updateVolume)
  }
  // Release the microphone if the component unmounts while recording
  // (onUnmounted was imported but previously unused)
  onUnmounted(() => {
    stop()
  })

  return {
    speak,
    stop,
    volume,
  }
}
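
For reference, here is a minimal usage sketch inside a component's `<script setup lang="ts">` block. The import path, the upload endpoint, and the FormData handling are assumptions for illustration, not part of the composable itself.

// Hypothetical usage; './useSpeechToAudio' and '/api/upload-audio' are assumed
import useSpeechToAudio from './useSpeechToAudio'

const { speak, stop, volume } = useSpeechToAudio({
  // Receives the recorded Blob, or null if the onEnd callback threw
  onEnd: (audio) => {
    if (audio) {
      // e.g. wrap the Blob in FormData and POST it to your own endpoint
      const form = new FormData()
      form.append('file', audio, 'recording.webm')
      fetch('/api/upload-audio', { method: 'POST', body: form })
    }
  },
  // Receives the elapsed recording time once per second
  onSpeak: ({ duration }) => {
    console.log(`recording for ${duration}s`)
  },
  // Stop automatically after 3 seconds of silence
  timeout: 3,
})

// Bind speak/stop to UI controls, e.g. @mousedown="speak" / @mouseup="stop",
// and render `volume` (0-100) as a level meter.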