123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199 |
<template>
  <!-- Root layout: mic toggle button + elapsed-time readout -->
  <div class="flex items-center">
    <!-- Recording in progress: animated volume bars; click stops the recording -->
    <template v-if="state.isStart">
      <el-tooltip content="停止语音输入" placement="top">
        <div class="voice __hover" @click="onStop">
          <!-- Four bars whose heights are driven each frame by updateVolumeBars() -->
          <div v-for="item in 4" ref="ref_bars" />
        </div>
      </el-tooltip>
    </template>
    <!-- Idle: static microphone icon; click starts a recording -->
    <template v-else>
      <el-tooltip content="语音输入" placement="top">
        <div class="__hover size-6" @click="onStart">
          <img class="h-full w-full" src="@/assets/images/chat/audio.png" />
        </div>
      </el-tooltip>
    </template>
    <!-- Elapsed recording time (mm:ss), shown only while recording -->
    <template v-if="state.isStart">
      <div class="ml-1.5 text-sm text-[#4f4f4f]">
        {{ durationCpt }}
      </div>
    </template>
  </div>
</template>
- <script setup lang="ts">
- import {
- computed,
- getCurrentInstance,
- onMounted,
- reactive,
- ref,
- watch,
- } from 'vue'
- import { ElMessage } from 'element-plus'
// Events: 'onLoading' fires when recording stops (transcription begins);
// 'onAudio' delivers the recognized text ('' on failure).
const emit = defineEmits(['onLoading', 'onAudio'])
const props = defineProps({})
// NOTE(review): `any` disables checking of this whole state bag; a typed
// interface (MediaRecorder | null, AnalyserNode | null, ...) would be safer.
const state: any = reactive({
  isStart: false, // true while a recording session is active
  duration: 0, // elapsed recording time in whole seconds
  mediaRecorder: null, // MediaRecorder for the active mic stream
  audioBlob: null, // assembled recording, built in the onstop handler
  timer: null, // setInterval id updating `duration`
  analyser: null, // AnalyserNode feeding the volume-bar animation
  animationId: null, // requestAnimationFrame id for updateVolumeBars
  phase: 0, // shared wave phase for the bar animation
})
// Template refs to the four bar <div>s rendered by v-for
const ref_bars = ref()
- const durationCpt = computed(() => {
- const minutes = Math.floor(state.duration / 60)
- const seconds = Math.floor(state.duration % 60)
- return `${minutes.toString().padStart(2, '0')}:${seconds.toString().padStart(2, '0')}`
- })
- const onStart = async () => {
- state.isStart = true
- try {
- // 请求麦克风权限
- const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
- const audioContext = new (window.AudioContext ||
- window.webkitAudioContext)()
- state.analyser = audioContext.createAnalyser()
- state.analyser.fftSize = 256
- const microphone = audioContext.createMediaStreamSource(stream)
- microphone.connect(state.analyser)
- updateVolumeBars()
- state.mediaRecorder = new MediaRecorder(stream)
- const audioChunks: any = []
- state.mediaRecorder.ondataavailable = (event) => {
- audioChunks.push(event.data)
- }
- state.mediaRecorder.onstop = async () => {
- clearInterval(state.timer)
- state.audioBlob = new Blob(audioChunks, { type: 'audio/mp3' })
- // this.audioUrl = URL.createObjectURL(this.audioBlob)
- stream.getTracks().forEach((track) => track.stop())
- if (microphone) {
- microphone.disconnect()
- }
- if (audioContext && audioContext.state !== 'closed') {
- audioContext.close()
- }
- cancelAnimationFrame(state.animationId)
- // 重置柱状图
- ref_bars.value.forEach((bar) => {
- bar.style.height = '4px'
- })
- if (!state.audioBlob) {
- ElMessage.error('没有可上传的录音文件')
- return
- }
- try {
- const formData = new FormData()
- formData.append('file', state.audioBlob)
- // const audioResponse = await audioToText(
- // `/installed-apps/${import.meta.env.VITE_DIFY_APPID}/audio-to-text`,
- // false,
- // formData,
- // )
- emit(
- 'onAudio',
- '语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容语音内容',
- )
- } catch (err) {
- emit('onAudio', '')
- ElMessage.error('上传错误:' + err)
- } finally {
- emit('onAudio', '')
- }
- }
- state.mediaRecorder.start()
- const startTime = Date.now()
- state.duration = 0
- // 更新录音时长
- state.timer = setInterval(() => {
- state.duration = Math.floor((Date.now() - startTime) / 1000)
- }, 1000)
- } catch (err: any) {
- ElMessage.error('无法访问麦克风: ' + err.message)
- console.error('录音错误:', err)
- }
- }
- const onStop = async () => {
- emit('onLoading')
- state.isStart = false
- if (state.mediaRecorder) {
- state.mediaRecorder.stop()
- }
- }
- const updateVolumeBars = () => {
- if (!state.isStart) return
- const array = new Uint8Array(state.analyser.frequencyBinCount)
- state.analyser.getByteFrequencyData(array)
- let sum = 0
- for (let i = 0; i < array.length; i++) {
- sum += array[i]
- }
- const average = sum / array.length
- const baseVolume = Math.min(1, average / 70)
- // 更新相位
- state.phase += 0.2
- if (state.phase > Math.PI * 2) state.phase -= Math.PI * 2
- // 更新每个柱子的高度
- ref_bars.value.forEach((bar, index) => {
- // 每个柱子有轻微相位差
- const barPhase = state.phase + (index * Math.PI) / 3
- // 波浪因子 (0.5-1.5范围)
- const waveFactor = 0.8 + Math.sin(barPhase) * 0.2
- // 基础高度 + 音量影响 * 波浪因子
- const height =
- 4 +
- baseVolume *
- (index > 0 && index < ref_bars.value.length - 1 ? 15 : 5) *
- waveFactor
- bar.style.height = `${height}px`
- })
- state.animationId = requestAnimationFrame(updateVolumeBars)
- }
- const calculateBarLevels = (volume) => {
- // 根据音量计算4个柱子的高度比例
- // 无声音时全部为0,有声音时从低到高依次点亮
- const thresholds = [0.25, 0.5, 0.75, 1.0]
- return thresholds.map((t) =>
- Math.max(0, Math.min(1, (volume - (t - 0.25)) * 4)),
- )
- }
- onMounted(() => {})
- </script>
<style lang="scss" scoped>
// Container for the four animated volume bars shown while recording.
.voice {
  background-color: rgba(var(--czr-main-color-rgb), 0.1);
  gap: 2px;
  width: 32px;
  height: 32px;
  border-radius: 8px;
  display: flex;
  align-items: center;
  justify-content: center;
  // Individual bars; height is animated inline by updateVolumeBars().
  > div {
    width: 2px;
    height: 4px; // idle/reset height
    background-color: var(--czr-main-color);
    transition: height 0.15s ease-out;
    &:nth-child(2) {
      margin-right: 1px;
    }
  }
}
</style>
|