<!-- Floating voice-assistant widget. Rendered only while a voice session is
     active (after the wake word "xiao du" is heard); shows the mic icon and,
     once available, the recognized transcript. -->
<!-- FIX: removed the stray leading "|" artifact that made the SFC invalid. -->
<template>
  <div class="sound-con" v-if="recognizing">
    <div class="sound-con">
      <img :src="micUrl" alt="">
      <div class="text" v-if="final_transcript">{{ final_transcript }}</div>
    </div>
  </div>
</template>
<script> import {pinyin} from 'pinyin-pro';
export default { name: "SoundControl", data: () => { return { micUrl: '', recognizing: false, final_transcript: '',//最终生成的文本 pyStr: '',// 用于识别命令的拼音 wsUrl: 'ws://你的vosk服务地址', webSocket: '', streamLocal: '', //声音流 context: "", // 声音处理上下文环境 source: '', //声音处理source processor: '', //声音处理程序 bufferSize: 8192 // 缓冲区大小 } }, mounted: function () { this.initWS() navigator.mediaDevices.getUserMedia({ audio: { channelCount: 1, echoCancellation: true, noiseSuppression: true, sampleRate: 8000 }, video: false }).then(this.handleStream) }, methods: { handleStream(stream) { this.streamLocal = stream this.context = new AudioContext({sampleRate: 8000}); this.source = this.context.createMediaStreamSource(stream); this.processor = this.context.createScriptProcessor(this.bufferSize, 1, 1);
this.source.connect(this.processor); this.processor.connect(this.context.destination);
this.processor.onaudioprocess = (audioDataChunk) => { // console.log('audioDataChunk.inputBuffer :%o', audioDataChunk.inputBuffer); this.sendAudio(audioDataChunk); }; }, sendAudio(audioDataChunk) { if (this.webSocket.readyState === WebSocket.OPEN) { // convert to 16-bit payload const inputData = audioDataChunk.inputBuffer.getChannelData(0) || new Float32Array(this.bufferSize); const targetBuffer = new Int16Array(inputData.length); for (let index = inputData.length; index > 0; index--) { targetBuffer[index] = 32767 * Math.min(1, inputData[index]); } this.webSocket.send(targetBuffer.buffer); } }, initWS() { this.webSocket = new WebSocket(this.wsUrl); this.webSocket.binaryType = "arraybuffer";
this.webSocket.onopen = function (event) { console.log('vosk语音转文本连接成功'); };
this.webSocket.onerror = function (event) { console.error(event.data); };
this.webSocket.onmessage = (event) => { if (event.data) { let parsed = JSON.parse(event.data); if (parsed.text) { console.log('声音识别结果: %s', parsed.text) // 先用唤醒词识别 请求第一次接口 返回文字转拼音 如果有 xiao du 则手动开启录音 // 模糊音处理 统一使用前鼻音 this.pyStr = pinyin(parsed.text, {toneType: 'none'}).replaceAll(/ing/g, 'in').replaceAll(/eng/g, 'en') if (this.pyStr.includes('xiao du')) { this.recognizing = true this.final_transcript = '我在! 您可以尝试说: 打开开挖分析' setTimeout(() => { this.recognizing = false this.final_transcript = '' }, 100000) } else { if (this.recognizing) { this.final_transcript = parsed.text this.pyStr = pinyin(parsed.text, {toneType: 'none'}).replaceAll(/ing/g, 'in') .replaceAll(/eng/g, 'en').trim() .replaceAll(/\s+/g,' ') this.$store.commit('setSoundControl', this.pyStr) } } } } }; } }, watch: { recognizing: { handler(n) { this.micUrl = n ? require('@/assets/img/sound_record/mic-animate.gif') : require('@/assets/img/sound_record/mic.gif') } } } } </script>
<style lang="scss" scoped>
/* Floating pill anchored near the bottom-left of the viewport,
   centering the mic icon and transcript vertically. */
.sound-con {
  position: fixed;
  left: 500px;
  bottom: 20px;
  cursor: pointer;
  background: rgba(60, 115, 102, 0.8);
  border-radius: 20px;
  height: 50px;
  display: flex;
  align-items: center;

  /* Transcript bubble shown beside the mic icon. */
  .text {
    width: auto;
    height: 50px;
    color: white;
    border-radius: 20px;
    background: rgba(60, 115, 102, 0.8);
    margin-left: 20px;
    margin-right: 20px;
    line-height: 50px;
  }
}
</style>