如何延长 Webkit SpeechRecognition 的发言时间?讲话短暂停顿后不中断

gab6jxml  于 2023-05-05  发布在  JavaScript
关注(0)|答案(3)|浏览(518)

我只在点击按钮后使用语音识别。因此,recognition.continuous = false被设置。来自https://developer.mozilla.org/en-US/docs/Web/API/SpeechRecognition/的注解
SpeechRecognition界面的continuous属性控制是为每个识别返回连续结果还是仅返回单个结果。
当我说话时,例如说“这是一个很好的源代码”{停顿 1-2 秒},识别就会在这一点上中断。
如何延长允许的发言停顿时间?我认为允许 3-5 秒的停顿就可以了。
PS:由于安全问题,代码可能无法在此处运行。请看https://codepen.io/durrrrr/pen/RwaRVdg

// Probe for microphone access.
// navigator.getUserMedia is deprecated; prefer the promise-based
// navigator.mediaDevices.getUserMedia and only fall back to the
// vendor-prefixed legacy callback API on old browsers.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
   navigator.mediaDevices.getUserMedia({ audio: true })
      .then(function(stream) {
         console.log("Microphone Access supported");
      })
      .catch(function(err) {
         console.log("An error occurred by accessing the microphone: " + err.name);
      });
} else {
   // Legacy fallback (pre-mediaDevices browsers).
   var legacyGetUserMedia = navigator.getUserMedia ||
                            navigator.webkitGetUserMedia ||
                            navigator.mozGetUserMedia;
   if (legacyGetUserMedia) {
      legacyGetUserMedia.call(navigator, { audio: true },
         function(stream) {
            console.log("Microphone Access supported");
         },
         function(err) {
            console.log("An error occurred by accessing the microphone: " + err.name);
         }
      );
   } else {
      console.log("getUserMedia not supported");
   }
}

// Normalize the vendor-prefixed Web Speech constructors (Chrome ships
// them as webkitSpeechRecognition / webkitSpeechGrammarList).
window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
window.SpeechGrammarList = window.SpeechGrammarList || window.webkitSpeechGrammarList;

// Truthiness check on the normalized property: the original
// `'SpeechRecognition' in window` is always true after the assignment
// above (the property exists even when its value is undefined), so the
// "not supported" branch was unreachable.
if (window.SpeechRecognition) {

    console.log('Speech recognition supported');

    // JSpeech Grammar Format: biases recognition toward color names.
    var grammar = '#JSGF V1.0; grammar colors; public <color> = aqua | azure | beige | bisque | black | blue | brown | chocolate | coral | crimson | cyan | fuchsia | ghostwhite | gold | goldenrod | gray | green | indigo | ivory | khaki | lavender | lime | linen | magenta | maroon | moccasin | navy | olive | orange | orchid | peru | pink | plum | purple | red | salmon | sienna | silver | snow | tan | teal | thistle | tomato | turquoise | violet | white | yellow ;';
    var message = $('#message');
    var recognition = new window.SpeechRecognition();
    var finalTranscript = '';

    // Grammars are optional, and SpeechGrammarList is not available in
    // every browser that exposes SpeechRecognition — guard to avoid a
    // ReferenceError (the original read the bare `webkitSpeechGrammarList`).
    if (window.SpeechGrammarList) {
        var speechRecognitionList = new window.SpeechGrammarList();
        speechRecognitionList.addFromString(grammar, 1);
        recognition.grammars = speechRecognitionList;
    }

    recognition.lang = 'en-US';          // 'en-EN' is not a valid BCP 47 language tag
    recognition.interimResults = true;   // stream partial transcripts while speaking
    // With continuous = false the service stops at the first final result;
    // switch to true (and stop manually) to survive 1-2 s speech pauses.
    recognition.continuous = false;

    // Append final results to #finalTranscript, show the in-progress
    // hypothesis in #currentTranscript.
    recognition.onresult = function(event) {
      var currentTranscript = '';
      var confidenceFinalTranscrip = '';
      for (var i = event.resultIndex; i < event.results.length; ++i) {
        if (event.results[i].isFinal) {
          finalTranscript += event.results[i][0].transcript;
          confidenceFinalTranscrip = event.results[i][0].confidence;
          $('#finalTranscript').html(finalTranscript);
        } else {
          currentTranscript += event.results[i][0].transcript;
        }
      }
      $('#currentTranscript').html(currentTranscript);
    };

    // Turn off the pulse animation and stop the session once the
    // engine's voice-activity detector decides speech has ended.
    recognition.onspeechend = function() {
      $('#pulse-ring-microphone').removeClass('pulse-ring');
      $('#pulse-ring-microphone').removeClass('pulse-ring-small');
      $('#pulse-ring-microphone-font').removeClass('pulse-ring-font-on').addClass('pulse-ring-font-off');
      recognition.stop();
    };

    recognition.onerror = function(event) {
      $('#pulse-ring-microphone').removeClass('pulse-ring');
      $('#pulse-ring-microphone-font').removeClass('pulse-ring-font-on').addClass('pulse-ring-font-off');
      // SpeechRecognitionErrorEvent exposes `error`, not `speechError`
      // (the original always rendered "undefined").
      $('#errorTranscript').html(event.error);
    };

} else {
  console.log('Speech recognition not supported');
}

// Start a recognition session when the microphone button is clicked
// and switch the button into its "listening" pulse state.
$('#btnStartSpeech').on('click', function() {
  var $ring = $('#pulse-ring-microphone');
  var $font = $('#pulse-ring-microphone-font');
  $ring.addClass('pulse-ring');
  $font.removeClass('pulse-ring-font-off').addClass('pulse-ring-font-on');
  recognition.start();
});
/* Center the microphone button inside its container. */
.container-pulse-ring {
  display: flex;
  align-items: center;
  justify-content: center;
}

.container-pulse-ring:focus {
  outline: -webkit-focus-ring-color auto 0px;
}

/* Round microphone button. */
#btnStartSpeech.btnAnimated {
  border: none;
  border-radius: 100%;
  width: 150px;
  height: 150px;
  font-size: 3.5em;
  color: #fff;
  padding: 0; /* was declared twice; one declaration suffices */
  margin: 0;
  background: #337ab7;
  position: relative;
  display: inline-block;
  line-height: 50px;
  text-align: center;
  white-space: nowrap;
  vertical-align: middle;
  -ms-touch-action: manipulation;
  touch-action: manipulation;
  cursor: pointer;
  -webkit-user-select: none;
  -moz-user-select: none;
  -ms-user-select: none;
  user-select: none;
  background-image: none;
}

.btnAnimated:focus {
  outline: 0px auto -webkit-focus-ring-color;
  outline-offset: -2px;
}

.pulse-ring-font-on {
  color: #fff;
}

.pulse-ring-font-off {
  color: #333;
}

/* Expanding ring shown while listening.
   Note: `content` only applies to ::before/::after pseudo-elements,
   so it was removed here. */
.pulse-ring {
  width: 150px;
  height: 150px;
  border: 50px solid #337ab7;
  border-radius: 50%;
  position: absolute;
  top: -1px;
  left: -1px;
  -webkit-animation: pulsate infinite 2s;
  animation: pulsate infinite 2s;
}

/* Prefixed copy for old WebKit browsers. */
@-webkit-keyframes pulsate {
  0% {
    -webkit-transform: scale(1, 1);
    opacity: 1;
  }

  100% {
    -webkit-transform: scale(1.2, 1.2);
    opacity: 0;
  }
}

/* Standard keyframes were missing: the unprefixed `animation`
   property referenced `pulsate`, which only existed in prefixed
   form, so the animation never ran in non-WebKit engines. */
@keyframes pulsate {
  0% {
    transform: scale(1, 1);
    opacity: 1;
  }

  100% {
    transform: scale(1.2, 1.2);
    opacity: 0;
  }
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/1.9.1/jquery.min.js"></script>
// Probe for microphone access.
// navigator.getUserMedia is deprecated; prefer the promise-based
// navigator.mediaDevices.getUserMedia and only fall back to the
// vendor-prefixed legacy callback API on old browsers.
if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
   navigator.mediaDevices.getUserMedia({ audio: true })
      .then(function(stream) {
         console.log("Accessed the Microphone");
      })
      .catch(function(err) {
         console.log("The following error occurred: " + err.name);
      });
} else {
   // Legacy fallback (pre-mediaDevices browsers).
   var legacyGetUserMedia = navigator.getUserMedia ||
                            navigator.webkitGetUserMedia ||
                            navigator.mozGetUserMedia;
   if (legacyGetUserMedia) {
      legacyGetUserMedia.call(navigator, { audio: true },
         function(stream) {
            console.log("Accessed the Microphone");
         },
         function(err) {
            console.log("The following error occurred: " + err.name);
         }
      );
   } else {
      console.log("getUserMedia not supported");
   }
}

// Normalize the vendor-prefixed Web Speech constructor (Chrome ships
// it as webkitSpeechRecognition).
window.SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

// Truthiness check on the normalized property: the original
// `'SpeechRecognition' in window` is always true after the assignment
// above (the property exists even when its value is undefined), so the
// "not supported" branch was unreachable.
if (window.SpeechRecognition) {

    console.log('Speech recognition supported');

    var message = $('#message');
    var recognition = new window.SpeechRecognition();
    var finalTranscript = '';
    recognition.lang = 'en-US';          // 'en-EN' is not a valid BCP 47 language tag
    recognition.interimResults = true;   // stream partial transcripts while speaking
    // With continuous = false the service stops at the first final result;
    // switch to true (and stop manually) to survive 1-2 s speech pauses.
    recognition.continuous = false;

    // Append final results to #finalTranscript, show the in-progress
    // hypothesis in #currentTranscript.
    recognition.onresult = function(event) {
      var currentTranscript = '';
      var confidenceFinalTranscrip = '';
      for (var i = event.resultIndex; i < event.results.length; ++i) {
        if (event.results[i].isFinal) {
          finalTranscript += event.results[i][0].transcript;
          confidenceFinalTranscrip = event.results[i][0].confidence;
          $('#finalTranscript').html(finalTranscript);
        } else {
          currentTranscript += event.results[i][0].transcript;
        }
      }
      $('#currentTranscript').html(currentTranscript);
    };

    // Turn off the pulse animation and stop the session once the
    // engine's voice-activity detector decides speech has ended.
    recognition.onspeechend = function() {
      $('#pulse-ring-microphone').removeClass('pulse-ring');
      $('#pulse-ring-microphone').removeClass('pulse-ring-small');
      $('#pulse-ring-microphone-font').removeClass('pulse-ring-font-on').addClass('pulse-ring-font-off');
      recognition.stop();
    };

    recognition.onerror = function(event) {
      $('#pulse-ring-microphone').removeClass('pulse-ring');
      $('#pulse-ring-microphone-font').removeClass('pulse-ring-font-on').addClass('pulse-ring-font-off');
      // SpeechRecognitionErrorEvent exposes `error`, not `speechError`
      // (the original always rendered "undefined").
      $('#errorTranscript').html(event.error);
    };

} else {
  console.log('Speech recognition not supported');
}

// Kick off a recognition session on button click and flip the
// microphone visuals into their active ("listening") state.
$('#btnStartSpeech').on('click', function() {
  $('#pulse-ring-microphone-font')
    .removeClass('pulse-ring-font-off')
    .addClass('pulse-ring-font-on');
  $('#pulse-ring-microphone').addClass('pulse-ring');
  recognition.start();
});
xiozqbni

xiozqbni1#

您可以运行setTimeout来重新启动识别启动

// Restart recognition shortly after it stops so listening effectively
// continues across brief pauses (invoke this e.g. from the `onend`
// handler of the recognition object).
setTimeout(() => {
      recognition.start();
    }, 500);
1yjd4xko

1yjd4xko2#

recognition.continuous = false 表示服务在得到第一个最终结果时立即停止。内部的 VAD(语音活动检测)会决定你的输入何时结束。它的判定相当激进,而且你无法调整其阈值,所以在 1-2 秒没有说话之后,它总是会结束识别。
您可以将引擎设置为continuous = true并收集多个最终结果,直到某个最大值。时间到了(我正在使用SEPIA开源语音助手),但状态处理可能相当棘手。
我目前正在构建一个类似于Web Speech API的库,其中包含开源VAD和语音识别(SEPIA Web Audio Library)模块,让您可以完全控制各个方面,但您需要托管自己的ASR服务器。^^

pinkon5k

pinkon5k3#

将代码中的 recognition.continuous = false 更新为

recognition.continuous = true

相关问题