使用官方提供的实时语音转文字 demo,可以通过文件流的方式识别出音频文件的语音并转成文字。但当音频流来源切换为前端调用麦克风实时发送数据时,就无法识别出文字。有人遇到过类似的问题吗?
/**
 * WebSocket binary-message entry point: hands the incoming audio stream to
 * the recognizer for a single recognition pass.
 *
 * @param session    the WebSocket session the message arrived on
 * @param byteBuffer the raw audio payload as a stream
 */
@OnMessage(maxMessageSize = Integer.MAX_VALUE / 2)
public void onMessage(Session session, InputStream byteBuffer) {
    try {
        speechRecognition.runOnce(byteBuffer, session);
    } catch (Exception e) {
        // Boundary catch: log with session context so the failure is traceable.
        String detail = String.format(
                "连接发送异常,当前在线数为:%d ==> 关闭该连接信息:session_id = %s, sid = %s。",
                onlineSessionClientCount.get(), session.getId(), uid);
        LogUtils.error("AsrSocket", "onMessage", "", detail, e);
    }
}
/**
 * Runs one complete recognition pass over the supplied audio stream.
 *
 * Reads the whole stream, slices it into fixed-size chunks, then replays the
 * chunks to the recognizer with an artificial pacing delay, finishing with
 * {@code stop()} to flush the final result.
 *
 * NOTE(review): this method performs a full start()/write()/stop() cycle per
 * invocation. If each WebSocket message (e.g. a short real-time microphone
 * frame) triggers a separate call, every fragment is recognized in isolation
 * and is typically too short to yield text — for true streaming input, keep
 * one recognizer open per session across messages. TODO confirm against the
 * caller (onMessage).
 *
 * @param inputStream complete audio payload to recognize (fully consumed)
 * @param session     WebSocket session handed to the result listener
 */
public void runOnce(InputStream inputStream, Session session) {
    try {
        SpeechClient client = getSpeechClient();
        // Hoisted: the transport kind is loop-invariant, previously evaluated twice.
        final boolean isHttp = SpeechRecognitionSysConfig.requestWay == AsrConstant.RequestWay.Http;

        // Vendor guidance: send ~200ms of audio per HTTP request, smaller frames
        // per WebSocket message.
        // NOTE(review): 640 bytes is 20ms at 16kHz/16-bit mono, matching the
        // 20ms sleep below but not the "40ms" stated in the original comment —
        // verify the intended WebSocket frame size.
        List<byte[]> speechData = ByteUtils.subToSmallBytes(inputStream, isHttp ? 6400 : 640);

        // Request parameters; initialize() applies the default configuration.
        SpeechRecognitionRequest request = SpeechRecognitionRequest.initialize();
        request.setEngineModelType("16k_zh"); // engine model is mandatory, missing it raises an error
        request.setVoiceFormat(1);            // audio format id (vendor-defined)

        SpeechRecognizer speechWsRecognizer =
                client.newSpeechRecognizer(request, new MySpeechRecognitionListener(session));

        // start() must succeed before any audio is written.
        if (speechWsRecognizer.start()) {
            try {
                for (byte[] chunk : speechData) {
                    // Simulate real-time audio pacing between chunks.
                    Thread.sleep(isHttp ? 200 : 20);
                    speechWsRecognizer.write(chunk);
                }
            } finally {
                // Always end a started recognition, even if a write/sleep failed,
                // so the server-side session is released.
                speechWsRecognizer.stop();
            }
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status instead of silently swallowing it.
        Thread.currentThread().interrupt();
        LogUtils.error("SpeechRecognition", "runOnce", "", "调用asr异常", e);
    } catch (Exception e) {
        LogUtils.error("SpeechRecognition", "runOnce", "", "调用asr异常", e);
    }
}
相似问题