Contents
一、Voice Wakeup
1、Register a developer account on the iFlytek (科大讯飞) website
2、Configure the wakeup word, then download the SDK
3、Select the corresponding features and download
4、Copy all of the voice wakeup lib files into the project directory
5、Copy the wakeup word resource file into the project's assets directory
6、Copy the required permissions into AndroidManifest.xml
7、Encapsulate a wakeup utility class
二、Speech Recognition
1、Utility class
2、Usage
一、Voice Wakeup
1、Register a developer account on the iFlytek (科大讯飞) website
Console - 讯飞开放平台 (iFlytek Open Platform)
2、Configure the wakeup word, then download the SDK
3、Select the corresponding features and download
4、Copy all of the voice wakeup lib files into the project directory
5、Copy the wakeup word resource file into the project's assets directory (a quick runtime check is sketched below)
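The wakeup resource is loaded from assets at runtime, so a quick check that the .jet file actually landed in the right place can save debugging time. A minimal sketch of such a check, meant to be dropped into the wakeup utility class; it assumes the file is named after your appid (as in the class below), and the helper name is just for illustration:

// Optional sanity check that the wakeup resource was packaged into assets.
// Assumption: the file is named after your appid, e.g. assets/ivw/<your_appid>.jet.
private boolean wakeResourceExists(Context context, String appId) {
    try (InputStream in = context.getAssets().open("ivw/" + appId + ".jet")) {
        return true;
    } catch (IOException e) {
        Log.w("WakeUpUtil", "wakeup resource missing in assets: " + e.getMessage());
        return false;
    }
}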
6、Copy the required permissions into AndroidManifest.xml
<uses-permission android:name="android.permission.INTERNET" />
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<!-- Some of the permissions the app itself needs -->
<uses-permission android:name="android.permission.READ_PHONE_STATE" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
<!-- iFlytek -->
<uses-permission
    android:name="android.permission.MOUNT_UNMOUNT_FILESYSTEMS"
    tools:ignore="ProtectedPermissions" />
<uses-permission
    android:name="android.permission.READ_PRIVILEGED_PHONE_STATE"
    tools:ignore="ProtectedPermissions" />
<uses-permission
    android:name="android.permission.MANAGE_EXTERNAL_STORAGE"
    tools:ignore="ProtectedPermissions" />
<uses-permission
    android:name="android.permission.READ_PHONE_NUMBERS"
    tools:ignore="ProtectedPermissions" />
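Declaring these in the manifest is not enough on Android 6.0+: RECORD_AUDIO, READ_PHONE_STATE and the storage permissions are dangerous permissions and must also be requested at runtime. A minimal sketch using the AndroidX helpers; the request code is arbitrary and the method name is just for illustration:

private static final int REQ_VOICE_PERMISSIONS = 1001; // arbitrary request code

private void requestVoicePermissionsIfNeeded(Activity activity) {
    // Check and request the dangerous permissions the SDK needs before starting wakeup/recognition
    if (ContextCompat.checkSelfPermission(activity, Manifest.permission.RECORD_AUDIO)
            != PackageManager.PERMISSION_GRANTED) {
        ActivityCompat.requestPermissions(activity,
                new String[]{Manifest.permission.RECORD_AUDIO,
                        Manifest.permission.READ_PHONE_STATE,
                        Manifest.permission.WRITE_EXTERNAL_STORAGE},
                REQ_VOICE_PERMISSIONS);
    }
}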
7、Encapsulate a wakeup utility class
Here, IflytekAPP_id is the app ID (appid) of your application on the iFlytek platform, read from the string resource R.string.IflytekAPP_id.
public abstract class WakeUpUtil {

    private static AutoTouch autoTouch = new AutoTouch(); // clicks the screen automatically

    /**
     * Callback invoked when the device is woken up
     */
    public abstract void wakeUp(String resultString);

    // Log tag
    private static final String TAG = "WakeUpUtil";
    // Context
    private static Context mContext;
    // Voice wakeup object
    private VoiceWakeuper mIvw;
    // Wakeup threshold
    // The higher the threshold, the higher the match score required to wake up
    // Range: [0, 3000], default: 1450
    private static int curThresh = 1450;

    public WakeUpUtil(Context context) {
        initKedaXun(context);
        mContext = context;
        // Initialize the wakeup object
        mIvw = VoiceWakeuper.createWakeuper(context, null);
        Log.d("initLogData", "===进入唤醒工具类====");
    }

    /**
     * Build the path of the wakeup word resource
     *
     * @return the resource file path
     */
    private static String getResource() {
        final String resPath = ResourceUtil.generateResourcePath(mContext,
                RESOURCE_TYPE.assets, "ivw/" + "cf22564a" + ".jet");
        return resPath;
    }

    /**
     * Start wakeup listening
     */
    public void wake() {
        Log.d("initLogData", "===进入唤醒工具类====");
        // Null check to keep a null pointer from crashing the app
        VoiceWakeuper mIvw = VoiceWakeuper.getWakeuper();
        if (mIvw != null) {
            // textView.setText(resultString);
            // Clear parameters
            mIvw.setParameter(SpeechConstant.PARAMS, null);
            // Set the wakeup resource path
            mIvw.setParameter(SpeechConstant.IVW_RES_PATH, getResource());
            // Wakeup threshold, passed as "id:threshold;id:threshold" for each wakeup word carried by the resource
            mIvw.setParameter(SpeechConstant.IVW_THRESHOLD, "0:" + curThresh);
            // Set wakeup mode
            mIvw.setParameter(SpeechConstant.IVW_SST, "wakeup");
            // Keep listening continuously
            mIvw.setParameter(SpeechConstant.KEEP_ALIVE, "1");
            mIvw.startListening(mWakeuperListener);
            Log.d("initLogData", "====唤醒====");
        } else {
            Log.d("initLogData", "===唤醒未初始化11====");
            // Toast.makeText(mContext, "唤醒未初始化1", Toast.LENGTH_SHORT).show();
        }
    }

    public void stopWake() {
        mIvw = VoiceWakeuper.getWakeuper();
        if (mIvw != null) {
            mIvw.stopListening();
        } else {
            Log.d("initLogData", "===唤醒未初始化222====");
            // Toast.makeText(mContext, "唤醒未初始化2", Toast.LENGTH_SHORT).show();
        }
    }

    String resultString = "";

    private WakeuperListener mWakeuperListener = new WakeuperListener() {

        @Override
        public void onResult(WakeuperResult result) {
            try {
                String text = result.getResultString();
                JSONObject object;
                object = new JSONObject(text);
                StringBuffer buffer = new StringBuffer();
                buffer.append("【RAW】 " + text);
                buffer.append("\n");
                buffer.append("【操作类型】" + object.optString("sst"));
                buffer.append("\n");
                buffer.append("【唤醒词id】" + object.optString("id"));
                buffer.append("\n");
                buffer.append("【得分】" + object.optString("score"));
                buffer.append("\n");
                buffer.append("【前端点】" + object.optString("bos"));
                buffer.append("\n");
                buffer.append("【尾端点】" + object.optString("eos"));
                resultString = buffer.toString();
                stopWake();
                autoTouch.autoClickPos(0.1, 0.1);
                wakeUp(resultString);
                // MyEventManager.postMsg("" + resultString, "voicesWakeListener");
            } catch (JSONException e) {
                MyEventManager.postMsg("" + "结果解析出错", "voicesWakeListener");
                resultString = "结果解析出错";
                wakeUp(resultString);
                e.printStackTrace();
            }
            // Logger.d("===开始说话===" + resultString);
        }

        @Override
        public void onError(SpeechError error) {
            MyEventManager.postMsg("" + "唤醒出错", "voicesWakeListener");
        }

        @Override
        public void onBeginOfSpeech() {
            Log.d("initLogData", "===唤醒onBeginOfSpeech====");
        }

        @Override
        public void onEvent(int eventType, int isLast, int arg2, Bundle obj) {
            // Log.d("initLogData", "===唤醒onEvent===" + eventType);
        }

        @Override
        public void onVolumeChanged(int i) {
            // Log.d("initLogData", "===开始说话===" + i);
        }
    };

    /**
     * Initialize the iFlytek speech SDK
     */
    public void initKedaXun(Context context) {
        // Build the initialization parameters
        StringBuffer param = new StringBuffer();
        // IflytekAPP_id is the appid we applied for
        param.append("appid=" + context.getString(R.string.IflytekAPP_id));
        param.append(",");
        // Use the v5+ MSC engine mode
        param.append(SpeechConstant.ENGINE_MODE + "=" + SpeechConstant.MODE_MSC);
        SpeechUtility.createUtility(context, param.toString());
        Log.d("initLogData", "===在appacation中初始化=====");
    }
}
To use the wakeup utility itself, just call it directly:
/**
 * iFlytek voice wakeup object
 */
private WakeUpUtil wakeUpUtil;

private void voiceWake() {
    Log.d("initLogData", "===执行唤醒服务====");
    wakeUpUtil = new WakeUpUtil(this) {
        @Override
        public void wakeUp(String result) {
            MyEventManager.postMsg("" + "唤醒成功", "voicesWakeListener");
            Log.d("initLogData", "====唤醒成功===========" + result);
            // Start listening again for the next wakeup
            wakeUpUtil.wake();
        }
    };
    wakeUpUtil.wake();
}
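One thing the snippet above does not show: wake() keeps the wakeuper listening, so it is worth stopping it when the host Activity goes away. A minimal sketch using the stopWake() method defined in the utility class:

@Override
protected void onDestroy() {
    super.onDestroy();
    // Stop the wakeuper so the microphone is released together with the Activity
    if (wakeUpUtil != null) {
        wakeUpUtil.stopWake();
    }
}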
That completes the voice wakeup integration; next up is speech recognition.
二、Speech Recognition
1、Utility class
/**
 * iFlytek speech recognition utility class
 */
public class KDVoiceRegUtils {

    private SpeechRecognizer mIat;
    private RecognizerListener mRecognizerListener;
    private InitListener mInitListener;
    private StringBuilder result = new StringBuilder();
    // Return code of SDK calls
    private int resultCode = 0;

    /**
     * Singleton held in an AtomicReference
     */
    private static final AtomicReference<KDVoiceRegUtils> INSTANCE = new AtomicReference<KDVoiceRegUtils>();

    /**
     * Private constructor
     */
    private KDVoiceRegUtils() {
    }

    /**
     * CAS guarantees thread safety
     */
    public static final KDVoiceRegUtils getInstance() {
        for (; ; ) {
            KDVoiceRegUtils current = INSTANCE.get();
            if (current != null) {
                return current;
            }
            current = new KDVoiceRegUtils();
            if (INSTANCE.compareAndSet(null, current)) {
                return current;
            }
            Log.d("initLogData", "===科大讯飞实例化===大哥大哥==");
        }
    }

    /**
     * Initialize the recognizer and its listeners
     */
    public void initVoiceRecorgnise(Context ct) {
        if (mInitListener != null || mRecognizerListener != null) {
            return;
        }
        mInitListener = new InitListener() {
            @Override
            public void onInit(int code) {
                // Log.e(TAG, "SpeechRecognizer init() code = " + code);
                Log.d("initLogData", "===科大讯飞唤醒初始化===" + code);
                if (code != ErrorCode.SUCCESS) {
                    // showToast("初始化失败,错误码:" + code + ",请点击网址https://www.xfyun.cn/document/error-code查询解决方案");
                }
            }
        };
        // Recognition listener
        mRecognizerListener = new RecognizerListener() {
            @Override
            public void onBeginOfSpeech() {
                // The SDK's internal recorder is ready; the user can start speaking
                Log.d("initLogData", "=====开始说话======");
            }

            @Override
            public void onError(SpeechError error) {
                // Tips:
                // Error code 10118 (no speech detected) may mean the recording permission is denied;
                // prompt the user to grant the app the RECORD_AUDIO permission.
                // Log.d("initLogData", "====错误说话=====" + error.getPlainDescription(true));
                senVoicesMsg(300, "识别错误 "); // 100 = started, 200 = success, 300 = error
                mIat.stopListening();
                hideDialog();
            }

            @Override
            public void onEndOfSpeech() {
                // The tail end point of the speech was detected; recognition is in progress and no more input is accepted
                mIat.stopListening();
                // Log.d("initLogData", "=====结束说话======");
                hideDialog();
            }

            @Override
            public void onResult(RecognizerResult results, boolean isLast) {
                String text = parseIatResult(results.getResultString());
                // Log.d("initLogData", "==说话==语音识别结果==initVoice==" + text);
                result.append(text);
                if (!text.trim().isEmpty() && boxDialog != null) {
                    senVoicesMsg(200, "识别成功"); // 100 = started, 200 = success, 300 = error
                    boxDialog.showTxtContent(result.toString());
                    senVoicesMsg(200, "" + result.toString());
                }
                if (isLast) {
                    result.setLength(0);
                }
            }

            @Override
            public void onVolumeChanged(int volume, byte[] data) {
                // showToast("当前正在说话,音量大小:" + volume);
                if (volume > 0 && boxDialog != null) {
                    boxDialog.showTxtContent("录音中...");
                }
                Log.d("initLogData", "===说话==onVolumeChanged:====" + volume);
            }

            @Override
            public void onEvent(int eventType, int arg1, int arg2, Bundle obj) {
                // The code below retrieves the cloud session id; when something goes wrong, hand it to
                // technical support so they can look up the session logs and locate the cause.
                // With on-device recognition the session id is null.
                if (SpeechEvent.EVENT_SESSION_ID == eventType) {
                    String sid = obj.getString(SpeechEvent.KEY_EVENT_SESSION_ID);
                }
            }
        };
        // Create the no-UI recognizer object;
        // with SpeechRecognizer you build your own UI from the callback messages
        mIat = SpeechRecognizer.createRecognizer(ct, mInitListener);
        if (mIat != null) {
            setIatParam(); // parameter configuration
        }
    }

    /**
     * Start speech recognition
     */
    public void startVoice(Context context) {
        senVoicesMsg(100, "启动语音识别"); // 100 = started, 200 = success, 300 = error
        if (mIat != null) {
            showDialog(context);
            mIat.startListening(mRecognizerListener);
        }
    }

    /**
     * iFlytek speech recognition parameter configuration
     */
    private void setIatParam() {
        // Clear parameters
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.PARAMS, null);
        // Set the dictation engine
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.ENGINE_TYPE, com.iflytek.cloud.SpeechConstant.TYPE_CLOUD);
        // Return results as JSON
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.RESULT_TYPE, "json");
        // Language
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.LANGUAGE, "zh_cn");
        // Accent / language region
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.ACCENT, "mandarin");
        // Front end point: how long the user may stay silent before it counts as a timeout
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.VAD_BOS, "4000");
        // Back end point: how long of silence after speech before recording stops automatically
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.VAD_EOS, "500");
        // Punctuation: "0" = results without punctuation, "1" = results with punctuation
        mIat.setParameter(com.iflytek.cloud.SpeechConstant.ASR_PTT, "0");
        Log.d("initLogData", "==语音是被==初始化成功:====");
        // Audio save path; supported formats are pcm and wav. Saving to the SD card requires WRITE_EXTERNAL_STORAGE.
        // Note: the AUDIO_FORMAT parameter requires an updated voice assistant version to take effect
        // mIatDialog.setParameter(SpeechConstant.AUDIO_FORMAT, "wav");
        // mIatDialog.setParameter(SpeechConstant.ASR_AUDIO_PATH, Environment.getExternalStorageDirectory() + "/MyApplication/" + filename + ".wav");
    }

    /**
     * Parse the recognition result JSON
     */
    public static String parseIatResult(String json) {
        StringBuffer ret = new StringBuffer();
        try {
            JSONTokener tokener = new JSONTokener(json);
            JSONObject joResult = new JSONObject(tokener);
            JSONArray words = joResult.getJSONArray("ws");
            for (int i = 0; i < words.length(); i++) {
                // Each transcribed word; use the first candidate by default
                JSONArray items = words.getJSONObject(i).getJSONArray("cw");
                JSONObject obj = items.getJSONObject(0);
                ret.append(obj.getString("w"));
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return ret.toString();
    }

    /**
     * Dialog shown while recording
     * getApplicationContext()
     */
    private VoiceDialog boxDialog;

    private void showDialog(Context context) {
        View inflate = LayoutInflater.from(context).inflate(R.layout.donghua_layout, null, false);
        boxDialog = new VoiceDialog(context, inflate, VoiceDialog.LocationView.BOTTOM);
        boxDialog.show();
    }

    /**
     * Hide the dialog
     */
    private void hideDialog() {
        if (boxDialog != null) {
            boxDialog.dismiss();
        }
    }

    /**
     * Post a recognition status message
     *
     * @param code status code
     * @param conn message content
     */
    private void senVoicesMsg(int code, String conn) {
        VoiceRecognizeResult voiceRecognizeResult = new VoiceRecognizeResult();
        voiceRecognizeResult.setCode(code); // 100 = started, 200 = success, 300 = error
        voiceRecognizeResult.setMsg("" + conn);
        String std = JSON.toJSONString(voiceRecognizeResult);
        MyEventManager.postMsg("" + std, "VoiceRecognizeResult");
    }

    /**
     * Initialize the iFlytek speech SDK
     */
    public void initKedaXun(Context context) {
        // Build the initialization parameters
        StringBuffer param = new StringBuffer();
        // IflytekAPP_id is the appid we applied for
        param.append("appid=" + context.getString(R.string.IflytekAPP_id));
        param.append(",");
        // Use the v5+ MSC engine mode
        param.append(SpeechConstant.ENGINE_MODE + "=" + SpeechConstant.MODE_MSC);
        SpeechUtility.createUtility(context, param.toString());
        Log.d("initLogData", "===在appacation中初始化=====");
    }
}
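To make the "ws"/"cw"/"w" handling in parseIatResult concrete, here is a hand-written, simplified sample payload and what the method returns for it; the JSON is illustrative, not real SDK output:

// parseIatResult walks the "ws" array and concatenates the first candidate word ("w") of each "cw" list
String sample = "{\"ws\":[{\"cw\":[{\"w\":\"今天\"}]},{\"cw\":[{\"w\":\"天气\"}]},{\"cw\":[{\"w\":\"不错\"}]}]}";
String text = KDVoiceRegUtils.parseIatResult(sample);
// text is now "今天天气不错"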
2、Usage
KDVoiceRegUtils.getInstance().initKedaXun(mWXSDKInstance.getContext());
KDVoiceRegUtils.getInstance().initVoiceRecorgnise(mUniSDKInstance.getContext()); // initialize speech recognition
KDVoiceRegUtils.getInstance().startVoice(mUniSDKInstance.getContext());
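Recognition results are delivered as a JSON string posted through MyEventManager by senVoicesMsg. How you subscribe depends on the project's own event bus, which is not shown here; once the string arrives, decoding it could look like the sketch below, assuming VoiceRecognizeResult exposes getCode()/getMsg() matching the setters used above:

// payload is the JSON string received from the "VoiceRecognizeResult" event
VoiceRecognizeResult r = JSON.parseObject(payload, VoiceRecognizeResult.class);
if (r.getCode() == 200) { // 100 = started, 200 = success, 300 = error
    Log.d("VoiceDemo", "recognized text: " + r.getMsg());
}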
Note that the code can still be optimized; because of company business requirements the encapsulation is not very thorough, and you can build further wrappers on top of it.