OpenAI Realtime API (实时语音)

https://openai.com/index/introducing-the-realtime-api/

 

官方demo

https://github.com/openai/openai-realtime-console

官方demo使用到的插件

https://github.com/openai/openai-realtime-api-beta?tab=readme-ov-file

装包配置

修改yarn.lock 这个包是从github下载的

"@openai/realtime-api-beta@openai/openai-realtime-api-beta":
  version "0.0.0"
  resolved "https://codeload.github.com/openai/openai-realtime-api-beta/tar.gz/a5cb94824f625423858ebacb9f769226ca98945f"
  dependencies:
    ws "^8.18.0"

前端代码

import { RealtimeClient } from '@openai/realtime-api-beta'


 

nginx配置

RealtimeClient需要配置一个wss地址

wss和https使用相同的加密协议,不需要单独配置,直接配置一个转发就可以了

    # httpsserver {listen       443 ssl; server_name  chat.xutongbao.top;# 付费ssl_certificate         /temp/ssl/chat.xutongbao.top/chat.xutongbao.top_cert_chain.pem;   # nginx的ssl证书文件ssl_certificate_key     /temp/ssl/chat.xutongbao.top/chat.xutongbao.top_key.key;  # nginx的ssl证书验证密码# 免费# ssl_certificate         /temp/ssl/cersign/chat.xutongbao.top/chat.xutongbao.top.crt;   # nginx的ssl证书文件# ssl_certificate_key     /temp/ssl/cersign/chat.xutongbao.top/chat.xutongbao.top_rsa.key;  # nginx的ssl证书验证密码proxy_send_timeout 6000s;    # 设置发送超时时间,proxy_read_timeout 6000s;    # 设置读取超时时间。#配置根目录location / {root    /temp/yuying;index  index.html index.htm;add_header Content-Security-Policy upgrade-insecure-requests;}location /api/ {proxy_set_header X-Real-IP $remote_addr;proxy_set_header REMOTE-HOST $remote_addr;proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;proxy_set_header X-NginX-Proxy true;proxy_set_header Connection '';proxy_http_version 1.1;chunked_transfer_encoding off;proxy_buffering off;proxy_cache off;proxy_pass http://yuying-api.xutongbao.top;}location /socket.io/ {proxy_set_header X-Real-IP $remote_addr;proxy_set_header REMOTE-HOST $remote_addr;proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;proxy_set_header X-NginX-Proxy true;proxy_pass http://127.0.0.1:84;# 关键配置 startproxy_http_version 1.1;proxy_set_header Upgrade $http_upgrade;proxy_set_header Connection "upgrade";# 关键配置 end}location /ws {proxy_pass http://52.247.xxx.xxx:86/;proxy_read_timeout              500;proxy_set_header                Host    $http_host;proxy_set_header                X-Real-IP          $remote_addr;proxy_set_header                X-Forwarded-For $proxy_add_x_forwarded_for;proxy_http_version 1.1;# ws 协议专用头proxy_set_header                Upgrade $http_upgrade;proxy_set_header                Connection "Upgrade";}location /ws-test {proxy_pass http://52.247.xxx.xxx:92/;proxy_read_timeout              500;proxy_set_header                Host    $http_host;proxy_set_header    
            X-Real-IP          $remote_addr;proxy_set_header                X-Forwarded-For $proxy_add_x_forwarded_for;proxy_http_version 1.1;# ws 协议专用头proxy_set_header                Upgrade $http_upgrade;proxy_set_header                Connection "Upgrade";}# 匹配sslCnd开头的请求,实际转发的请求去掉多余的sslCnd这三个字母location ^~/sslCnd/ {proxy_set_header X-Real-IP $remote_addr;proxy_set_header REMOTE-HOST $remote_addr;proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;proxy_set_header X-NginX-Proxy true;proxy_pass http://cdn.xutongbao.top/;}           }   

建立连接时如何通过token确认用户身份

  let apiKeyValue = `${localStorage.getItem('token')}divide${localStorage.getItem('talkId')}`const clientRef = useRef(new RealtimeClient(LOCAL_RELAY_SERVER_URL? {url: LOCAL_RELAY_SERVER_URL,apiKey: apiKeyValue,dangerouslyAllowAPIKeyInBrowser: true,}: {apiKey: apiKey,dangerouslyAllowAPIKeyInBrowser: true,}))

前端完整代码

realtimePlus/pages/ConsolePage.js:

import { connect } from 'react-redux'
import { withRouter } from 'react-router-dom'
import { useEffect, useRef, useCallback, useState } from 'react'
import { RealtimeClient } from '@openai/realtime-api-beta'
import { WavRecorder, WavStreamPlayer } from '../lib/wavtools/index.js'
import { instructions } from '../utils/conversation_config.js'
import { WavRenderer } from '../utils/wav_renderer'
import { X, ArrowUp, ArrowDown } from 'react-feather'
import { Button, Dropdown, Input, Select } from 'antd'
import { SinglePageHeader, Icon } from '../../../../../../components/light'
import { isPC } from '../../../../../../utils/tools.js'
import { realTimeBaseURL } from '../../../../../../utils/config.js'
import { message as antdMessage } from 'antd'
import Api from '../../../../../../api/index.js'import './ConsolePage.css'
import './index.css'
const LOCAL_RELAY_SERVER_URL = realTimeBaseURL //'wss://chat.xutongbao.top/ws'const Option = Select.Option
let isPCFlag = isPC()
let isAddStart = false
let addIdHistory = []function Index() {//#region 配置const apiKey = LOCAL_RELAY_SERVER_URL? '': localStorage.getItem('tmp::voice_api_key') ||prompt('OpenAI API Key') ||''if (apiKey !== '') {localStorage.setItem('tmp::voice_api_key', apiKey)}const wavRecorderRef = useRef(new WavRecorder({ sampleRate: 24000 }))const wavStreamPlayerRef = useRef(new WavStreamPlayer({ sampleRate: 24000 }))let apiKeyValue = `${localStorage.getItem('token')}divide${localStorage.getItem('talkId')}`const clientRef = useRef(new RealtimeClient(LOCAL_RELAY_SERVER_URL? {url: LOCAL_RELAY_SERVER_URL,apiKey: apiKeyValue,dangerouslyAllowAPIKeyInBrowser: true,}: {apiKey: apiKey,dangerouslyAllowAPIKeyInBrowser: true,}))const clientCanvasRef = useRef(null)const serverCanvasRef = useRef(null)const eventsScrollHeightRef = useRef(0)const eventsScrollRef = useRef(null)const startTimeRef = useRef(new Date().toISOString())const [items, setItems] = useState([])const [realtimeEvents, setRealtimeEvents] = useState([])const [expandedEvents, setExpandedEvents] = useState({})const [isConnected, setIsConnected] = useState(false)const [canPushToTalk, setCanPushToTalk] = useState(true)const [isRecording, setIsRecording] = useState(false)const [message, setMessage] = useState('')const [messageType, setMessageType] = useState('none')//#endregionconst getItems = () => {const items = [{key: 'chrome',label: (<>{/* eslint-disable-next-line */}<ahref={`https://static.xutongbao.top/app/ChromeSetup.exe`}target="_blank">下载chrome浏览器(推荐)</a></>),icon: <Icon name="chrome" className="m-realtime-menu-icon"></Icon>,},]return items}//#region 基础const formatTime = useCallback((timestamp) => {const startTime = startTimeRef.currentconst t0 = new Date(startTime).valueOf()const t1 = new Date(timestamp).valueOf()const delta = t1 - t0const hs = Math.floor(delta / 10) % 100const s = Math.floor(delta / 1000) % 60const m = Math.floor(delta / 60_000) % 60const pad = (n) => {let s = n + ''while (s.length < 2) {s = '0' + s}return s}return 
`${pad(m)}:${pad(s)}.${pad(hs)}`}, [])const connectConversation = useCallback(async () => {const client = clientRef.currentconst wavRecorder = wavRecorderRef.currentconst wavStreamPlayer = wavStreamPlayerRef.currentstartTimeRef.current = new Date().toISOString()setIsConnected(true)setRealtimeEvents([])setItems(client.conversation.getItems())try {// Connect to microphoneawait wavRecorder.begin()} catch (error) {console.log(error)}// Connect to audio outputawait wavStreamPlayer.connect()// Connect to realtime APIawait client.connect()// let isAutoAsk = true// if (isAutoAsk) {// client.sendUserMessageContent([//   {//     type: `input_text`,//     text: `你好!`,//   },// ])if (client.getTurnDetectionType() === 'server_vad') {await wavRecorder.record((data) => client.appendInputAudio(data.mono))}}, [])const handleTest = () => {const client = clientRef.currentclient.sendUserMessageContent([{type: `input_text`,text: message,},])setMessage('')}const handleMessage = (event) => {setMessage(event.target.value)}/*** Disconnect and reset conversation state*/const disconnectConversation = useCallback(async () => {setIsConnected(false)setRealtimeEvents([])// setItems([])const client = clientRef.currentclient.disconnect()const wavRecorder = wavRecorderRef.currentawait wavRecorder.end()const wavStreamPlayer = wavStreamPlayerRef.currentawait wavStreamPlayer.interrupt()}, [])const deleteConversationItem = useCallback(async (id) => {const client = clientRef.currentclient.deleteItem(id)}, [])/*** In push-to-talk mode, start recording* .appendInputAudio() for each sample*/const startRecording = async () => {setIsRecording(true)const client = clientRef.currentconst wavRecorder = wavRecorderRef.currentconst wavStreamPlayer = wavStreamPlayerRef.currentconst trackSampleOffset = await wavStreamPlayer.interrupt()if (trackSampleOffset?.trackId) {const { trackId, offset } = trackSampleOffsetawait client.cancelResponse(trackId, offset)}try {await wavRecorder.record((data) => 
client.appendInputAudio(data.mono))} catch (error) {console.log(error)}}/*** In push-to-talk mode, stop recording*/const stopRecording = async () => {setIsRecording(false)const client = clientRef.currentconst wavRecorder = wavRecorderRef.currenttry {await wavRecorder.pause()} catch (error) {console.log(error)}try {client.createResponse()} catch (error) {console.log(error)}}/*** Switch between Manual <> VAD mode for communication*/const changeTurnEndType = async (messageType) => {setMessageType(messageType)let valueif (messageType === 'server_vad') {value = 'server_vad'} else if (messageType === 'none' || messageType === 'input') {value = 'none'}const client = clientRef.currentconst wavRecorder = wavRecorderRef.currentif (value === 'none' && wavRecorder.getStatus() === 'recording') {await wavRecorder.pause()}client.updateSession({turn_detection: value === 'none' ? null : { type: 'server_vad' },})if (value === 'server_vad' && client.isConnected()) {await wavRecorder.record((data) => client.appendInputAudio(data.mono))}setCanPushToTalk(messageType === 'none')}const handleSearch = () => {let params = {talkId: localStorage.getItem('talkId'),gptVersion: 'realtime',pageNum: 1,pageSize: 20,isGetNewest: true,}const client = clientRef.current// client.conversation.processEvent({//   type: 'conversation.item.created',//   event_id: 'item_ARaEpHPCznsNlBGN5DGFp',//   item: {//     id: 'item_ARaEpHPCznsNlBGN5DGFp',//     object: 'realtime.item',//     type: 'message',//     status: 'completed',//     role: 'user',//     content: [{ type: 'input_text', text: '你好' }],//     formatted: { audio: {}, text: '你好', transcript: '' },//   }// })// let items = client.conversation.getItems()// console.log('items', items)Api.h5.chatSearch(params).then((res) => {// let list = [//   {//     id: 'item_ARaEpHPCznsNlBGN5DGFp',//     object: 'realtime.item',//     type: 'message',//     status: 'completed',//     role: 'user',//     content: [{ type: 'input_text', text: '你好' }],//     formatted: { 
audio: {}, text: '你好', transcript: '' },//   },//   {//     id: 'item_ARaEpLuspCKg6raB95pFr',//     object: 'realtime.item',//     type: 'message',//     status: 'in_progress',//     role: 'assistant',//     content: [{ type: 'audio', transcript: '你好!' }],//     formatted: { audio: {}, text: '', transcript: '你好!' },//   },// ]if (res.code === 200) {let list = res.data.list.map((item) => {return {id: item.uid,object: 'realtime.item',type: 'message',status: 'completed',role: item.messageType === '1' ? 'user' : 'assistant',content: [{type: item.messageType === '1' ? 'input_text' : 'text',text: item.message,transcript: item.message,},],formatted: {audio: {},text: item.message,transcript: item.message,},}})setItems(list)list.forEach((item) => {client.conversation.processEvent({type: 'conversation.item.created',event_id: item.id,item: {...item,},})})let items = client.conversation.getItems()console.log('items', items)}})}//#endregion//#region  useEffect/*** Auto-scroll the event logs*/useEffect(() => {if (eventsScrollRef.current) {const eventsEl = eventsScrollRef.currentconst scrollHeight = eventsEl.scrollHeight// Only scroll if height has just changedif (scrollHeight !== eventsScrollHeightRef.current) {eventsEl.scrollTop = scrollHeighteventsScrollHeightRef.current = scrollHeight}}}, [realtimeEvents])/*** Auto-scroll the conversation logs*/useEffect(() => {const conversationEls = [].slice.call(document.body.querySelectorAll('[data-conversation-content]'))for (const el of conversationEls) {const conversationEl = elconversationEl.scrollTop = conversationEl.scrollHeight}}, [items])/*** Set up render loops for the visualization canvas*/useEffect(() => {let isLoaded = trueconst wavRecorder = wavRecorderRef.currentconst clientCanvas = clientCanvasRef.currentlet clientCtx = nullconst wavStreamPlayer = wavStreamPlayerRef.currentconst serverCanvas = serverCanvasRef.currentlet serverCtx = nullconst render = () => {if (isLoaded) {if (clientCanvas) {if (!clientCanvas.width || 
!clientCanvas.height) {clientCanvas.width = clientCanvas.offsetWidthclientCanvas.height = clientCanvas.offsetHeight}clientCtx = clientCtx || clientCanvas.getContext('2d')if (clientCtx) {clientCtx.clearRect(0, 0, clientCanvas.width, clientCanvas.height)const result = wavRecorder.recording? wavRecorder.getFrequencies('voice'): { values: new Float32Array([0]) }WavRenderer.drawBars(clientCanvas,clientCtx,result.values,'#0099ff',10,0,8)}}if (serverCanvas) {if (!serverCanvas.width || !serverCanvas.height) {serverCanvas.width = serverCanvas.offsetWidthserverCanvas.height = serverCanvas.offsetHeight}serverCtx = serverCtx || serverCanvas.getContext('2d')if (serverCtx) {serverCtx.clearRect(0, 0, serverCanvas.width, serverCanvas.height)const result = wavStreamPlayer.analyser? wavStreamPlayer.getFrequencies('voice'): { values: new Float32Array([0]) }WavRenderer.drawBars(serverCanvas,serverCtx,result.values,'#009900',10,0,8)}}window.requestAnimationFrame(render)}}render()return () => {isLoaded = false}}, [])/*** Core RealtimeClient and audio capture setup* Set all of our instructions, tools, events and more*/useEffect(() => {// Get refsconst wavStreamPlayer = wavStreamPlayerRef.currentconst client = clientRef.current// Set instructionsclient.updateSession({ instructions: instructions })// Set transcription, otherwise we don't get user transcriptions backclient.updateSession({ input_audio_transcription: { model: 'whisper-1' } })// handle realtime events from client + server for event loggingclient.on('realtime.event', (realtimeEvent) => {if (realtimeEvent.event.code === 400) {antdMessage.warning(realtimeEvent.event.message)disconnectConversation()return}setRealtimeEvents((realtimeEvents) => {const lastEvent = realtimeEvents[realtimeEvents.length - 1]if (lastEvent?.event.type === realtimeEvent.event.type) {// if we receive multiple events in a row, aggregate them for display purposeslastEvent.count = (lastEvent.count || 0) + 1return realtimeEvents.slice(0, -1).concat(lastEvent)} 
else {return realtimeEvents.concat(realtimeEvent)}})})client.on('error', (event) => console.error(event))client.on('conversation.interrupted', async () => {const trackSampleOffset = await wavStreamPlayer.interrupt()if (trackSampleOffset?.trackId) {const { trackId, offset } = trackSampleOffsetawait client.cancelResponse(trackId, offset)}})client.on('conversation.updated', async ({ item, delta }) => {const items = client.conversation.getItems()if (delta?.audio) {wavStreamPlayer.add16BitPCM(delta.audio, item.id)}if (item.status === 'completed' && item.formatted.audio?.length) {const wavFile = await WavRecorder.decode(item.formatted.audio,24000,24000)item.formatted.file = wavFile}setItems(items)isAddStart = true})setItems(client.conversation.getItems())handleSearch()return () => {// cleanup; resets to defaultsclient.reset()}// eslint-disable-next-line}, [])useEffect(() => {if (Array.isArray(items) && items.length > 0) {let lastItem = items[items.length - 1]let addIdHistoryIndex = addIdHistory.findIndex((item) => item === lastItem.id)if (lastItem?.status === 'completed' &&lastItem?.role === 'assistant' &&isAddStart === true &&addIdHistoryIndex < 0) {addIdHistory.push(lastItem.id)let message = items[items.length - 2].formatted.transcript? items[items.length - 2].formatted.transcript: items[items.length - 2].formatted.textlet robotMessage = lastItem.formatted.transcriptApi.h5.chatRealTimeAdd({talkId: localStorage.getItem('talkId'),name: localStorage.getItem('nickname'),message,robotMessage,}).then((res) => {if (res.code === 40006) {antdMessage.warning(res.message)disconnectConversation()}})}}// eslint-disable-next-line}, [items, isAddStart])//#endregionreturn (<div className="m-realtime-wrap-box"><div className={`m-realtime-wrap-chat`}><SinglePageHeadergoBackPath="/ai/index/home/chatList"title="Realtime"></SinglePageHeader><div className="m-realtime-list" id="scrollableDiv">{window.platform === 'rn' ? 
null : (<Dropdownmenu={{ items: getItems() }}className="m-realtime-dropdown"trigger={['click', 'hover']}><Icon name="more" className="m-realtime-menu-btn"></Icon></Dropdown>)}<div data-component="ConsolePage"><div className="content-main"><div className="content-logs"><div className="content-block events"><div className="visualization"><div className="visualization-entry client"><canvas ref={clientCanvasRef} /></div><div className="visualization-entry server"><canvas ref={serverCanvasRef} /></div></div><div className="content-block-body" ref={eventsScrollRef}>{!realtimeEvents.length && `等待连接...`}{realtimeEvents.map((realtimeEvent, i) => {const count = realtimeEvent.countconst event = { ...realtimeEvent.event }if (event.type === 'input_audio_buffer.append') {event.audio = `[trimmed: ${event.audio.length} bytes]`} else if (event.type === 'response.audio.delta') {event.delta = `[trimmed: ${event.delta.length} bytes]`}return (<div className="event" key={event.event_id}><div className="event-timestamp">{formatTime(realtimeEvent.time)}</div><div className="event-details"><divclassName="event-summary"onClick={() => {// toggle event detailsconst id = event.event_idconst expanded = { ...expandedEvents }if (expanded[id]) {delete expanded[id]} else {expanded[id] = true}setExpandedEvents(expanded)}}><divclassName={`event-source ${event.type === 'error'? 'error': realtimeEvent.source}`}>{realtimeEvent.source === 'client' ? (<ArrowUp />) : (<ArrowDown />)}<span>{event.type === 'error'? 
'error!': realtimeEvent.source}</span></div><div className="event-type">{event.type}{count && ` (${count})`}</div></div>{!!expandedEvents[event.event_id] && (<div className="event-payload">{JSON.stringify(event, null, 2)}</div>)}</div></div>)})}</div></div><div className="content-block conversation"><div className="content-block-body" data-conversation-content>{!items.length && `等待连接...`}{items.map((conversationItem, i) => {return (<divclassName="conversation-item"key={conversationItem.id}><divclassName={`speaker ${conversationItem.role || ''}`}><div>{(conversationItem.role || conversationItem.type).replaceAll('_', ' ')}</div><divclassName="close"onClick={() =>deleteConversationItem(conversationItem.id)}><X /></div></div><div className={`speaker-content`}>{/* tool response */}{conversationItem.type ==='function_call_output' && (<div>{conversationItem.formatted.output}</div>)}{/* tool call */}{!!conversationItem.formatted.tool && (<div>{conversationItem.formatted.tool.name}({conversationItem.formatted.tool.arguments})</div>)}{!conversationItem.formatted.tool &&conversationItem.role === 'user' && (<div className="m-realtime-message">{conversationItem.formatted.transcript ||(conversationItem.formatted.audio?.length? '(awaiting transcript)': conversationItem.formatted.text ||'(item sent)')}</div>)}{!conversationItem.formatted.tool &&conversationItem.role === 'assistant' && (<div className="m-realtime-message">{conversationItem.formatted.transcript ||conversationItem.formatted.text ||'(truncated)'}</div>)}{conversationItem.formatted.file && (<audiosrc={conversationItem.formatted.file.url}controls/>)}</div></div>)})}</div></div><div className="content-actions"><Selectvalue={messageType}onChange={(value) => changeTurnEndType(value)}placeholder="请选择"><Option value="none">手动</Option><Option value="server_vad">自动</Option><Option value="input">打字</Option></Select><div className="spacer" />{isConnected && canPushToTalk && (<>{isPCFlag ? 
(<Buttontype="primary"label={isRecording ? 'release to send' : 'push to talk'}disabled={!isConnected || !canPushToTalk}onMouseDown={startRecording}onMouseUp={stopRecording}className={`m-realtime-recorad-btn ${isRecording ? 'active' : ''}`}>{isRecording ? '松开发送' : '按住说话'}</Button>) : (<Buttontype="primary"label={isRecording ? 'release to send' : 'push to talk'}disabled={!isConnected || !canPushToTalk}onTouchStart={startRecording}onTouchEnd={stopRecording}className={`m-realtime-recorad-btn ${isRecording ? 'active' : ''}`}>{isRecording ? '松开发送' : '按住说话'}</Button>)}</>)}{isConnected && messageType === 'input' ? (<div className="m-realtime-input-wrap"><Input.TextAreavalue={message}onChange={(event) => handleMessage(event)}placeholder="请输入"></Input.TextArea><Buttontype="primary"onClick={() => handleTest()}className="m-realtime-send-btn">发送</Button></div>) : null}<div className="spacer" /><Buttontype="primary"danger={isConnected ? true : false}onClick={isConnected ? disconnectConversation : connectConversation}>{isConnected ? '已连接' : '连接'}</Button></div></div></div></div></div></div></div>)
}const mapStateToProps = (state) => {return {collapsed: state.getIn(['light', 'collapsed']),isRNGotToken: state.getIn(['light', 'isRNGotToken']),}
}const mapDispatchToProps = (dispatch) => {return {onSetState(key, value) {dispatch({ type: 'SET_LIGHT_STATE', key, value })},onDispatch(action) {dispatch(action)},}
}export default connect(mapStateToProps, mapDispatchToProps)(withRouter(Index))

后端通过请求头获取token

  async handleUserAuth(req) {let index = req.rawHeaders.findIndex((item) =>item.includes('realtime, openai-insecure-api-key.'))let infoValue = ''if (index >= 0) {infoValue = req.rawHeaders[index]}infoValue = infoValue.replace('realtime, openai-insecure-api-key.', '')infoValue = infoValue.replace(', openai-beta.realtime-v1', '')let infoValueArr = infoValue.split('divide')let realTimeAuthRes = await axios.post(`${baseURL}/api/light/chat/realTimeAuth`,{token: infoValueArr[0],talkId: infoValueArr[1],apiKey,})return realTimeAuthRes}

后端完整代码

relay.js:

const { WebSocketServer } = require('ws')
const axios = require('axios')let baseURL = process.env.aliIPAddressWithPort
let apiKey = process.env.apiKeyOnServerclass RealtimeRelay {constructor(apiKey) {this.apiKey = apiKeythis.sockets = new WeakMap()this.wss = null}listen(port) {this.wss = new WebSocketServer({ port })this.wss.on('connection', this.connectionHandler.bind(this))this.log(`Listening on ws://localhost:${port}`)}async handleUserAuth(req) {let index = req.rawHeaders.findIndex((item) =>item.includes('realtime, openai-insecure-api-key.'))let infoValue = ''if (index >= 0) {infoValue = req.rawHeaders[index]}infoValue = infoValue.replace('realtime, openai-insecure-api-key.', '')infoValue = infoValue.replace(', openai-beta.realtime-v1', '')let infoValueArr = infoValue.split('divide')let realTimeAuthRes = await axios.post(`${baseURL}/api/light/chat/realTimeAuth`,{token: infoValueArr[0],talkId: infoValueArr[1],apiKey,})return realTimeAuthRes}async connectionHandler(ws, req) {if (global.isAzure) {let realTimeAuthRes = await this.handleUserAuth(req)if (realTimeAuthRes.data.code === 200) {let Realtime = await import('@openai/realtime-api-beta')const { RealtimeClient } = Realtimeif (!req.url) {this.log('No URL provided, closing connection.')ws.close()return}const url = new URL(req.url, `http://${req.headers.host}`)const pathname = url.pathnameif (pathname !== '/') {this.log(`Invalid pathname: "${pathname}"`)ws.close()return}// Instantiate new clientthis.log(`Connecting with key "${this.apiKey.slice(0, 3)}..."`)const client = new RealtimeClient({ apiKey: this.apiKey })// Relay: OpenAI Realtime API Event -> Browser Eventclient.realtime.on('server.*', (event) => {this.log(`Relaying "${event.type}" to Client`)ws.send(JSON.stringify(event))})client.realtime.on('close', () => ws.close())// Relay: Browser Event -> OpenAI Realtime API Event// We need to queue data waiting for the OpenAI connectionconst messageQueue = []const messageHandler = (data) => {try {const event = JSON.parse(data)this.log(`Relaying "${event.type}" to OpenAI`)client.realtime.send(event.type, event)} catch (e) 
{console.error(e.message)this.log(`Error parsing event from client: ${data}`)}}ws.on('message', (data) => {if (!client.isConnected()) {messageQueue.push(data)} else {messageHandler(data)}})ws.on('close', () => client.disconnect())// Connect to OpenAI Realtime APItry {this.log(`Connecting to OpenAI...`)await client.connect()} catch (e) {this.log(`Error connecting to OpenAI: ${e.message}`)ws.close()return}this.log(`Connected to OpenAI successfully!`)while (messageQueue.length) {messageHandler(messageQueue.shift())}} else {ws.send(JSON.stringify({...realTimeAuthRes.data,}))}}}// eslint-disable-next-linelog(...args) {// console.log(`[RealtimeRelay]`, ...args)}
}module.exports = {RealtimeRelay,
}

调用上面的代码:

  const relay = new RealtimeRelay(process.env.openaiToken)relay.listen(PORT)

人工智能学习网站

https://chat.xutongbao.top

本文来自互联网用户投稿,该文观点仅代表作者本人,不代表本站立场。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如若转载,请注明出处:http://www.mzph.cn/diannao/60271.shtml

如若内容造成侵权/违法违规/事实不符,请联系多彩编程网进行投诉反馈email:809451989@qq.com,一经查实,立即删除!

相关文章

conda 和 pip 的比较

conda 和 pip 的比较 在使用 Anaconda 管理 Python 环境时，您可以选择使用 conda 或 pip 命令来下载和安装软件包。这两种工具都能够有效地管理包，但它们在管理环境和解决依赖关系时有一些关键的区别。理解这些差异可以帮助您更好地决定在特定情况下使用…

【IC】DTCO

DTCO本质上是DSE。。。 文章A Novel Framework for DTCO: Fast and Automatic Routability Assessment with Machine Learning for Sub-3nm Technology Options中提到&#xff1a; std cell尺寸缩小不一定会在block模块级获得面积收益。。。得综合考虑&#xff0c;综合了设计…

mybatis+postgresql,无感读写json字段

1.实体类中字段类型 import com.alibaba.fastjson.JSONObject;Data public class TestDto {private String name;//对应数据库的JSON字段private JSONObject page_detail;} 2.自定义实现typeHandler package base.utils;import com.alibaba.fastjson.JSONObject; import org…

SpringBoot配置Rabbit中的MessageConverter对象

SpringAMQP默认使用SimpleMessageConverter组件对消息内容进行转换 SimpleMessageConverter&#xff1a; only supports String, byte[] and Serializable payloads仅仅支持String、Byte[]和Serializable对象Jackson2JsonMessageConverter&#xff1a;was expecting (JSON Str…

Python毕业设计选题:基于django+vue的医院挂号系统设计与实现

开发语言&#xff1a;Python框架&#xff1a;djangoPython版本&#xff1a;python3.7.7数据库&#xff1a;mysql 5.7数据库工具&#xff1a;Navicat11开发软件&#xff1a;PyCharm 系统展示 病人管理 科室类型管理 医生管理 公告咨询管理 挂号预约管理 科室信息管理 摘要 医…

蓝牙 SPP 协议详解及 Android 实现

文章目录 前言一、 什么是蓝牙 SPP 协议&#xff1f;SPP 的适用场景 二、SPP的工作流程1. 蓝牙设备初始化2. 设备发现与配对3. 建立 SPP 连接4. 数据传输5. 关闭连接 三、进阶应用与常见问题蓝牙连接中断与重试机制数据传输中的延迟与错误处理电池消耗和蓝牙优化 总结 前言 蓝…

arm 汇编技巧

汇编标号&#xff1a;f表示forward&#xff0c; b表示backward&#xff1a; Here is an example: 1: branch 1f 2: branch 1b 1: branch 2f 2: branch 1b Which is the equivalent of: label_1: branch label_3 label_2: branch label_1 label_3: branch label_4 label_4: bra…

WebPages 安全

WebPages 安全 1. 引言 随着互联网的普及和信息技术的发展&#xff0c;Web页面已经成为人们获取信息、进行交流和开展业务的重要平台。然而&#xff0c;随之而来的安全问题也日益突出&#xff0c;如跨站脚本攻击&#xff08;XSS&#xff09;、跨站请求伪造&#xff08;CSRF&a…

Java异步编程CompletableFuture(串行,并行,批量执行)

&#x1f353; 简介&#xff1a;java系列技术分享(&#x1f449;持续更新中…&#x1f525;) &#x1f353; 初衷:一起学习、一起进步、坚持不懈 &#x1f353; 如果文章内容有误与您的想法不一致,欢迎大家在评论区指正&#x1f64f; &#x1f353; 希望这篇文章对你有所帮助,欢…

Redis 缓存击穿

目录 缓存击穿 什么是缓存击穿&#xff1f; 有哪些解决办法&#xff1f; 缓存穿透和缓存击穿有什么区别&#xff1f; 缓存雪崩 什么是缓存雪崩&#xff1f; 有哪些解决办法&#xff1f; 缓存预热如何实现&#xff1f; 缓存雪崩和缓存击穿有什么区别&#xff1f; 如何保…

电脑不显示wifi列表怎么办?电脑不显示WiF列表的解决办法

有用户会遇到电脑总是不显示wifi列表的问题&#xff0c;但是不知道要怎么解决。随着无线网络的普及和使用&#xff0c;电脑无法显示WiFi列表的问题有时会让人感到困扰。电脑不显示WiFi列表是很常见的问题&#xff0c;但这并不意味着你无法连接到网络。不用担心&#xff0c;这个…

知识图谱,语义分析,全文检索,neo4j,elaticsearch,知识库平台(java,vue)

一、项目介绍 一款全源码&#xff0c;可二开&#xff0c;可基于云部署、私有部署的企业级知识库云平台&#xff0c;一款让企业知识变为实打实的数字财富的系统&#xff0c;应用在需要进行文档整理、分类、归集、检索、分析的场景。 为什么建立知识库平台&#xff1f; 助力企业…

Java项目实战II基于Spring Boot的问卷调查系统的设计与实现(开发文档+数据库+源码)

目录 一、前言 二、技术介绍 三、系统实现 四、文档参考 五、核心代码 六、源码获取 全栈码农以及毕业设计实战开发&#xff0c;CSDN平台Java领域新星创作者&#xff0c;专注于大学生项目实战开发、讲解和毕业答疑辅导 一、前言 在当今信息爆炸的时代&#xff0c;问卷调查…

博客摘录「 java三年工作经验面试题整理《精华》」2023年6月12日

JDK 和 JRE 有什么区别？JDK：java 开发工具包，提供了 java 的开发环境和运行环境。JRE：java 运行环境，为 java 的运行提供了所需环境。JDK 其实包含了 JRE，同时还包含了编译 java 源码的编译器 javac…

揭开 gRPC、RPC 、TCP和UDP 的通信奥秘

差异点 特性TCPUDPRPCgRPCHTTP工作层级传输层传输层应用层应用层应用层传输协议面向连接的传输协议无连接传输协议使用 TCP、HTTP 等协议HTTP/2HTTP/1.1, HTTP/2序列化格式字节流数据报文XML、JSON 或自定义Protocol BuffersJSON 或 XML特点可靠的连接传输无连接、快速传输远程…

二叉树搜索树(上)

二叉树搜索树&#xff08;上&#xff09; 概念 二叉搜索树又称二叉排序树&#xff0c;它或者是一颗空树&#xff0c;或者是具有以下性质的二叉树: • 若它的左子树不为空&#xff0c;则左子树上所有结点的值都⼩于等于根结点的值 • 若它的右子树不为空&#xff0c;则右子树…

解读Nature:Larger and more instructable language models become less reliable

目录 Larger and more instructable language models become less reliable 核心描述 核心原理 创新点 举例说明 大模型训练,微调建议 Larger and more instructable language models become less reliable 这篇论文的核心在于对大型语言模型(LLMs)的可靠性进行了深入…

在Linux上部署(MySQL Redis Elasticsearch等)各类软件

实战章节&#xff1a;在Linux上部署各类软件 前言 为什么学习各类软件在Linux上的部署 在前面&#xff0c;我们学习了许多的Linux命令和高级技巧&#xff0c;这些知识点比较零散&#xff0c;同学们跟随着课程的内容进行练习虽然可以基础掌握这些命令和技巧的使用&#xff0c…

局域网桥接只能单向ping问题,arp无法建立

一、问题 三台设备&#xff1a;Windows&#xff0c;Ubuntu&#xff0c;开发板。 我的Windows在每次开机后&#xff0c;无法ping通开发板&#xff0c;开发板可以ping通Windows&#xff1b; Windows和另一台局域网内的Ubuntu可以相互ping通&#xff1b; Ubuntu和开发板可以相互pi…

FPGA实现以太网(二)、初始化和配置PHY芯片

系列文章目录 FPGA实现以太网&#xff08;一&#xff09;、以太网基础知识 文章目录 系列文章目录一、MDIO协议介绍二、PHY芯片管脚以及结构框图三、MDIO帧时序介绍3.1 MDIO帧格式3.2 MDIO写时序3.3 MDIO读时序 四、PHY芯片常用寄存器描述4.1 基本模式控制寄存器&#xff08;0…