Mirror of https://github.com/tl-open-source/tl-rtc-file.git, synced 2025-09-27 03:46:11 +08:00

feat: support sending individual files feat: support private text chat feat: support staging files on a Seafile drive feat: support live-streaming rooms feat: support downloading files via pickup code feat: support previewing video files feat: support error alerting feat: add startup logo feat: add disclaimer feat: add version number to configuration feat: adjust open-source license feat: add customized paid services feat: optimize server-side code feat: optimize batch-send logic feat: optimize styling and UX feat: optimize ws and manage related settings in conf feat: adjust file send interval to 1 second feat: improve file-sending experience feat: improve file-selection logic feat: improve startup files/commands feat: separate socket configuration feat: improve joining a room via share link feat: improve top-right message notifications feat: remove npm dependencies feat: remove first-visit popup feat: remote canvas brush coming soon feat: remote control coming soon
137 lines
2.7 KiB
JavaScript
/**
 * openai api
 * @author iamtsm
 */

const request = require('request')
const openai = require('../../../conf/cfg.json').openai
const utils = require("../../../src/utils/utils");

// api key pool: used = 0 means the key is free, otherwise it holds the roomId currently using it
let keysMap = {}

/**
 * openai chat api
 * @param {*} apiKey
 * @param {*} prompt
 * @param {*} model
 * @param {*} maxTokens
 * @param {*} temperature
 * @param {*} n
 * @returns
 */
function openaiChatApi(
    apiKey,
    prompt,
    model = 'text-davinci-003',
    maxTokens = 256,
    temperature = 0.7,
    n = 1
) {
    return new Promise((resolve, reject) => {
        const options = {
            url: 'https://api.openai.com/v1/completions',
            headers: {
                'Content-Type': 'application/json',
                'Authorization': `Bearer ${apiKey}`
            },
            json: {
                prompt: prompt,
                model: model,
                max_tokens: maxTokens,
                temperature: temperature,
                n: n,
            }
        }

        request.post(options, (error, response, body) => {
            if (error) {
                reject(error)
            } else {
                resolve(body)
            }
        })
    })
}

/**
 * Get the usage status of all AI keys
 * @returns
 */
function apiKeysStatus(){
    return keysMap
}

/**
 * Reset the usage status of an AI key
 * @param {*} key
 */
function setApiKeyStatus( key ){
    // mark the key as free again (guard against keys that were never registered)
    if(keysMap[key]){
        keysMap[key].used = 0
    }
}

/**
 * Get an available apiKey
 * @param {*} roomId
 * @returns
 */
function getApiKey( roomId ){
    // init: register every configured key as free
    if(Object.keys(keysMap).length === 0){
        let apiKeys = openai.apiKeys;
        apiKeys.forEach((key)=>{
            keysMap[key] = {
                used : 0
            }
        })
    }
    // get: hand out the first free key and mark it as used by this room
    for(let key in keysMap){
        if(keysMap[key].used === 0){
            keysMap[key].used = roomId
            return key;
        }
    }
    return undefined
}

/**
 * AI chat: keep requesting completions until the model stops, then return the full answer
 * @param {*} msg
 * @param {*} roomId
 * @returns
 */
async function openaiChat(msg, roomId){
    let apiKey = getApiKey(roomId)
    if(!apiKey){
        // all keys are currently in use: "the AI is overloaded, please wait and retry"
        return "AI忙不过来啦,稍等重试一下"
    }

    let result = "";
    let maxCount = 0;
    while (maxCount++ < 100000) {
        const res = await openaiChatApi( apiKey, msg )
        utils.tlConsole("res : ", res)
        // stop if the api returned an error payload instead of completions
        if (!res || !res.choices || res.choices.length === 0) {
            break;
        }
        result += res.choices[0].text.substring(res.choices[0].text.indexOf("\n") + 1);
        if(result.startsWith("\n")){
            result = result.substring(1);
        }
        if (res.choices[0].finish_reason === "stop") {
            break;
        }
        // feed the last completion back in to continue the answer
        msg = res.choices[0].text;
    }

    // the answer is done, release the key for other rooms
    setApiKeyStatus(apiKey)

    return result;
}

module.exports = {
    openaiChat, apiKeysStatus
}
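
A minimal usage sketch of the exported functions, assuming the module is required from another server-side file in the project; the relative require path, the handler name onAiChatMessage, and the roomId value "10001" are illustrative assumptions, not part of the original code:

// hypothetical caller, e.g. inside a socket message handler
const { openaiChat, apiKeysStatus } = require('./openai')  // path is an assumption

async function onAiChatMessage(msg, roomId) {
    // openaiChat reserves a configured api key for this room, queries the model
    // until it reports finish_reason "stop", then releases the key again
    const answer = await openaiChat(msg, roomId)
    console.log('answer :', answer)

    // apiKeysStatus shows which keys are free (0) or held by a roomId
    console.log('key status :', apiKeysStatus())
}

onAiChatMessage('hello', '10001')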