Files
chatgpt-dingtalk/pkg/llm/client.go
二丫讲梵 f7326b6797 增加卡片交互流式输出的能力 (#315)
* 将ai交互切换为go-openai

* add stream

*  feat(stream): 优化流式响应机制,实现实时卡片更新

- 将固定1.5秒更新改为基于300ms最小间隔的实时更新策略
- 新增内容缓冲区机制,避免过于频繁的API调用
- 改进流式中断处理,保护已接收的内容不丢失

🔧 chore(llm): 优化HTTP客户端配置

- 增加连接池设置(MaxIdleConns: 100, MaxIdleConnsPerHost: 10)
- 设置空闲连接超时时间为90秒
- 添加HTTP/2禁用选项注释,用于解决流式错误问题

📝 docs(stream): 更新流式更新策略文档

- 详细说明实时流式更新机制和缓冲策略
- 新增HTTP/2流式错误的故障排除指南
- 更新配置参数说明和建议范围

🐛 fix(stream): 修复流式中断时的内容丢失问题

- 在流式接收中断时,确保已接收的内容不会丢失
- 改进错误处理逻辑,区分有内容和无内容的情况

* modify ai
2025-12-11 18:22:35 +08:00

97 lines
2.2 KiB
Go

package llm
import (
"context"
"net/http"
"net/url"
"time"
openai "github.com/sashabaranov/go-openai"
"github.com/eryajf/chatgpt-dingtalk/public"
)
// Client holds one user's LLM session: the underlying go-openai client,
// per-session length limits, a timeout signal channel, and the chat context.
type Client struct {
	client         *openai.Client  // underlying go-openai API client
	ctx            context.Context // session context; cancelled by Close or when the timeout elapses
	userId         string          // ID of the user this session belongs to
	maxQuestionLen int             // maximum accepted question length; clamped so question+answer fit maxText (see SetMaxQuestionLen)
	maxText        int             // total length budget shared by question and answer
	maxAnswerLen   int             // length reserved for the answer out of maxText
	timeOut        time.Duration   // session timeout taken from public.Config.SessionTimeout
	doneChan       chan struct{}   // receives exactly one value once ctx is done (see NewClient)
	cancel         func()          // cancels ctx
	ChatContext    *Context        // conversation context/history for this user
}
// NewClient builds an LLM client for the given user from the global
// public.Config.
//
// It creates a context with a hard 600-second deadline; when that context is
// done (deadline reached or Close called), a single value is sent on the
// client's done channel so callers can observe session end without polling.
// Depending on configuration the client targets either Azure OpenAI or a
// standard OpenAI-compatible endpoint, optionally through an HTTP proxy.
func NewClient(userId string) *Client {
	ctx, cancel := context.WithTimeout(context.Background(), 600*time.Second)
	// Buffered (size 1) so the forwarding goroutine never blocks even if
	// nobody ever reads the done channel.
	timeOutChan := make(chan struct{}, 1)
	go func() {
		<-ctx.Done()
		timeOutChan <- struct{}{}
	}()
	config := openai.DefaultConfig(public.Config.ApiKey)
	if public.Config.AzureOn {
		// Azure OpenAI: the endpoint is derived from the resource name, and
		// every requested model is mapped onto the single configured deployment.
		config = openai.DefaultAzureConfig(
			public.Config.AzureOpenAIToken,
			"https://"+public.Config.AzureResourceName+".openai.azure.com",
		)
		config.APIVersion = public.Config.AzureApiVersion
		config.AzureModelMapperFunc = func(model string) string {
			return public.Config.AzureDeploymentName
		}
	} else {
		// Pooled transport so streamed requests reuse connections instead of
		// redialing; idle connections are dropped after 90s.
		transport := &http.Transport{
			MaxIdleConns:        100,
			MaxIdleConnsPerHost: 10,
			IdleConnTimeout:     90 * time.Second,
		}
		if public.Config.HttpProxy != "" {
			// Fix: the parse error was previously discarded with `_`, so a
			// malformed proxy setting could be installed silently. Apply the
			// proxy only when the URL actually parses; otherwise fall back to
			// a direct connection (best-effort, matching prior intent).
			if proxyURL, err := url.Parse(public.Config.HttpProxy); err == nil {
				transport.Proxy = http.ProxyURL(proxyURL)
			}
		}
		config.HTTPClient = &http.Client{Transport: transport}
		if public.Config.BaseURL != "" {
			config.BaseURL = public.Config.BaseURL + "/v1"
		}
	}
	return &Client{
		client:         openai.NewClientWithConfig(config),
		ctx:            ctx,
		userId:         userId,
		maxQuestionLen: public.Config.MaxQuestionLen,
		maxAnswerLen:   public.Config.MaxAnswerLen,
		maxText:        public.Config.MaxText,
		timeOut:        public.Config.SessionTimeout,
		doneChan:       timeOutChan,
		cancel:         cancel,
		ChatContext:    NewContext(),
	}
}
// Close ends the session by cancelling the client's context; this also
// triggers the one-shot send on the done channel set up in NewClient.
func (c *Client) Close() {
	c.cancel()
}
// GetDoneChan returns the channel that receives a single value once the
// session context is done (timeout elapsed or Close called).
// NOTE(review): the Get prefix is non-idiomatic Go, but renaming would break
// existing callers.
func (c *Client) GetDoneChan() chan struct{} {
	return c.doneChan
}
// SetMaxQuestionLen stores the requested question-length limit, clamped so
// that the question plus the reserved answer length never exceed the total
// text budget, and returns the value actually in effect.
func (c *Client) SetMaxQuestionLen(maxQuestionLen int) int {
	limit := c.maxText - c.maxAnswerLen
	if maxQuestionLen > limit {
		maxQuestionLen = limit
	}
	c.maxQuestionLen = maxQuestionLen
	return c.maxQuestionLen
}