Mirror of https://github.com/eolinker/apinto, synced 2025-09-26 21:01:19 +08:00

Commit: 新增AI provider和ai prompt (Add AI provider and AI prompt)
39  drivers/ai-provider/authropic/anthropic.yaml  Normal file
@@ -0,0 +1,39 @@
provider: anthropic
label:
  en_US: Anthropic
description:
  en_US: Anthropic’s powerful models, such as Claude 3.
  zh_Hans: Anthropic 的强大模型,例如 Claude 3。
icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#F0F0EB"
help:
  title:
    en_US: Get your API Key from Anthropic
    zh_Hans: 从 Anthropic 获取 API Key
  url:
    en_US: https://console.anthropic.com/account/keys
supported_model_types:
  - llm
configurate_methods:
  - predefined-model
provider_credential_schema:
  credential_form_schemas:
    - variable: anthropic_api_key
      label:
        en_US: API Key
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
    - variable: anthropic_api_url
      label:
        en_US: API URL
      type: text-input
      required: false
      placeholder:
        zh_Hans: 在此输入您的 API URL
        en_US: Enter your API URL
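These provider manifests follow a Dify-style schema. As a minimal sketch of how such a file could be loaded in Go — the ProviderConfig struct and its field subset here are assumptions for illustration, not apinto's actual loader:

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v3"
)

// ProviderConfig mirrors a small, assumed subset of the provider YAML above.
type ProviderConfig struct {
	Provider            string            `yaml:"provider"`
	Label               map[string]string `yaml:"label"`
	SupportedModelTypes []string          `yaml:"supported_model_types"`
	ConfigurateMethods  []string          `yaml:"configurate_methods"`
}

func main() {
	data, err := os.ReadFile("drivers/ai-provider/authropic/anthropic.yaml")
	if err != nil {
		panic(err)
	}
	var cfg ProviderConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Provider, cfg.SupportedModelTypes) // anthropic [llm]
}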
8  drivers/ai-provider/authropic/llm/_position.yaml  Normal file
@@ -0,0 +1,8 @@
- claude-3-5-sonnet-20240620
- claude-3-haiku-20240307
- claude-3-opus-20240229
- claude-3-sonnet-20240229
- claude-2.1
- claude-instant-1.2
- claude-2
- claude-instant-1
36  drivers/ai-provider/authropic/llm/claude-2.1.yaml  Normal file
@@ -0,0 +1,36 @@
model: claude-2.1
label:
  en_US: claude-2.1
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '8.00'
  output: '24.00'
  unit: '0.000001'
  currency: USD
37  drivers/ai-provider/authropic/llm/claude-2.yaml  Normal file
@@ -0,0 +1,37 @@
model: claude-2
label:
  en_US: claude-2
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 100000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '8.00'
  output: '24.00'
  unit: '0.000001'
  currency: USD
deprecated: true
39  drivers/ai-provider/authropic/llm/claude-3-5-sonnet-20240620.yaml  Normal file
@@ -0,0 +1,39 @@
model: claude-3-5-sonnet-20240620
label:
  en_US: claude-3-5-sonnet-20240620
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '3.00'
  output: '15.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/authropic/llm/claude-3-haiku-20240307.yaml  Normal file
@@ -0,0 +1,39 @@
model: claude-3-haiku-20240307
label:
  en_US: claude-3-haiku-20240307
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '0.25'
  output: '1.25'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/authropic/llm/claude-3-opus-20240229.yaml  Normal file
@@ -0,0 +1,39 @@
model: claude-3-opus-20240229
label:
  en_US: claude-3-opus-20240229
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '15.00'
  output: '75.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/authropic/llm/claude-3-sonnet-20240229.yaml  Normal file
@@ -0,0 +1,39 @@
model: claude-3-sonnet-20240229
label:
  en_US: claude-3-sonnet-20240229
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 200000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '3.00'
  output: '15.00'
  unit: '0.000001'
  currency: USD
36  drivers/ai-provider/authropic/llm/claude-instant-1.2.yaml  Normal file
@@ -0,0 +1,36 @@
model: claude-instant-1.2
label:
  en_US: claude-instant-1.2
model_type: llm
features: [ ]
model_properties:
  mode: chat
  context_size: 100000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '1.63'
  output: '5.51'
  unit: '0.000001'
  currency: USD
deprecated: true
36  drivers/ai-provider/authropic/llm/claude-instant-1.yaml  Normal file
@@ -0,0 +1,36 @@
model: claude-instant-1
label:
  en_US: claude-instant-1
model_type: llm
features: [ ]
model_properties:
  mode: chat
  context_size: 100000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '1.63'
  output: '5.51'
  unit: '0.000001'
  currency: USD
deprecated: true
26  drivers/ai-provider/converter.go  Normal file
@@ -0,0 +1,26 @@
package ai_provider

import (
	"github.com/eolinker/eosc/eocontext"
)

type IConverter interface {
	RequestConvert(ctx eocontext.EoContext) error
	ResponseConvert(ctx eocontext.EoContext) error
}

type ClientRequest struct {
	Messages []*Message `json:"messages"`
}

type ClientResponse struct {
	Message      Message `json:"message"`
	FinishReason string  `json:"finish_reason"`
	Code         int     `json:"code"`
	Error        string  `json:"error"`
}

type Message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}
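IConverter is the seam where a provider's request/response wire format gets adapted. A minimal sketch of a conforming implementation — the pass-through behavior is a placeholder assumption; a real converter would rewrite bodies between the ClientRequest/ClientResponse shapes above and the provider's own API:

package ai_provider

import (
	"github.com/eolinker/eosc/eocontext"
)

// noopConverter is a hypothetical IConverter that leaves the request and
// response untouched; useful as a placeholder or in tests.
type noopConverter struct{}

func (n *noopConverter) RequestConvert(ctx eocontext.EoContext) error  { return nil }
func (n *noopConverter) ResponseConvert(ctx eocontext.EoContext) error { return nil }

// Compile-time check that noopConverter satisfies IConverter.
var _ IConverter = (*noopConverter)(nil)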
31  drivers/ai-provider/google/google.yaml  Normal file
@@ -0,0 +1,31 @@
provider: google
label:
  en_US: Google
description:
  en_US: Google's Gemini model.
  zh_Hans: 谷歌提供的 Gemini 模型.
icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#FCFDFF"
help:
  title:
    en_US: Get your API Key from Google
    zh_Hans: 从 Google 获取 API Key
  url:
    en_US: https://ai.google.dev/
supported_model_types:
  - llm
configurate_methods:
  - predefined-model
provider_credential_schema:
  credential_form_schemas:
    - variable: google_api_key
      label:
        en_US: API Key
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
39  drivers/ai-provider/google/llm/gemini-1.5-flash-8b-exp-0827.yaml  Normal file
@@ -0,0 +1,39 @@
model: gemini-1.5-flash-8b-exp-0827
label:
  en_US: Gemini 1.5 Flash 8B 0827
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 1048576
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/google/llm/gemini-1.5-flash-exp-0827.yaml  Normal file
@@ -0,0 +1,39 @@
model: gemini-1.5-flash-exp-0827
label:
  en_US: Gemini 1.5 Flash 0827
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 1048576
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/google/llm/gemini-1.5-flash-latest.yaml  Normal file
@@ -0,0 +1,39 @@
model: gemini-1.5-flash-latest
label:
  en_US: Gemini 1.5 Flash
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 1048576
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/google/llm/gemini-1.5-pro-exp-0801.yaml  Normal file
@@ -0,0 +1,39 @@
model: gemini-1.5-pro-exp-0801
label:
  en_US: Gemini 1.5 Pro 0801
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 2097152
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/google/llm/gemini-1.5-pro-exp-0827.yaml  Normal file
@@ -0,0 +1,39 @@
model: gemini-1.5-pro-exp-0827
label:
  en_US: Gemini 1.5 Pro 0827
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 2097152
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
39  drivers/ai-provider/google/llm/gemini-1.5-pro-latest.yaml  Normal file
@@ -0,0 +1,39 @@
model: gemini-1.5-pro-latest
label:
  en_US: Gemini 1.5 Pro
model_type: llm
features:
  - agent-thought
  - vision
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 2097152
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 8192
    min: 1
    max: 8192
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
34  drivers/ai-provider/google/llm/gemini-pro-vision.yaml  Normal file
@@ -0,0 +1,34 @@
model: gemini-pro-vision
label:
  en_US: Gemini Pro Vision
model_type: llm
features:
  - vision
model_properties:
  mode: chat
  context_size: 12288
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 4096
    min: 1
    max: 4096
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
38  drivers/ai-provider/google/llm/gemini-pro.yaml  Normal file
@@ -0,0 +1,38 @@
model: gemini-pro
label:
  en_US: Gemini Pro
model_type: llm
features:
  - agent-thought
  - tool-call
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 30720
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: top_k
    label:
      zh_Hans: 取样数量
      en_US: Top k
    type: int
    help:
      zh_Hans: 仅从每个后续标记的前 K 个选项中采样。
      en_US: Only sample from the top K options for each subsequent token.
    required: false
  - name: max_tokens_to_sample
    use_template: max_tokens
    required: true
    default: 2048
    min: 1
    max: 2048
  - name: response_format
    use_template: response_format
pricing:
  input: '0.00'
  output: '0.00'
  unit: '0.000001'
  currency: USD
7  drivers/ai-provider/openAI/config.go  Normal file
@@ -0,0 +1,7 @@
package openAI

type Config struct {
	APIKey       string `json:"api_key"`
	Organization string `json:"organization"`
	Base         string `json:"base"`
}
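This Config arrives as JSON from the worker configuration. A quick round-trip sketch using only the standard library (the credential values are placeholders):

package main

import (
	"encoding/json"
	"fmt"
)

// Config duplicates the struct above so the sketch is self-contained.
type Config struct {
	APIKey       string `json:"api_key"`
	Organization string `json:"organization"`
	Base         string `json:"base"`
}

func main() {
	raw := `{"api_key":"sk-...","organization":"org-demo","base":"https://api.openai.com"}`
	var c Config
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Organization, c.Base) // org-demo https://api.openai.com
}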
52  drivers/ai-provider/openAI/executor.go  Normal file
@@ -0,0 +1,52 @@
package openAI

import (
	"time"

	"github.com/eolinker/apinto/drivers"
	"github.com/eolinker/eosc"
	"github.com/eolinker/eosc/eocontext"
)

type executor struct {
	drivers.WorkerBase
}

func (e *executor) Select(ctx eocontext.EoContext) (eocontext.INode, int, error) {
	//TODO implement me
	panic("implement me")
}

func (e *executor) Scheme() string {
	//TODO implement me
	panic("implement me")
}

func (e *executor) TimeOut() time.Duration {
	//TODO implement me
	panic("implement me")
}

func (e *executor) Nodes() []eocontext.INode {
	//TODO implement me
	panic("implement me")
}

func (e *executor) Start() error {
	return nil
}

func (e *executor) Reset(conf interface{}, workers map[eosc.RequireId]eosc.IWorker) error {
	//TODO implement me
	panic("implement me")
}

func (e *executor) Stop() error {
	//TODO implement me
	panic("implement me")
}

func (e *executor) CheckSkill(skill string) bool {
	//TODO implement me
	panic("implement me")
}
1  drivers/ai-provider/openAI/factory.go  Normal file
@@ -0,0 +1 @@
package openAI
44  drivers/ai-provider/openAI/llm/chatgpt-4o-latest.yaml  Normal file
@@ -0,0 +1,44 @@
model: chatgpt-4o-latest
label:
  zh_Hans: chatgpt-4o-latest
  en_US: chatgpt-4o-latest
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16384
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '2.50'
  output: '10.00'
  unit: '0.000001'
  currency: USD
43  drivers/ai-provider/openAI/llm/gpt-3.5-turbo-0125.yaml  Normal file
@@ -0,0 +1,43 @@
model: gpt-3.5-turbo-0125
label:
  zh_Hans: gpt-3.5-turbo-0125
  en_US: gpt-3.5-turbo-0125
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 16385
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.0005'
  output: '0.0015'
  unit: '0.001'
  currency: USD
34  drivers/ai-provider/openAI/llm/gpt-3.5-turbo-0613.yaml  Normal file
@@ -0,0 +1,34 @@
model: gpt-3.5-turbo-0613
label:
  zh_Hans: gpt-3.5-turbo-0613
  en_US: gpt-3.5-turbo-0613
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '0.0015'
  output: '0.002'
  unit: '0.001'
  currency: USD
deprecated: true
43  drivers/ai-provider/openAI/llm/gpt-3.5-turbo-1106.yaml  Normal file
@@ -0,0 +1,43 @@
model: gpt-3.5-turbo-1106
label:
  zh_Hans: gpt-3.5-turbo-1106
  en_US: gpt-3.5-turbo-1106
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 16385
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.001'
  output: '0.002'
  unit: '0.001'
  currency: USD
34  drivers/ai-provider/openAI/llm/gpt-3.5-turbo-16k-0613.yaml  Normal file
@@ -0,0 +1,34 @@
model: gpt-3.5-turbo-16k-0613
label:
  zh_Hans: gpt-3.5-turbo-16k-0613
  en_US: gpt-3.5-turbo-16k-0613
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 16385
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16385
  - name: response_format
    use_template: response_format
pricing:
  input: '0.003'
  output: '0.004'
  unit: '0.001'
  currency: USD
deprecated: true
33  drivers/ai-provider/openAI/llm/gpt-3.5-turbo-16k.yaml  Normal file
@@ -0,0 +1,33 @@
model: gpt-3.5-turbo-16k
label:
  zh_Hans: gpt-3.5-turbo-16k
  en_US: gpt-3.5-turbo-16k
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 16385
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16385
  - name: response_format
    use_template: response_format
pricing:
  input: '0.003'
  output: '0.004'
  unit: '0.001'
  currency: USD
30  drivers/ai-provider/openAI/llm/gpt-3.5-turbo-instruct.yaml  Normal file
@@ -0,0 +1,30 @@
model: gpt-3.5-turbo-instruct
label:
  zh_Hans: gpt-3.5-turbo-instruct
  en_US: gpt-3.5-turbo-instruct
model_type: llm
features: [ ]
model_properties:
  mode: completion
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    use_template: response_format
pricing:
  input: '0.0015'
  output: '0.002'
  unit: '0.001'
  currency: USD
43  drivers/ai-provider/openAI/llm/gpt-3.5-turbo.yaml  Normal file
@@ -0,0 +1,43 @@
model: gpt-3.5-turbo
label:
  zh_Hans: gpt-3.5-turbo
  en_US: gpt-3.5-turbo
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 16385
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.0005'
  output: '0.0015'
  unit: '0.001'
  currency: USD
56  drivers/ai-provider/openAI/llm/gpt-4-0125-preview.yaml  Normal file
@@ -0,0 +1,56 @@
model: gpt-4-0125-preview
label:
  zh_Hans: gpt-4-0125-preview
  en_US: gpt-4-0125-preview
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD
56  drivers/ai-provider/openAI/llm/gpt-4-1106-preview.yaml  Normal file
@@ -0,0 +1,56 @@
model: gpt-4-1106-preview
label:
  zh_Hans: gpt-4-1106-preview
  en_US: gpt-4-1106-preview
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD
56  drivers/ai-provider/openAI/llm/gpt-4-32k.yaml  Normal file
@@ -0,0 +1,56 @@
model: gpt-4-32k
label:
  zh_Hans: gpt-4-32k
  en_US: gpt-4-32k
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 32768
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 32768
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.06'
  output: '0.12'
  unit: '0.001'
  currency: USD
57  drivers/ai-provider/openAI/llm/gpt-4-turbo-2024-04-09.yaml  Normal file
@@ -0,0 +1,57 @@
model: gpt-4-turbo-2024-04-09
label:
  zh_Hans: gpt-4-turbo-2024-04-09
  en_US: gpt-4-turbo-2024-04-09
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD
56  drivers/ai-provider/openAI/llm/gpt-4-turbo-preview.yaml  Normal file
@@ -0,0 +1,56 @@
model: gpt-4-turbo-preview
label:
  zh_Hans: gpt-4-turbo-preview
  en_US: gpt-4-turbo-preview
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD
57  drivers/ai-provider/openAI/llm/gpt-4-turbo.yaml  Normal file
@@ -0,0 +1,57 @@
model: gpt-4-turbo
label:
  zh_Hans: gpt-4-turbo
  en_US: gpt-4-turbo
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD
54  drivers/ai-provider/openAI/llm/gpt-4-vision-preview.yaml  Normal file
@@ -0,0 +1,54 @@
model: gpt-4-vision-preview
label:
  zh_Hans: gpt-4-vision-preview
  en_US: gpt-4-vision-preview
model_type: llm
features:
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.01'
  output: '0.03'
  unit: '0.001'
  currency: USD
56  drivers/ai-provider/openAI/llm/gpt-4.yaml  Normal file
@@ -0,0 +1,56 @@
model: gpt-4
label:
  zh_Hans: gpt-4
  en_US: gpt-4
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
model_properties:
  mode: chat
  context_size: 8192
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 8192
  - name: seed
    label:
      zh_Hans: 种子
      en_US: Seed
    type: int
    help:
      zh_Hans: 如果指定,模型将尽最大努力进行确定性采样,使得重复的具有相同种子和参数的请求应该返回相同的结果。不能保证确定性,您应该参考 system_fingerprint
        响应参数来监视变化。
      en_US: If specified, model will make a best effort to sample deterministically,
        such that repeated requests with the same seed and parameters should return
        the same result. Determinism is not guaranteed, and you should refer to the
        system_fingerprint response parameter to monitor changes in the backend.
    required: false
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '0.03'
  output: '0.06'
  unit: '0.001'
  currency: USD
44  drivers/ai-provider/openAI/llm/gpt-4o-2024-05-13.yaml  Normal file
@@ -0,0 +1,44 @@
model: gpt-4o-2024-05-13
label:
  zh_Hans: gpt-4o-2024-05-13
  en_US: gpt-4o-2024-05-13
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '5.00'
  output: '15.00'
  unit: '0.000001'
  currency: USD
47  drivers/ai-provider/openAI/llm/gpt-4o-2024-08-06.yaml  Normal file
@@ -0,0 +1,47 @@
model: gpt-4o-2024-08-06
label:
  zh_Hans: gpt-4o-2024-08-06
  en_US: gpt-4o-2024-08-06
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16384
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
      - json_schema
  - name: json_schema
    use_template: json_schema
pricing:
  input: '2.50'
  output: '10.00'
  unit: '0.000001'
  currency: USD
47  drivers/ai-provider/openAI/llm/gpt-4o-mini-2024-07-18.yaml  Normal file
@@ -0,0 +1,47 @@
model: gpt-4o-mini-2024-07-18
label:
  zh_Hans: gpt-4o-mini-2024-07-18
  en_US: gpt-4o-mini-2024-07-18
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16384
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
      - json_schema
  - name: json_schema
    use_template: json_schema
pricing:
  input: '0.15'
  output: '0.60'
  unit: '0.000001'
  currency: USD
47  drivers/ai-provider/openAI/llm/gpt-4o-mini.yaml  Normal file
@@ -0,0 +1,47 @@
model: gpt-4o-mini
label:
  zh_Hans: gpt-4o-mini
  en_US: gpt-4o-mini
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 16384
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
      - json_schema
  - name: json_schema
    use_template: json_schema
pricing:
  input: '0.15'
  output: '0.60'
  unit: '0.000001'
  currency: USD
44  drivers/ai-provider/openAI/llm/gpt-4o.yaml  Normal file
@@ -0,0 +1,44 @@
model: gpt-4o
label:
  zh_Hans: gpt-4o
  en_US: gpt-4o
model_type: llm
features:
  - multi-tool-call
  - agent-thought
  - stream-tool-call
  - vision
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: Response Format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '5.00'
  output: '15.00'
  unit: '0.000001'
  currency: USD
33  drivers/ai-provider/openAI/llm/o1-mini-2024-09-12.yaml  Normal file
@@ -0,0 +1,33 @@
model: o1-mini-2024-09-12
label:
  zh_Hans: o1-mini-2024-09-12
  en_US: o1-mini-2024-09-12
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    default: 65536
    min: 1
    max: 65536
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '3.00'
  output: '12.00'
  unit: '0.000001'
  currency: USD
33  drivers/ai-provider/openAI/llm/o1-mini.yaml  Normal file
@@ -0,0 +1,33 @@
model: o1-mini
label:
  zh_Hans: o1-mini
  en_US: o1-mini
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    default: 65536
    min: 1
    max: 65536
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '3.00'
  output: '12.00'
  unit: '0.000001'
  currency: USD
33  drivers/ai-provider/openAI/llm/o1-preview-2024-09-12.yaml  Normal file
@@ -0,0 +1,33 @@
model: o1-preview-2024-09-12
label:
  zh_Hans: o1-preview-2024-09-12
  en_US: o1-preview-2024-09-12
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    default: 32768
    min: 1
    max: 32768
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '15.00'
  output: '60.00'
  unit: '0.000001'
  currency: USD
33  drivers/ai-provider/openAI/llm/o1-preview.yaml  Normal file
@@ -0,0 +1,33 @@
model: o1-preview
label:
  zh_Hans: o1-preview
  en_US: o1-preview
model_type: llm
features:
  - agent-thought
model_properties:
  mode: chat
  context_size: 128000
parameter_rules:
  - name: max_tokens
    use_template: max_tokens
    default: 32768
    min: 1
    max: 32768
  - name: response_format
    label:
      zh_Hans: 回复格式
      en_US: response_format
    type: string
    help:
      zh_Hans: 指定模型必须输出的格式
      en_US: specifying the format that the model must output
    required: false
    options:
      - text
      - json_object
pricing:
  input: '15.00'
  output: '60.00'
  unit: '0.000001'
  currency: USD
29  drivers/ai-provider/openAI/llm/text-davinci-003.yaml  Normal file
@@ -0,0 +1,29 @@
model: text-davinci-003
label:
  zh_Hans: text-davinci-003
  en_US: text-davinci-003
model_type: llm
features: [ ]
model_properties:
  mode: completion
  context_size: 4096
parameter_rules:
  - name: temperature
    use_template: temperature
  - name: top_p
    use_template: top_p
  - name: presence_penalty
    use_template: presence_penalty
  - name: frequency_penalty
    use_template: frequency_penalty
  - name: max_tokens
    use_template: max_tokens
    default: 512
    min: 1
    max: 4096
pricing:
  input: '0.001'
  output: '0.002'
  unit: '0.001'
  currency: USD
deprecated: true
89
drivers/ai-provider/openAI/openai.yaml
Normal file
89
drivers/ai-provider/openAI/openai.yaml
Normal file
@@ -0,0 +1,89 @@
provider: openai
label:
  en_US: OpenAI
description:
  en_US: Models provided by OpenAI, such as GPT-3.5-Turbo and GPT-4.
  zh_Hans: OpenAI 提供的模型,例如 GPT-3.5-Turbo 和 GPT-4。
icon_small:
  en_US: icon_s_en.svg
icon_large:
  en_US: icon_l_en.svg
background: "#E5E7EB"
help:
  title:
    en_US: Get your API Key from OpenAI
    zh_Hans: 从 OpenAI 获取 API Key
  url:
    en_US: https://platform.openai.com/account/api-keys
supported_model_types:
  - llm
  - text-embedding
  - speech2text
  - moderation
  - tts
configurate_methods:
  - predefined-model
  - customizable-model
model_credential_schema:
  model:
    label:
      en_US: Model Name
      zh_Hans: 模型名称
    placeholder:
      en_US: Enter your model name
      zh_Hans: 输入模型名称
  credential_form_schemas:
    - variable: openai_api_key
      label:
        en_US: API Key
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
    - variable: openai_organization
      label:
        zh_Hans: 组织 ID
        en_US: Organization
      type: text-input
      required: false
      placeholder:
        zh_Hans: 在此输入您的组织 ID
        en_US: Enter your Organization ID
    - variable: openai_api_base
      label:
        zh_Hans: API Base
        en_US: API Base
      type: text-input
      required: false
      placeholder:
        zh_Hans: 在此输入您的 API Base
        en_US: Enter your API Base
provider_credential_schema:
  credential_form_schemas:
    - variable: openai_api_key
      label:
        en_US: API Key
      type: secret-input
      required: true
      placeholder:
        zh_Hans: 在此输入您的 API Key
        en_US: Enter your API Key
    - variable: openai_organization
      label:
        zh_Hans: 组织 ID
        en_US: Organization
      type: text-input
      required: false
      placeholder:
        zh_Hans: 在此输入您的组织 ID
        en_US: Enter your Organization ID
    - variable: openai_api_base
      label:
        zh_Hans: API Base
        en_US: API Base
      type: text-input
      required: false
      placeholder:
        zh_Hans: 在此输入您的 API Base, 如:https://api.openai.com
        en_US: Enter your API Base, e.g. https://api.openai.com
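Both schemas above declare the same three credential variables; model_credential_schema applies when a user registers a customizable model, while provider_credential_schema holds provider-wide credentials. A hypothetical Go struct mirroring those variables (the struct itself is illustrative; only the JSON keys come from the schema):

package credentials

// openAICredentials mirrors the credential variables declared above.
// This struct is an illustration, not part of this commit.
type openAICredentials struct {
    APIKey       string `json:"openai_api_key"`      // required, secret-input
    Organization string `json:"openai_organization"` // optional
    APIBase      string `json:"openai_api_base"`     // optional, e.g. https://api.openai.com
}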
35
drivers/ai-service/config.go
Normal file
@@ -0,0 +1,35 @@
package ai_service

import (
    "encoding/json"
    "strings"

    "github.com/eolinker/eosc"
)

// Config is the configuration for the service_ai driver.
type Config struct {
    Title    string         `json:"title" label:"标题"`
    Timeout  int64          `json:"timeout" label:"请求超时时间" default:"2000" minimum:"1" title:"单位:ms,最小值:1"`
    Retry    int            `json:"retry" label:"失败重试次数"`
    Scheme   string         `json:"scheme" label:"请求协议" enum:"HTTP,HTTPS"`
    Provider eosc.RequireId `json:"provider" required:"false" empty_label:"使用匿名上游" label:"服务发现" skill:"github.com/eolinker/apinto/discovery.discovery.IDiscovery"`
}

func (c *Config) String() string {
    data, _ := json.Marshal(c)
    return string(data)
}

// rebuild normalizes the config: negative retry/timeout values are reset
// to 0, and the scheme falls back to "http" unless it is "http" or "https".
func (c *Config) rebuild() {
    if c.Retry < 0 {
        c.Retry = 0
    }
    if c.Timeout < 0 {
        c.Timeout = 0
    }
    c.Scheme = strings.ToLower(c.Scheme)
    if c.Scheme != "http" && c.Scheme != "https" {
        c.Scheme = "http"
    }
}
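To pin down what rebuild does, a hypothetical test (not part of this commit) showing the coercions applied to out-of-range values:

package ai_service

import "testing"

// TestRebuild is illustrative only: it asserts that rebuild resets negative
// Retry/Timeout values to 0 and falls back to "http" for unknown schemes.
func TestRebuild(t *testing.T) {
    c := &Config{Retry: -1, Timeout: -5, Scheme: "WSS"}
    c.rebuild()
    if c.Retry != 0 || c.Timeout != 0 || c.Scheme != "http" {
        t.Fatalf("unexpected config after rebuild: %s", c)
    }
}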
22
drivers/ai-service/driver.go
Normal file
@@ -0,0 +1,22 @@
package ai_service

import (
    "github.com/eolinker/apinto/drivers"
    "github.com/eolinker/eosc"
)

// Create creates a worker instance.
func Create(id, name string, v *Config, workers map[eosc.RequireId]eosc.IWorker) (eosc.IWorker, error) {

    w := &executor{
        WorkerBase: drivers.Worker(id, name),
        title:      v.Title,
    }

    err := w.Reset(v, workers)
    if err != nil {
        return nil, err
    }

    return w, nil
}
42
drivers/ai-service/executor.go
Normal file
@@ -0,0 +1,42 @@
package ai_service

import (
    "github.com/eolinker/apinto/drivers"
    "github.com/eolinker/apinto/service"

    "github.com/eolinker/eosc"
    "github.com/eolinker/eosc/eocontext"
)

var _ service.IService = &executor{}

type executor struct {
    drivers.WorkerBase
    title string
    eocontext.BalanceHandler
}

func (e *executor) PassHost() (eocontext.PassHostMod, string) {
    return eocontext.NodeHost, ""
}

func (e *executor) Title() string {
    return e.title
}

func (e *executor) Start() error {
    return nil
}

func (e *executor) Reset(conf interface{}, workers map[eosc.RequireId]eosc.IWorker) error {
    //TODO implement me
    panic("implement me")
}

func (e *executor) Stop() error {
    return nil
}

func (e *executor) CheckSkill(skill string) bool {
    return service.CheckSkill(skill)
}
28
drivers/ai-service/factory.go
Normal file
@@ -0,0 +1,28 @@
package ai_service

import (
    "github.com/eolinker/apinto/drivers"
    iphash "github.com/eolinker/apinto/upstream/ip-hash"
    roundrobin "github.com/eolinker/apinto/upstream/round-robin"
    "github.com/eolinker/eosc"
    "github.com/eolinker/eosc/log"
)

var DriverName = "service_ai"

// Register registers the service_ai driver factory.
func Register(register eosc.IExtenderDriverRegister) {
    err := register.RegisterExtenderDriver(DriverName, NewFactory())
    if err != nil {
        log.Errorf("register %s %s", DriverName, err)
        return
    }
}

// NewFactory creates the service_ai driver factory.
func NewFactory() eosc.IExtenderDriverFactory {
    roundrobin.Register()
    iphash.Register()
    return drivers.NewFactory[Config](Create)
}
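NewFactory registers the round-robin and ip-hash balancers as a side effect, so any process that wires in this driver also gets those balancing algorithms. A hypothetical bootstrap hook (apinto's real entry point differs; this only shows where Register would be called):

package bootstrap

import (
    ai_service "github.com/eolinker/apinto/drivers/ai-service"
    "github.com/eolinker/eosc"
)

// wireDrivers is a hypothetical hook: the host process would pass its
// extender driver register here during startup.
func wireDrivers(register eosc.IExtenderDriverRegister) {
    ai_service.Register(register)
}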
11
drivers/plugins/ai-prompt/config.go
Normal file
@@ -0,0 +1,11 @@
package ai_prompt

type Config struct {
    Prompt    string     `json:"prompt"`
    Variables []Variable `json:"variables"`
}

type Variable struct {
    Key     string `json:"key"`
    Require bool   `json:"require"`
}
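A hypothetical instance of this config as it would arrive in JSON (the prompt text and variable names are illustrative, not part of this commit):

package ai_prompt

import (
    "encoding/json"
    "fmt"
)

// exampleConfig is illustrative only: it decodes a sample plugin config
// declaring one required template variable.
func exampleConfig() (*Config, error) {
    raw := `{
      "prompt": "You are a translator. Translate the user input into {{lang}}.",
      "variables": [{"key": "lang", "require": true}]
    }`
    var c Config
    if err := json.Unmarshal([]byte(raw), &c); err != nil {
        return nil, err
    }
    fmt.Println(c.Prompt, len(c.Variables))
    return &c, nil
}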
14
drivers/plugins/ai-prompt/driver.go
Normal file
@@ -0,0 +1,14 @@
package ai_prompt

import (
    "github.com/eolinker/apinto/drivers"
    "github.com/eolinker/eosc"
)

func Create(id, name string, v *Config, workers map[eosc.RequireId]eosc.IWorker) (eosc.IWorker, error) {
    w := &executor{
        WorkerBase: drivers.Worker(id, name),
    }
    err := w.Reset(v, workers)
    return w, err
}
116
drivers/plugins/ai-prompt/executor.go
Normal file
@@ -0,0 +1,116 @@
package ai_prompt

import (
    "encoding/json"
    "errors"
    "fmt"
    "strings"

    "github.com/eolinker/apinto/drivers"
    "github.com/eolinker/eosc"
    "github.com/eolinker/eosc/eocontext"
    http_context "github.com/eolinker/eosc/eocontext/http-context"
)

type RequestMessage struct {
    Messages  []Message         `json:"messages"`
    Variables map[string]string `json:"variables"`
}

type Message struct {
    Role    string `json:"role"`
    Content string `json:"content"`
}

type executor struct {
    drivers.WorkerBase
    prompt    string
    required  bool
    variables map[string]bool
}

func (e *executor) DoFilter(ctx eocontext.EoContext, next eocontext.IChain) (err error) {
    // check whether this is a websocket request
    return http_context.DoHttpFilter(e, ctx, next)
}

func (e *executor) DoHttpFilter(ctx http_context.IHttpContext, next eocontext.IChain) error {
    if len(e.variables) > 0 {
        body, err := ctx.Proxy().Body().RawBody()
        if err != nil {
            return err
        }
        body, err = genRequestMessage(body, e.prompt, e.variables, e.required)
        if err != nil {
            return err
        }
        ctx.Proxy().Body().SetRaw("application/json", body)
    }

    if next != nil {
        return next.DoChain(ctx)
    }
    return nil
}

// genRequestMessage substitutes the request's variables into the prompt
// template and prepends the rendered prompt as a system message.
func genRequestMessage(body []byte, prompt string, variables map[string]bool, required bool) ([]byte, error) {
    baseMsg := eosc.NewBase[RequestMessage]()
    err := json.Unmarshal(body, baseMsg)
    if err != nil {
        return nil, err
    }

    if len(baseMsg.Config.Variables) == 0 && required {
        return nil, errors.New("variables is required")
    }

    for k, v := range variables {
        if _, ok := baseMsg.Config.Variables[k]; !ok && v {
            return nil, fmt.Errorf("variable %s is required", k)
        }
        prompt = strings.Replace(prompt, fmt.Sprintf("{{%s}}", k), baseMsg.Config.Variables[k], -1)
    }
    messages := []Message{
        {
            Role:    "system",
            Content: prompt,
        },
    }
    baseMsg.Config.Messages = append(messages, baseMsg.Config.Messages...)
    return json.Marshal(baseMsg)
}

func (e *executor) Destroy() {
}

func (e *executor) Start() error {
    return nil
}

func (e *executor) Reset(conf interface{}, workers map[eosc.RequireId]eosc.IWorker) error {
    cfg, ok := conf.(*Config)
    if !ok {
        return errors.New("invalid config")
    }
    variables := make(map[string]bool)
    required := false

    for _, v := range cfg.Variables {
        variables[v.Key] = v.Require
        if v.Require {
            required = true
        }
    }
    e.variables = variables
    e.required = required
    e.prompt = cfg.Prompt
    return nil
}

func (e *executor) Stop() error {
    return nil
}

func (e *executor) CheckSkill(skill string) bool {
    return http_context.FilterSkillName == skill
}
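genRequestMessage is easiest to follow end to end: the variables carried in the request body are substituted into the {{key}} placeholders of the configured prompt, and the rendered prompt is prepended as a system message. A hypothetical call (the body and prompt are illustrative; it assumes eosc.NewBase keeps the extra body fields alongside the typed config, as the code above relies on):

package ai_prompt

import "fmt"

// exampleGenRequestMessage is illustrative only; it shows the body rewrite
// the plugin performs before the request is proxied upstream.
func exampleGenRequestMessage() {
    body := []byte(`{"messages":[{"role":"user","content":"bonjour"}],"variables":{"lang":"English"}}`)
    out, err := genRequestMessage(body, "Translate the user input into {{lang}}.",
        map[string]bool{"lang": true}, true)
    if err != nil {
        panic(err)
    }
    // The messages array now starts with the rendered system prompt
    // "Translate the user input into English." followed by the user message.
    fmt.Println(string(out))
}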
28
drivers/plugins/ai-prompt/factory.go
Normal file
@@ -0,0 +1,28 @@
package ai_prompt

import (
    "github.com/eolinker/apinto/drivers"
    "github.com/eolinker/eosc"
)

const (
    Name = "ai_prompt"
)

func Register(register eosc.IExtenderDriverRegister) {
    register.RegisterExtenderDriver(Name, NewFactory())
}

type Factory struct {
    eosc.IExtenderDriverFactory
}

func NewFactory() *Factory {
    return &Factory{
        IExtenderDriverFactory: drivers.NewFactory[Config](Create),
    }
}

func (f *Factory) Create(profession string, name string, label string, desc string, params map[string]interface{}) (eosc.IExtenderDriver, error) {
    return f.IExtenderDriverFactory.Create(profession, name, label, desc, params)
}
2
go.mod
@@ -185,4 +185,4 @@ require (

 replace github.com/soheilhy/cmux v0.1.5 => github.com/hmzzrcs/cmux v0.1.6

-//replace github.com/eolinker/eosc => ../eosc
+replace github.com/eolinker/eosc => ../eosc
@@ -1,6 +1,7 @@
 package fasthttp_client

 import (
+	"crypto/tls"
 	"fmt"
 	"net"
 	"strings"
@@ -94,8 +95,11 @@ func (c *Client) getHostClient(addr string, rewriteHost string) (*fasthttp.HostC
 	}

 	hc = &fasthttp.HostClient{
-		Addr:  httpAddr,
-		IsTLS: isTLS,
+		Addr:  httpAddr,
+		IsTLS: isTLS,
+		TLSConfig: &tls.Config{
+			InsecureSkipVerify: true,
+		},
 		Dial:               dial,
 		MaxConns:           DefaultMaxConns,
 		MaxConnWaitTimeout: DefaultMaxConnWaitTimeout,
@@ -143,9 +143,11 @@ func (ctx *HttpContext) SendTo(scheme string, node eoscContext.INode, timeout ti
 	case eoscContext.NodeHost:
+		rewriteHost = host
 		request.URI().SetHost(host)
 		//ctx.proxyRequest.Header().SetHost(targetHost)
 	case eoscContext.ReWriteHost:
+		rewriteHost = targetHost
 		request.URI().SetHost(targetHost)
 		//ctx.proxyRequest.Header().SetHost(targetHost)
 	}
 	beginTime := time.Now()
 	ctx.response.responseError = fasthttp_client.ProxyTimeout(scheme, rewriteHost, node, request, &ctx.fastHttpRequestCtx.Response, timeout)
@@ -153,17 +155,23 @@
 	if ctx.response.Response != nil {
 		responseHeader = ctx.response.Response.Header
 	}

 	agent := newRequestAgent(&ctx.proxyRequest, host, scheme, responseHeader, beginTime, time.Now())

 	if ctx.response.responseError != nil {
 		agent.setStatusCode(504)
 	} else {
 		ctx.response.ResponseHeader.refresh()

 		agent.setStatusCode(ctx.fastHttpRequestCtx.Response.StatusCode())
 	}

 	if ctx.fastHttpRequestCtx.Response.RemoteAddr() != nil {
 		ip, port := parseAddr(ctx.fastHttpRequestCtx.Response.RemoteAddr().String())
 		agent.setRemoteIP(ip)
 		agent.setRemotePort(port)
 		ctx.response.remoteIP = ip
 		ctx.response.remotePort = port
 		agent.setStatusCode(ctx.fastHttpRequestCtx.Response.StatusCode())
 	}
 	agent.responseBody = string(ctx.response.Response.Body())

@@ -11,7 +11,6 @@ const (

 type IService interface {
 	eosc.IWorker
 	eoscContext.EoApp
 	eoscContext.BalanceHandler
 	eoscContext.UpstreamHostHandler
 	Title() string