Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-10-04 16:02:41 +08:00)

- Fixed duplicate model entries in Blackbox provider model_aliases
- Added meta-llama- to llama- name cleaning in Cloudflare provider
- Enhanced PollinationsAI provider with improved vision model detection
- Added reasoning support to PollinationsAI provider
- Fixed HuggingChat authentication to include headers and impersonate
- Removed unused max_inputs_length parameter from HuggingFaceAPI
- Renamed extra_data to extra_body for consistency across providers
- Added Puter provider with grouped model support
- Enhanced AnyProvider with grouped model display and better model organization
- Fixed model cleaning in AnyProvider to handle more model name variations
- Added api_key handling for HuggingFace providers in AnyProvider
- Added see_stream helper function to parse event streams (a generic sketch follows this list)
- Updated GUI server to handle JsonConversation properly
- Fixed aspect ratio handling in image generation functions
- Added ResponsesConfig and ClientResponse for new API endpoint
- Updated requirements to include markitdown
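The see_stream helper itself is not shown on this page, so as a rough illustration only: a generic parser for OpenAI-style server-sent-event streams might look like the sketch below. The name, the byte-iterator input, and the "data:" framing are assumptions, not the actual g4f signature.

import json
from typing import Iterator

def see_stream(lines: Iterator[bytes]) -> Iterator[dict]:
    """Hypothetical sketch: yield JSON payloads from an SSE byte stream.

    Assumes each event arrives as a line of the form b"data: {...}",
    with b"data: [DONE]" marking the end of the stream.
    """
    for line in lines:
        line = line.strip()
        if not line.startswith(b"data:"):
            continue  # skip blank keep-alives and non-data fields
        chunk = line[len(b"data:"):].strip()
        if chunk == b"[DONE]":
            break  # OpenAI-style end-of-stream sentinel
        yield json.loads(chunk)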
52 lines
1.5 KiB
Python
from __future__ import annotations

from ...typing import CreateResult, Messages
from ..helper import filter_none
from ..template import OpenaiTemplate

# Mapping of internal model ids to the display names used by TheB.AI.
models = {
    "theb-ai": "TheB.AI",
    "gpt-3.5-turbo": "GPT-3.5",
    "gpt-4-turbo": "GPT-4 Turbo",
    "gpt-4": "GPT-4",
    "claude-3.5-sonnet": "Claude",
    "llama-2-7b-chat": "Llama 2 7B",
    "llama-2-13b-chat": "Llama 2 13B",
    "llama-2-70b-chat": "Llama 2 70B",
    "code-llama-7b": "Code Llama 7B",
    "code-llama-13b": "Code Llama 13B",
    "code-llama-34b": "Code Llama 34B",
    "qwen-2-72b": "Qwen"
}

class ThebApi(OpenaiTemplate):
    label = "TheB.AI API"
    url = "https://theb.ai"
    login_url = "https://beta.theb.ai/home"
    api_base = "https://api.theb.ai/v1"
    working = True
    needs_auth = True

    default_model = "theb-ai"
    fallback_models = list(models)

    @classmethod
    def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        temperature: float = None,
        top_p: float = None,
        **kwargs
    ) -> CreateResult:
        # TheB.AI takes the system prompt as a model parameter, so collect
        # all system messages and strip them from the conversation.
        system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
        messages = [message for message in messages if message["role"] != "system"]
        data = {
            # filter_none drops any parameter that is still None.
            "model_params": filter_none(
                system_prompt=system_message,
                temperature=temperature,
                top_p=top_p,
            )
        }
        return super().create_async_generator(model, messages, extra_body=data, **kwargs)
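Since ThebApi extends OpenaiTemplate, it can be driven through the standard g4f client once an API key is supplied. A minimal usage sketch, assuming the usual g4f client interface; the api_key value is a placeholder:

from g4f.client import Client
from g4f.Provider import ThebApi

# Placeholder key: TheB.AI issues real keys after login at beta.theb.ai.
client = Client(provider=ThebApi, api_key="your-theb-api-key")

response = client.chat.completions.create(
    model="theb-ai",
    messages=[
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Say hello."},
    ],
    temperature=0.7,
)
print(response.choices[0].message.content)

Note that the system message here never reaches the wire as a chat turn: create_async_generator above folds it into model_params["system_prompt"] before delegating to the OpenAI-compatible template.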