feat: introduce AnyProvider & LM Arena, overhaul model/provider logic (#2925)

* feat: introduce AnyProvider & LM Arena, overhaul model/provider logic

- **Provider additions & removals**
  - Added `Provider/LMArenaProvider.py` with full async stream implementation and vision model support
  - Registered `LMArenaProvider` in `Provider/__init__.py`; removed old `hf_space/LMArenaProvider.py`
  - Created `providers/any_provider.py`; registers `AnyProvider` dynamically in `Provider`
- **Provider framework enhancements**
  - `providers/base_provider.py`
    - Added `video_models` and `audio_models` attributes
  - `providers/retry_provider.py`
    - Introduced `is_content()` helper; now treats `AudioResponse` as stream content
- **Cloudflare provider refactor**
  - `Provider/Cloudflare.py`
    - Re‑implemented `get_models()` with `read_models()` helper, `fallback_models`, robust nodriver/curl handling and model‑name cleaning
- **Other provider tweaks**
  - `Provider/Copilot.py` – removed `"reasoning"` alias and initial `setOptions` WS message
  - `Provider/PollinationsAI.py` & `PollinationsImage.py`
    - Converted `audio_models` from list to dict, adjusted usage checks and labels
  - `Provider/hf/__init__.py` – applies `model_aliases` remap before dispatch
  - `Provider/hf_space/DeepseekAI_JanusPro7b.py` – now merges media before upload
  - `needs_auth/Gemini.py` – dropped obsolete Gemini model entries
  - `needs_auth/GigaChat.py` – added lowercase `"gigachat"` alias
- **API & client updates**
  - Replaced `ProviderUtils` with new `Provider` map usage throughout API and GUI server
  - Integrated `AnyProvider` as default fallback in `g4f/client` sync & async flows (usage sketched after this list)
  - API endpoints now return counts of providers per model and filter by `x_ignored` header
- **GUI improvements**
  - Updated JS labels with emoji icons, provider ignore logic, model count display
- **Model registry**
  - Renamed base model `"GigaChat:latest"` ➜ `"gigachat"` in `models.py`
- **Miscellaneous**
  - Added audio/video flags to GUI provider list
  - Tightened error propagation in `retry_provider.raise_exceptions`
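
A minimal usage sketch of the new fallback path (not part of the diff; model name and prompt are illustrative): when no provider is passed, the client now resolves to `AnyProvider`, which fans the request out over every working provider that serves the requested model.

```python
# Sketch: with provider=None the client falls back to AnyProvider.
from g4f.client import Client

client = Client()  # no provider argument
response = client.chat.completions.create(
    model="gpt-4o",  # illustrative; any model listed by AnyProvider.get_models()
    messages=[{"role": "user", "content": "Hello"}],
)
print(response.choices[0].message.content)
```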

* Fix unittests
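
The test fix follows the relocation of `get_model_and_provider` out of the package root; the affected module now imports it like this (taken from the unittest diff below):

```python
from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk
from g4f.client.service import get_model_and_provider  # moved out of g4f.client
```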

* fix: handle None conversation when accessing provider-specific data

- Modified `AnyProvider` class in `g4f/providers/any_provider.py`
- Updated logic to check if `conversation` is not None before accessing `provider.__name__` attribute
- Wrapped `getattr(conversation, provider.__name__, None)` block in an additional `if conversation is not None` condition
- Changed `setattr(conversation, provider.__name__, chunk)` to use `chunk.get_dict()` instead of the object directly
- Ensured consistent use of `JsonConversation` when modifying or assigning `conversation` data
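
A condensed sketch of the guarded access pattern (the helper name is illustrative, not from the diff; the body mirrors `AnyProvider` in the diff below):

```python
from g4f.providers.response import JsonConversation

def restore_provider_conversation(conversation, provider, kwargs: dict) -> None:
    # Hypothetical helper mirroring AnyProvider: only read provider-specific
    # state when a conversation object actually exists.
    if conversation is not None:
        child = getattr(conversation, provider.__name__, None)
        if child is not None:
            kwargs["conversation"] = JsonConversation(**child)
```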

* feat: add provider string conversion & update IterListProvider call

- In `g4f/client/__init__.py`, within both `Completions` and `AsyncCompletions`, added a check that converts the provider from a string using `convert_to_provider(provider)` when applicable.
- In `g4f/providers/any_provider.py`, removed the second argument (`False`) from the `IterListProvider` constructor call in the async for loop.
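
Sketched usage of the added check (values illustrative):

```python
from g4f.client.service import convert_to_provider

provider = "Copilot"  # providers may now be passed by name
if isinstance(provider, str):
    provider = convert_to_provider(provider)  # resolves the name to a provider class
```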

---------

Co-authored-by: hlohaus <983577+hlohaus@users.noreply.github.com>
Author: H Lohaus
Date: 2025-04-18 14:10:51 +02:00
Committed by: GitHub
Parent: 0c0c72c203
Commit: 0a070bdf10
24 changed files with 669 additions and 341 deletions


@@ -22,18 +22,17 @@ import os
 import argparse
 import tempfile
 import time
-from typing import Optional, Dict, Any, List, Tuple
+from typing import Optional, Any, List
 from g4f.client import Client
 from g4f.models import ModelUtils
-import g4f.Provider
 from g4f import debug

 debug.logging = True

 # Constants
-DEFAULT_MODEL = "claude-3.7-sonnet"
-FALLBACK_MODELS = ["claude-3.5-sonnet", "o1", "o3-mini", "gpt-4o"]
+DEFAULT_MODEL = "o1"
+FALLBACK_MODELS = ["o1", "o3-mini", "gpt-4o"]
 MAX_DIFF_SIZE = None  # Set to None to disable truncation, or a number for character limit
 MAX_RETRIES = 3
 RETRY_DELAY = 2  # Seconds


@@ -3,7 +3,8 @@ from __future__ import annotations
 import unittest

 from g4f.errors import ModelNotFoundError
-from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk, get_model_and_provider
+from g4f.client import Client, AsyncClient, ChatCompletion, ChatCompletionChunk
+from g4f.client.service import get_model_and_provider
 from g4f.Provider.Copilot import Copilot
 from g4f.models import gpt_4o
 from .mocks import AsyncGeneratorProviderMock, ModelProviderMock, YieldProviderMock

File: g4f/Provider/Cloudflare.py

@@ -8,13 +8,14 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileM
 from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
 from ..requests import DEFAULT_HEADERS, has_nodriver, has_curl_cffi
 from ..providers.response import FinishReason, Usage
-from ..errors import ResponseStatusError, ModelNotFoundError
+from ..errors import ResponseStatusError, ModelNotFoundError, MissingRequirementsError
+from .. import debug
 from .helper import render_messages

 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     label = "Cloudflare AI"
     url = "https://playground.ai.cloudflare.com"
-    working = True
+    working = has_curl_cffi
     use_nodriver = True
     api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
     models_url = "https://playground.ai.cloudflare.com/api/models"
@@ -38,29 +39,49 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
"qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq", "qwen-1.5-7b": "@cf/qwen/qwen1.5-7b-chat-awq",
"qwen-2.5-coder": "@cf/qwen/qwen2.5-coder-32b-instruct", "qwen-2.5-coder": "@cf/qwen/qwen2.5-coder-32b-instruct",
} }
fallback_models = list(model_aliases.keys())
_args: dict = None _args: dict = None
@classmethod @classmethod
def get_models(cls) -> str: def get_models(cls) -> str:
if not cls.models: def read_models():
if cls._args is None:
if has_nodriver:
get_running_loop(check_nested=True)
args = get_args_from_nodriver(cls.url)
cls._args = asyncio.run(args)
elif not has_curl_cffi:
return cls.models
else:
cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}}
with Session(**cls._args) as session: with Session(**cls._args) as session:
response = session.get(cls.models_url) response = session.get(cls.models_url)
cls._args["cookies"] = merge_cookies(cls._args["cookies"], response) cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
try:
raise_for_status(response) raise_for_status(response)
except ResponseStatusError:
return cls.models
json_data = response.json() json_data = response.json()
cls.models = [model.get("name") for model in json_data.get("models")] def clean_name(name: str) -> str:
return name.split("/")[-1].replace(
"-instruct", "").replace(
"-17b-16e", "").replace(
"-chat", "").replace(
"-fp8", "").replace(
"-fast", "").replace(
"-int8", "").replace(
"-awq", "").replace(
"-qvq", "").replace(
"-r1", "")
model_map = {clean_name(model.get("name")): model.get("name") for model in json_data.get("models")}
cls.models = list(model_map.keys())
cls.model_aliases = {**cls.model_aliases, **model_map}
if not cls.models:
try:
if cls._args is None:
cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}}
read_models()
except ResponseStatusError as f:
if has_nodriver:
get_running_loop(check_nested=True)
args = get_args_from_nodriver(cls.url)
try:
cls._args = asyncio.run(args)
read_models()
except RuntimeError as e:
cls.models = cls.fallback_models
debug.log(f"Nodriver is not available: {type(e).__name__}: {e}")
else:
cls.models = cls.fallback_models
debug.log(f"Nodriver is not installed: {type(f).__name__}: {f}")
return cls.models return cls.models
@classmethod @classmethod

File: g4f/Provider/Copilot.py

@@ -48,7 +48,6 @@ class Copilot(AsyncGeneratorProvider, ProviderModelMixin):
"gpt-4": default_model, "gpt-4": default_model,
"gpt-4o": default_model, "gpt-4o": default_model,
"o1": "Think Deeper", "o1": "Think Deeper",
"reasoning": "Think Deeper",
"dall-e-3": default_model "dall-e-3": default_model
} }
@@ -144,7 +143,6 @@ class Copilot(AsyncGeneratorProvider, ProviderModelMixin):
                     uploaded_images.append({"type":"image", "url": media})

         wss = await session.ws_connect(cls.websocket_url, timeout=3)
-        await wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
         await wss.send(json.dumps({
             "event": "send",
             "conversationId": conversation_id,

File: g4f/Provider/LMArenaProvider.py (new file)

@@ -0,0 +1,371 @@
from __future__ import annotations

import json
import uuid
import requests

from ..typing import AsyncResult, Messages, MediaListType
from ..requests import StreamSession, FormData, raise_for_status
from ..providers.response import FinishReason, JsonConversation
from ..tools.media import merge_media
from ..image import to_bytes, is_accepted_format
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
from .helper import get_last_user_message
from .. import debug

class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
    label = "LM Arena"
    url = "https://lmarena.ai"
    api_endpoint = "/queue/join?"
    working = True

    default_model = "gpt-4o"
    model_aliases = {default_model: "chatgpt-4o-latest-20250326"}
    models = [
        default_model, "o3-2025-04-16", "o4-mini-2025-04-16", "gpt-4.1-2025-04-14",
        "gemini-2.5-pro-exp-03-25", "llama-4-maverick-03-26-experimental", "grok-3-preview-02-24",
        "claude-3-7-sonnet-20250219", "claude-3-7-sonnet-20250219-thinking-32k", "deepseek-v3-0324",
        "llama-4-maverick-17b-128e-instruct", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14",
        "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-flash-001", "gemini-2.0-flash-lite-preview-02-05",
        "gemma-3-27b-it", "gemma-3-12b-it", "gemma-3-4b-it", "deepseek-r1",
        "claude-3-5-sonnet-20241022", "o3-mini", "llama-3.3-70b-instruct", "gpt-4o-mini-2024-07-18",
        "gpt-4o-2024-11-20", "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "command-a-03-2025",
        "qwq-32b", "p2l-router-7b", "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20240620",
        "doubao-1.5-pro-32k-250115", "doubao-1.5-vision-pro-32k-250115", "mistral-small-24b-instruct-2501",
        "phi-4", "amazon-nova-pro-v1.0", "amazon-nova-lite-v1.0", "amazon-nova-micro-v1.0",
        "cobalt-exp-beta-v3", "cobalt-exp-beta-v4", "qwen-max-2025-01-25", "qwen-plus-0125-exp",
        "qwen2.5-vl-32b-instruct", "qwen2.5-vl-72b-instruct", "gemini-1.5-pro-002", "gemini-1.5-flash-002",
        "gemini-1.5-flash-8b-001", "gemini-1.5-pro-001", "gemini-1.5-flash-001", "llama-3.1-405b-instruct-bf16",
        "llama-3.3-nemotron-49b-super-v1", "llama-3.1-nemotron-ultra-253b-v1", "llama-3.1-nemotron-70b-instruct",
        "llama-3.1-70b-instruct", "llama-3.1-8b-instruct", "hunyuan-standard-2025-02-10",
        "hunyuan-large-2025-02-10", "hunyuan-standard-vision-2024-12-31", "hunyuan-turbo-0110",
        "hunyuan-turbos-20250226", "mistral-large-2411", "pixtral-large-2411", "mistral-large-2407",
        "llama-3.1-nemotron-51b-instruct", "granite-3.1-8b-instruct", "granite-3.1-2b-instruct",
        "step-2-16k-exp-202412", "step-2-16k-202502", "step-1o-vision-32k-highres", "yi-lightning",
        "glm-4-plus", "glm-4-plus-0111", "jamba-1.5-large", "jamba-1.5-mini",
        "gemma-2-27b-it", "gemma-2-9b-it", "gemma-2-2b-it", "eureka-chatbot",
        "claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229", "nemotron-4-340b",
        "llama-3-70b-instruct", "llama-3-8b-instruct", "qwen2.5-plus-1127", "qwen2.5-coder-32b-instruct",
        "qwen2.5-72b-instruct", "qwen-max-0919", "qwen-vl-max-1119", "qwen-vl-max-0809",
        "llama-3.1-tulu-3-70b", "olmo-2-0325-32b-instruct", "gpt-3.5-turbo-0125",
        "reka-core-20240904", "reka-flash-20240904", "c4ai-aya-expanse-32b", "c4ai-aya-expanse-8b",
        "c4ai-aya-vision-32b", "command-r-plus-08-2024", "command-r-08-2024", "codestral-2405",
        "mixtral-8x22b-instruct-v0.1", "mixtral-8x7b-instruct-v0.1", "pixtral-12b-2409", "ministral-8b-2410"
    ]
    vision_models = [
        "o3-2025-04-16", "o4-mini-2025-04-16", "gpt-4.1-2025-04-14", "gemini-2.5-pro-exp-03-25",
        "claude-3-7-sonnet-20250219", "claude-3-7-sonnet-20250219-thinking-32k",
        "llama-4-maverick-17b-128e-instruct", "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14",
        "gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-flash-001", "gemini-2.0-flash-lite-preview-02-05",
        "claude-3-5-sonnet-20241022", "gpt-4o-mini-2024-07-18", "gpt-4o-2024-11-20", "gpt-4o-2024-08-06",
        "gpt-4o-2024-05-13", "claude-3-5-sonnet-20240620", "doubao-1.5-vision-pro-32k-250115",
        "amazon-nova-pro-v1.0", "amazon-nova-lite-v1.0", "qwen2.5-vl-32b-instruct", "qwen2.5-vl-72b-instruct",
        "gemini-1.5-pro-002", "gemini-1.5-flash-002", "gemini-1.5-flash-8b-001", "gemini-1.5-pro-001",
        "gemini-1.5-flash-001", "hunyuan-standard-vision-2024-12-31", "pixtral-large-2411",
        "step-1o-vision-32k-highres", "claude-3-haiku-20240307", "claude-3-sonnet-20240229",
        "claude-3-opus-20240229", "qwen-vl-max-1119", "qwen-vl-max-0809", "reka-core-20240904",
        "reka-flash-20240904", "c4ai-aya-vision-32b", "pixtral-12b-2409"
    ]
    _args: dict = None

    @classmethod
    def get_models(cls) -> list[str]:
        if not cls.models:
            url = "https://storage.googleapis.com/public-arena-no-cors/p2l-explorer/data/overall/arena.json"
            data = requests.get(url).json()
            cls.models = [model[0] for model in data["leaderboard"]]
        return cls.models

    @classmethod
    def _build_payloads(cls, model_id: str, session_hash: str, text: str, files: list, max_tokens: int, temperature: float, top_p: float):
        first_payload = {
            "data": [
                None,
                model_id,
                {"text": text, "files": files},
                {
                    "text_models": [model_id],
                    "all_text_models": [model_id],
                    "vision_models": [],
                    "all_vision_models": [],
                    "image_gen_models": [],
                    "all_image_gen_models": [],
                    "search_models": [],
                    "all_search_models": [],
                    "models": [model_id],
                    "all_models": [model_id],
                    "arena_type": "text-arena"
                }
            ],
            "event_data": None,
            "fn_index": 117,
            "trigger_id": 159,
            "session_hash": session_hash
        }
        second_payload = {
            "data": [],
            "event_data": None,
            "fn_index": 118,
            "trigger_id": 159,
            "session_hash": session_hash
        }
        third_payload = {
            "data": [None, temperature, top_p, max_tokens],
            "event_data": None,
            "fn_index": 119,
            "trigger_id": 159,
            "session_hash": session_hash
        }
        return first_payload, second_payload, third_payload

    @classmethod
    def _build_second_payloads(cls, model_id: str, session_hash: str, text: str, max_tokens: int, temperature: float, top_p: float):
        first_payload = {
            "data":[None,model_id,text,{
                "text_models":[model_id],
                "all_text_models":[model_id],
                "vision_models":[],
                "image_gen_models":[],
                "all_image_gen_models":[],
                "search_models":[],
                "all_search_models":[],
                "models":[model_id],
                "all_models":[model_id],
                "arena_type":"text-arena"}],
            "event_data": None,
            "fn_index": 120,
            "trigger_id": 157,
            "session_hash": session_hash
        }
        second_payload = {
            "data": [],
            "event_data": None,
            "fn_index": 121,
            "trigger_id": 157,
            "session_hash": session_hash
        }
        third_payload = {
            "data": [None, temperature, top_p, max_tokens],
            "event_data": None,
            "fn_index": 122,
            "trigger_id": 157,
            "session_hash": session_hash
        }
        return first_payload, second_payload, third_payload

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        media: MediaListType = None,
        conversation: JsonConversation = None,
        return_conversation: bool = False,
        max_tokens: int = 2048,
        temperature: float = 0.7,
        top_p: float = 1,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = cls.default_model
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        prompt = get_last_user_message(messages)
        new_conversation = False
        if conversation is None:
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace("-", ""))
            new_conversation = True
        async with StreamSession(impersonate="chrome") as session:
            if new_conversation:
                media = list(merge_media(media, messages))
                if media:
                    data = FormData()
                    for i in range(len(media)):
                        media[i] = (to_bytes(media[i][0]), media[i][1])
                    for image, image_name in media:
                        data.add_field(f"files", image, filename=image_name)
                    async with session.post(f"{cls.url}/upload", params={"upload_id": conversation.session_hash}, data=data) as response:
                        await raise_for_status(response)
                        image_files = await response.json()
                    media = [{
                        "path": image_file,
                        "url": f"{cls.url}/file={image_file}",
                        "orig_name": media[i][1],
                        "size": len(media[i][0]),
                        "mime_type": is_accepted_format(media[i][0]),
                        "meta": {
                            "_type": "gradio.FileData"
                        }
                    } for i, image_file in enumerate(image_files)]
                first_payload, second_payload, third_payload = cls._build_payloads(model, conversation.session_hash, prompt, media, max_tokens, temperature, top_p)
            else:
                first_payload, second_payload, third_payload = cls._build_second_payloads(model, conversation.session_hash, prompt, max_tokens, temperature, top_p)
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json",
            }
            # POST 1
            async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy, headers=headers) as response:
                await raise_for_status(response)
            # POST 2
            async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy, headers=headers) as response:
                await raise_for_status(response)
            # POST 3
            async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy, headers=headers) as response:
                await raise_for_status(response)

            # Long stream GET
            async def sse_stream():
                stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
                async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
                    await raise_for_status(response)
                    text_position = 0
                    count = 0
                    async for line in response.iter_lines():
                        if line.startswith(b"data: "):
                            try:
                                msg = json.loads(line[6:])
                            except Exception as e:
                                raise RuntimeError(f"Failed to decode JSON from stream: {line}", e)
                            if msg.get("msg") == "process_generating":
                                data = msg["output"]["data"][1]
                                if data:
                                    data = data[0]
                                    if len(data) > 2:
                                        if isinstance(data[2], list):
                                            data[2] = data[2][-1]
                                        content = data[2][text_position:]
                                        if content.endswith("▌"):
                                            content = content[:-2]
                                        if content:
                                            count += 1
                                            yield count, content
                                            text_position += len(content)
                            elif msg.get("msg") == "close_stream":
                                break
                            elif msg.get("msg") not in ("process_completed", "process_starts", "estimation"):
                                debug.log(f"Unexpected message: {msg}")
            count = 0
            async for count, chunk in sse_stream():
                yield chunk
            if count == 0:
                raise RuntimeError("No response from server.")
            if return_conversation:
                yield conversation
            if count == max_tokens:
                yield FinishReason("length")

File: g4f/Provider/PollinationsAI.py

@@ -47,9 +47,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = "flux"
     default_vision_model = default_model
     default_audio_model = "openai-audio"
-    text_models = [default_model]
+    text_models = [default_model, "evil"]
     image_models = [default_image_model]
-    audio_models = [default_audio_model]
+    audio_models = {default_audio_model: []}
     extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
     vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"]
     _models_loaded = False
@@ -66,9 +66,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
"llama-3.1-8b": "llamalight", "llama-3.1-8b": "llamalight",
"llama-3.3-70b": "llama-scaleway", "llama-3.3-70b": "llama-scaleway",
"phi-4": "phi", "phi-4": "phi",
"gemini-2.0": "gemini",
"gemini-2.0-flash": "gemini",
"gemini-2.0-flash-thinking": "gemini-thinking",
"deepseek-r1": "deepseek-reasoning-large", "deepseek-r1": "deepseek-reasoning-large",
"deepseek-r1": "deepseek-reasoning", "deepseek-r1": "deepseek-reasoning",
"deepseek-v3": "deepseek", "deepseek-v3": "deepseek",
@@ -332,7 +329,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                             result = json.loads(line[6:])
                             if "error" in result:
                                 raise ResponseError(result["error"].get("message", result["error"]))
-                            if "usage" in result:
+                            if result.get("usage") is not None:
                                 yield Usage(**result["usage"])
                             choices = result.get("choices", [{}])
                             choice = choices.pop() if choices else {}
@@ -354,7 +351,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                         yield ToolCalls(message["tool_calls"])
                     else:
                         raise ResponseError(result)
-                if "usage" in result:
+                if result.get("usage") is not None:
                     yield Usage(**result["usage"])
                 finish_reason = choice.get("finish_reason")
                 if finish_reason:

File: g4f/Provider/PollinationsImage.py

@@ -8,10 +8,11 @@ from .PollinationsAI import PollinationsAI
 class PollinationsImage(PollinationsAI):
     label = "PollinationsImage"
+    parent = PollinationsAI.__name__
     default_model = "flux"
     default_vision_model = None
     default_image_model = default_model
-    audio_models = None
+    audio_models = {}
     image_models = [default_image_model]  # Default models
     _models_loaded = False  # Add a checkbox for synchronization

File: g4f/Provider/__init__.py

@@ -56,6 +56,7 @@ try:
     from .Jmuz import Jmuz
     from .LambdaChat import LambdaChat
     from .Liaobots import Liaobots
+    from .LMArenaProvider import LMArenaProvider
     from .OIVSCode import OIVSCode
 except ImportError as e:
     debug.error("Providers not loaded (F-L):", e)

File: g4f/Provider/hf/__init__.py

@@ -37,6 +37,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         **kwargs
     ) -> AsyncResult:
+        if model in cls.model_aliases:
+            model = cls.model_aliases[model]
         if "tools" not in kwargs and "media" not in kwargs and random.random() >= 0.5:
             try:
                 is_started = False

File: g4f/Provider/hf_space/DeepseekAI_JanusPro7b.py

@@ -13,6 +13,7 @@ from ..helper import format_prompt, format_image_prompt
 from ...providers.response import JsonConversation, ImageResponse, Reasoning
 from ...requests.aiohttp import StreamSession, StreamResponse, FormData
 from ...requests.raise_for_status import raise_for_status
+from ...tools.media import merge_media
 from ...image import to_bytes, is_accepted_format
 from ...cookies import get_cookies
 from ...errors import ResponseError
@@ -99,7 +100,8 @@ class DeepseekAI_JanusPro7b(AsyncGeneratorProvider, ProviderModelMixin):
             if return_conversation:
                 yield conversation
-            if media is not None:
+            media = list(merge_media(media, messages))
+            if media:
                 data = FormData()
                 for i in range(len(media)):
                     media[i] = (to_bytes(media[i][0]), media[i][1])

File: g4f/Provider/hf_space/LMArenaProvider.py (deleted)

@@ -1,253 +0,0 @@
from __future__ import annotations

import json
import uuid
import asyncio

from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...providers.response import FinishReason
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
from ..helper import format_prompt
from ... import debug

class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
    label = "LM Arena"
    url = "https://lmarena.ai"
    api_endpoint = "/queue/join?"
    working = True

    default_model = "chatgpt-4o-latest-20250326"
    model_aliases = {"gpt-4o": default_model}
    models = [
        default_model, "gpt-4.1-2025-04-14", "gemini-2.5-pro-exp-03-25",
        "llama-4-maverick-03-26-experimental", "grok-3-preview-02-24", "claude-3-7-sonnet-20250219",
        "claude-3-7-sonnet-20250219-thinking-32k", "deepseek-v3-0324", "llama-4-maverick-17b-128e-instruct",
        "gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14", "gemini-2.0-flash-thinking-exp-01-21",
        "gemini-2.0-flash-001", "gemini-2.0-flash-lite-preview-02-05", "gemma-3-27b-it",
        "gemma-3-12b-it", "gemma-3-4b-it", "deepseek-r1", "claude-3-5-sonnet-20241022",
        "o3-mini", "llama-3.3-70b-instruct", "gpt-4o-mini-2024-07-18", "gpt-4o-2024-11-20",
        "gpt-4o-2024-08-06", "gpt-4o-2024-05-13", "command-a-03-2025", "qwq-32b", "p2l-router-7b",
        "claude-3-5-haiku-20241022", "claude-3-5-sonnet-20240620", "doubao-1.5-pro-32k-250115",
        "doubao-1.5-vision-pro-32k-250115", "mistral-small-24b-instruct-2501", "phi-4",
        "amazon-nova-pro-v1.0", "amazon-nova-lite-v1.0", "amazon-nova-micro-v1.0",
        "cobalt-exp-beta-v3", "cobalt-exp-beta-v4", "qwen-max-2025-01-25", "qwen-plus-0125-exp",
        "qwen2.5-vl-32b-instruct", "qwen2.5-vl-72b-instruct", "gemini-1.5-pro-002", "gemini-1.5-flash-002",
        "gemini-1.5-flash-8b-001", "gemini-1.5-pro-001", "gemini-1.5-flash-001", "llama-3.1-405b-instruct-bf16",
        "llama-3.3-nemotron-49b-super-v1", "llama-3.1-nemotron-ultra-253b-v1", "llama-3.1-nemotron-70b-instruct",
        "llama-3.1-70b-instruct", "llama-3.1-8b-instruct", "hunyuan-standard-2025-02-10",
        "hunyuan-large-2025-02-10", "hunyuan-standard-vision-2024-12-31", "hunyuan-turbo-0110",
        "hunyuan-turbos-20250226", "mistral-large-2411", "pixtral-large-2411", "mistral-large-2407",
        "llama-3.1-nemotron-51b-instruct", "granite-3.1-8b-instruct", "granite-3.1-2b-instruct",
        "step-2-16k-exp-202412", "step-2-16k-202502", "step-1o-vision-32k-highres", "yi-lightning",
        "glm-4-plus", "glm-4-plus-0111", "jamba-1.5-large", "jamba-1.5-mini", "gemma-2-27b-it",
        "gemma-2-9b-it", "gemma-2-2b-it", "eureka-chatbot", "claude-3-haiku-20240307",
        "claude-3-sonnet-20240229", "claude-3-opus-20240229", "nemotron-4-340b", "llama-3-70b-instruct",
        "llama-3-8b-instruct", "qwen2.5-plus-1127", "qwen2.5-coder-32b-instruct", "qwen2.5-72b-instruct",
        "qwen-max-0919", "qwen-vl-max-1119", "qwen-vl-max-0809", "llama-3.1-tulu-3-70b",
        "olmo-2-0325-32b-instruct", "gpt-3.5-turbo-0125", "reka-core-20240904", "reka-flash-20240904",
        "c4ai-aya-expanse-32b", "c4ai-aya-expanse-8b", "c4ai-aya-vision-32b", "command-r-plus-08-2024",
        "command-r-08-2024", "codestral-2405", "mixtral-8x22b-instruct-v0.1", "mixtral-8x7b-instruct-v0.1",
        "pixtral-12b-2409", "ministral-8b-2410"]
    _args: dict = None

    @staticmethod
    def _random_session_hash():
        return str(uuid.uuid4())

    @classmethod
    def _build_payloads(cls, model_id: str, session_hash: str, messages: Messages, max_tokens: int, temperature: float, top_p: float):
        first_payload = {
            "data": [
                None,
                model_id,
                {"text": format_prompt(messages), "files": []},
                {
                    "text_models": [model_id],
                    "all_text_models": [model_id],
                    "vision_models": [],
                    "all_vision_models": [],
                    "image_gen_models": [],
                    "all_image_gen_models": [],
                    "search_models": [],
                    "all_search_models": [],
                    "models": [model_id],
                    "all_models": [model_id],
                    "arena_type": "text-arena"
                }
            ],
            "event_data": None,
            "fn_index": 117,
            "trigger_id": 159,
            "session_hash": session_hash
        }
        second_payload = {
            "data": [],
            "event_data": None,
            "fn_index": 118,
            "trigger_id": 159,
            "session_hash": session_hash
        }
        third_payload = {
            "data": [None, temperature, top_p, max_tokens],
            "event_data": None,
            "fn_index": 119,
            "trigger_id": 159,
            "session_hash": session_hash
        }
        return first_payload, second_payload, third_payload

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        max_tokens: int = 2048,
        temperature: float = 0.7,
        top_p: float = 1,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = cls.default_model
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        session_hash = cls._random_session_hash()
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
        async with StreamSession(impersonate="chrome", headers=headers) as session:
            first_payload, second_payload, third_payload = cls._build_payloads(model, session_hash, messages, max_tokens, temperature, top_p)

            # Long stream GET
            async def long_stream():
                # POST 1
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy) as response:
                    await raise_for_status(response)
                # POST 2
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy) as response:
                    await raise_for_status(response)
                # POST 3
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy) as response:
                    await raise_for_status(response)
                stream_url = f"{cls.url}/queue/data?session_hash={session_hash}"
                async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
                    await raise_for_status(response)
                    text_position = 0
                    count = 0
                    async for line in response.iter_lines():
                        if line.startswith(b"data: "):
                            try:
                                msg = json.loads(line[6:])
                            except Exception as e:
                                raise RuntimeError(f"Failed to decode JSON from stream: {line}", e)
                            if msg.get("msg") == "process_generating":
                                data = msg["output"]["data"][1]
                                if data:
                                    data = data[0]
                                    if len(data) > 2:
                                        if isinstance(data[2], list):
                                            data[2] = data[2][-1]
                                        content = data[2][text_position:]
                                        if content.endswith("▌"):
                                            content = content[:-2]
                                        if content:
                                            count += 1
                                            yield count, content
                                            text_position += len(content)
                            elif msg.get("msg") == "close_stream":
                                break
                            elif msg.get("msg") not in ("process_completed", "process_starts", "estimation"):
                                debug.log(f"Unexpected message: {msg}")
            count = 0
            async for count, chunk in long_stream():
                yield chunk
            if count == 0:
                await asyncio.sleep(10)
                async for count, chunk in long_stream():
                    yield chunk
                if count == 0:
                    raise RuntimeError("No response from server.")
            if count == max_tokens:
                yield FinishReason("length")

File: g4f/Provider/hf_space/__init__.py

@@ -10,7 +10,6 @@ from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
 from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
 from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
 from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
-from .LMArenaProvider import LMArenaProvider
 from .Microsoft_Phi_4 import Microsoft_Phi_4
 from .Qwen_QVQ_72B import Qwen_QVQ_72B
 from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
@@ -33,7 +32,6 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
         BlackForestLabs_Flux1Schnell,
         CohereForAI_C4AI_Command,
         DeepseekAI_JanusPro7b,
-        LMArenaProvider,
         Microsoft_Phi_4,
         Qwen_QVQ_72B,
         Qwen_Qwen_2_5,

File: g4f/Provider/needs_auth/Gemini.py

@@ -68,10 +68,6 @@ models = {
"gemini-2.0-flash-exp": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f299729663a2343f"]'}, "gemini-2.0-flash-exp": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f299729663a2343f"]'},
"gemini-2.0-flash-thinking": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"9c17b1863f581b8a"]'}, "gemini-2.0-flash-thinking": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"9c17b1863f581b8a"]'},
"gemini-2.0-flash-thinking-with-apps": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f8f8f5ea629f5d37"]'}, "gemini-2.0-flash-thinking-with-apps": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f8f8f5ea629f5d37"]'},
"gemini-2.0-exp-advanced": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"b1e46a6037e6aa9f"]'},
"gemini-1.5-flash": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"418ab5ea040b5c43"]'},
"gemini-1.5-pro": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"9d60dfae93c9ff1f"]'},
"gemini-1.5-pro-research": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"e5a44cb1dae2b489"]'},
} }
class Gemini(AsyncGeneratorProvider, ProviderModelMixin): class Gemini(AsyncGeneratorProvider, ProviderModelMixin):

File: g4f/Provider/needs_auth/GigaChat.py

@@ -66,6 +66,7 @@ class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
     needs_auth = True
     default_model = "GigaChat:latest"
     models = [default_model, "GigaChat-Plus", "GigaChat-Pro"]
+    model_aliases = {"gigachat": default_model}

     @classmethod
     async def create_async_generator(

File: g4f/api/__init__.py

@@ -43,7 +43,9 @@ from g4f.image import is_data_an_media, EXTENSIONS_MAP
 from g4f.image.copy_images import images_dir, copy_media, get_source_url
 from g4f.errors import ProviderNotFoundError, ModelNotFoundError, MissingAuthError, NoValidHarFileError
 from g4f.cookies import read_cookie_files, get_cookies_dir
-from g4f.Provider import ProviderType, ProviderUtils, __providers__
+from g4f.providers.types import ProviderType
+from g4f.providers.any_provider import AnyProvider
+from g4f import Provider
 from g4f.gui import get_gui_app
 from g4f.tools.files import supports_filename, get_async_streaming
 from .stubs import (
@@ -86,8 +88,8 @@ def create_app():
     if AppConfig.ignored_providers:
         for provider in AppConfig.ignored_providers:
-            if provider in ProviderUtils.convert:
-                ProviderUtils.convert[provider].working = False
+            if provider in Provider.__map__:
+                Provider.__map__[provider].working = False
     return app
@@ -232,13 +234,13 @@ class Api:
             return {
                 "object": "list",
                 "data": [{
-                    "id": model_id,
+                    "id": model,
                     "object": "model",
                     "created": 0,
-                    "owned_by": model.base_provider,
+                    "owned_by": "",
                     "image": isinstance(model, g4f.models.ImageModel),
                     "provider": False,
-                } for model_id, model in g4f.models.ModelUtils.convert.items()] +
+                } for model in AnyProvider.get_models()] +
                 [{
                     "id": provider_name,
                     "object": "model",
@@ -255,9 +257,9 @@ class Api:
             HTTP_200_OK: {"model": List[ModelResponseModel]},
         })
         async def models(provider: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None):
-            if provider not in ProviderUtils.convert:
+            if provider not in Provider.__map__:
                 return ErrorResponse.from_message("The provider does not exist.", 404)
-            provider: ProviderType = ProviderUtils.convert[provider]
+            provider: ProviderType = Provider.__map__[provider]
             if not hasattr(provider, "get_models"):
                 models = []
             elif credentials is not None and credentials.credentials != "secret":
@@ -448,7 +450,7 @@ class Api:
                 'created': 0,
                 'url': provider.url,
                 'label': getattr(provider, "label", None),
-            } for provider in __providers__ if provider.working]
+            } for provider in Provider.__providers__ if provider.working]

         @self.app.get("/v1/providers/{provider}", responses={
             HTTP_200_OK: {"model": ProviderResponseDetailModel},

File: g4f/client/__init__.py

@@ -15,12 +15,13 @@ from ..providers.response import *
 from ..errors import NoMediaResponseError
 from ..providers.retry_provider import IterListProvider
 from ..providers.asyncio import to_sync_generator
+from ..providers.any_provider import AnyProvider
 from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
 from ..tools.run_tools import async_iter_run_tools, iter_run_tools
 from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse, UsageModel, ToolCallModel
 from .models import ClientModels
 from .types import IterResponse, ImageProvider, Client as BaseClient
-from .service import get_model_and_provider, convert_to_provider
+from .service import convert_to_provider
 from .helper import find_stop, filter_json, filter_none, safe_aclose
 from .. import debug
@@ -299,14 +300,12 @@ class Completions:
kwargs["media"] = [(image, image_name)] kwargs["media"] = [(image, image_name)]
elif "images" in kwargs: elif "images" in kwargs:
kwargs["media"] = kwargs.pop("images") kwargs["media"] = kwargs.pop("images")
model, provider = get_model_and_provider( if provider is None:
model, provider = self.provider
self.provider if provider is None else provider, if provider is None:
stream, provider = AnyProvider
ignore_working, if isinstance(provider, str):
ignore_stream, provider = convert_to_provider(provider)
has_images="media" in kwargs
)
stop = [stop] if isinstance(stop, str) else stop stop = [stop] if isinstance(stop, str) else stop
if ignore_stream: if ignore_stream:
kwargs["ignore_stream"] = True kwargs["ignore_stream"] = True
@@ -600,14 +599,12 @@ class AsyncCompletions:
kwargs["media"] = [(image, image_name)] kwargs["media"] = [(image, image_name)]
elif "images" in kwargs: elif "images" in kwargs:
kwargs["media"] = kwargs.pop("images") kwargs["media"] = kwargs.pop("images")
model, provider = get_model_and_provider( if provider is None:
model, provider = self.provider
self.provider if provider is None else provider, if provider is None:
stream, provider = AnyProvider
ignore_working, if isinstance(provider, str):
ignore_stream, provider = convert_to_provider(provider)
has_images="media" in kwargs,
)
stop = [stop] if isinstance(stop, str) else stop stop = [stop] if isinstance(stop, str) else stop
if ignore_stream: if ignore_stream:
kwargs["ignore_stream"] = True kwargs["ignore_stream"] = True

File: g4f/gui/client/static/js/chat.v1.js

@@ -2347,7 +2347,7 @@ async function on_api() {
     models.forEach((model) => {
         let option = document.createElement("option");
         option.value = model.name;
-        option.text = model.name + (model.image ? " (Image Generation)" : "") + (model.vision ? " (Image Upload)" : "") + (model.audio ? " (Audio Generation)" : "") + (model.video ? " (Video Generation)" : "");
+        option.text = model.name + (model.image ? " (🖼️ Image Generation)" : "") + (model.vision ? " (👓 Image Upload)" : "") + (model.audio ? " (🎧 Audio Generation)" : "") + (model.video ? " (🎥 Video Generation)" : "");
         option.dataset.providers = model.providers.join(" ");
         modelSelect.appendChild(option);
         is_demo = model.demo;
@@ -2849,6 +2849,10 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
         if (api_base) {
             headers.x_api_base = api_base;
         }
+        const ignored = Array.from(settings.querySelectorAll("input.provider:not(:checked)")).map((el)=>el.value);
+        if (ignored) {
+            headers.x_ignored = ignored.join(" ");
+        }
         url = `/backend-api/v2/${ressource}/${args}`;
     } else if (ressource == "conversation") {
         let body = JSON.stringify(args);
@@ -2985,7 +2989,8 @@ async function load_provider_models(provider=null) {
         let option = document.createElement('option');
         option.value = model.model;
         option.dataset.label = model.model;
-        option.text = `${model.model}${model.image ? " (Image Generation)" : ""}${model.audio ? " (Audio Generation)" : ""}${model.video ? " (Video Generation)" : ""}${model.vision ? " (Image Upload)" : ""}`;
+        option.text = model.model + (model.count > 1 ? ` (${model.count}+)` : "") + (model.image ? " (🖼️ Image Generation)" : "") + (model.vision ? " (👓 Image Upload)" : "") + (model.audio ? " (🎧 Audio Generation)" : "") + (model.video ? " (🎥 Video Generation)" : "");
         if (model.task) {
             option.text += ` (${model.task})`;
         }

File: g4f/gui/server/api.py

@@ -10,7 +10,7 @@ from inspect import signature
 from ...errors import VersionNotFoundError, MissingAuthError
 from ...image.copy_images import copy_media, ensure_images_dir, images_dir
 from ...tools.run_tools import iter_run_tools
-from ...Provider import ProviderUtils, __providers__
+from ... import Provider
 from ...providers.base_provider import ProviderModelMixin
 from ...providers.retry_provider import BaseRetryProvider
 from ...providers.helper import format_image_prompt
@@ -41,12 +41,14 @@ class Api:
         for model, providers in models.__models__.values()]

     @staticmethod
-    def get_provider_models(provider: str, api_key: str = None, api_base: str = None):
-        if provider in ProviderUtils.convert:
-            provider = ProviderUtils.convert[provider]
+    def get_provider_models(provider: str, api_key: str = None, api_base: str = None, ignored: list = None):
+        if provider in Provider.__map__:
+            provider = Provider.__map__[provider]
         if issubclass(provider, ProviderModelMixin):
             if "api_key" in signature(provider.get_models).parameters:
                 models = provider.get_models(api_key=api_key, api_base=api_base)
+            elif "ignored" in signature(provider.get_models).parameters:
+                models = provider.get_models(ignored=ignored)
             else:
                 models = provider.get_models()
             return [
@@ -57,7 +59,7 @@ class Api:
"audio": getattr(provider, "default_audio_model", None) == model or model in getattr(provider, "audio_models", []), "audio": getattr(provider, "default_audio_model", None) == model or model in getattr(provider, "audio_models", []),
"video": getattr(provider, "default_video_model", None) == model or model in getattr(provider, "video_models", []), "video": getattr(provider, "default_video_model", None) == model or model in getattr(provider, "video_models", []),
"image": False if provider.image_models is None else model in provider.image_models, "image": False if provider.image_models is None else model in provider.image_models,
"task": None if not hasattr(provider, "task_mapping") else provider.task_mapping[model] if model in provider.task_mapping else None "count": getattr(provider, "models_count", {}).get(model),
} }
for model in models for model in models
] ]
@@ -69,15 +71,15 @@ class Api:
"name": provider.__name__, "name": provider.__name__,
"label": provider.label if hasattr(provider, "label") else provider.__name__, "label": provider.label if hasattr(provider, "label") else provider.__name__,
"parent": getattr(provider, "parent", None), "parent": getattr(provider, "parent", None),
"image": bool(getattr(provider, "image_models", False)), "image": len(getattr(provider, "image_models", [])),
"audio": getattr(provider, "audio_models", None) is not None, "audio": len(getattr(provider, "audio_models", [])),
"video": getattr(provider, "video_models", None) is not None, "video": len(getattr(provider, "video_models", [])),
"vision": getattr(provider, "default_vision_model", None) is not None, "vision": getattr(provider, "default_vision_model", None) is not None,
"nodriver": getattr(provider, "use_nodriver", False), "nodriver": getattr(provider, "use_nodriver", False),
"hf_space": getattr(provider, "hf_space", False), "hf_space": getattr(provider, "hf_space", False),
"auth": provider.needs_auth, "auth": provider.needs_auth,
"login_url": getattr(provider, "login_url", None), "login_url": getattr(provider, "login_url", None),
} for provider in __providers__ if provider.working] } for provider in Provider.__providers__ if provider.working]
@staticmethod @staticmethod
def get_version() -> dict: def get_version() -> dict:

File: g4f/gui/server/backend_api.py

@@ -438,7 +438,8 @@ class Backend_Api(Api):
     def get_provider_models(self, provider: str):
         api_key = request.headers.get("x_api_key")
         api_base = request.headers.get("x_api_base")
-        models = super().get_provider_models(provider, api_key, api_base)
+        ignored = request.headers.get("x_ignored").split()
+        models = super().get_provider_models(provider, api_key, api_base, ignored)
         if models is None:
             return "Provider not found", 404
         return models

File: g4f/models.py

@@ -187,7 +187,7 @@ o3_mini = Model(
 ### GigaChat ###
 gigachat = Model(
-    name = 'GigaChat:latest',
+    name = 'gigachat',
     base_provider = 'gigachat',
     best_provider = GigaChat
 )
@@ -1006,6 +1006,6 @@ __models__ = {
         if model.best_provider is not None and model.best_provider.working
         else [])
     for model in ModelUtils.convert.values()]
-    if providers
+    if [p for p in providers if p.working]
 }
 _all_models = list(__models__.keys())

File: g4f/providers/any_provider.py (new file)

@@ -0,0 +1,181 @@
from __future__ import annotations

from ..typing import AsyncResult, Messages, MediaListType
from ..errors import ModelNotFoundError
from ..providers.retry_provider import IterListProvider
from ..image import is_data_an_audio
from ..providers.response import JsonConversation, ProviderInfo
from ..Provider.needs_auth import OpenaiChat, CopilotAccount
from ..Provider.hf import HuggingFace, HuggingFaceMedia
from ..Provider.hf_space import HuggingSpace
from .. import Provider
from .. import models
from ..Provider import Cloudflare, LMArenaProvider, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, FreeRouter
from ..Provider import Microsoft_Phi_4, DeepInfraChat, Blackbox
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
    default_model = "default"
    working = True

    @classmethod
    def get_models(cls, ignored: list[str] = []) -> list[str]:
        cls.audio_models = {}
        cls.image_models = []
        cls.vision_models = []
        cls.video_models = []
        model_with_providers = {
            model: [
                provider for provider in providers
                if provider.working and getattr(provider, "parent", provider.__name__) not in ignored
            ] for model, (_, providers) in models.__models__.items()
        }
        model_with_providers = {
            model: providers for model, providers in model_with_providers.items()
            if providers
        }
        cls.models_count = {
            model: len(providers) for model, providers in model_with_providers.items() if len(providers) > 1
        }
        all_models = ["default"] + list(model_with_providers.keys())
        for provider in [OpenaiChat, PollinationsAI, HuggingSpace, Cloudflare, PerplexityLabs, Gemini, Grok]:
            if not provider.working or getattr(provider, "parent", provider.__name__) in ignored:
                continue
            if provider == PollinationsAI:
                all_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model not in all_models])
                cls.audio_models.update({f"{provider.__name__}:{model}": [] for model in provider.get_models() if model in provider.audio_models})
                cls.image_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model in provider.image_models])
                cls.vision_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model in provider.vision_models])
            else:
                all_models.extend(provider.get_models())
                cls.image_models.extend(provider.image_models)
                cls.vision_models.extend(provider.vision_models)
                cls.video_models.extend(provider.video_models)
        if CopilotAccount.working and CopilotAccount.parent not in ignored:
            all_models.extend(list(CopilotAccount.model_aliases.keys()))
        if PollinationsAI.working and PollinationsAI.__name__ not in ignored:
            all_models.extend(list(PollinationsAI.model_aliases.keys()))

        def clean_name(name: str) -> str:
            return name.split("/")[-1].split(":")[0].lower(
                ).replace("-instruct", ""
                ).replace("-chat", ""
                ).replace("-08-2024", ""
                ).replace("-03-2025", ""
                ).replace("-20250219", ""
                ).replace("-20241022", ""
                ).replace("-2025-04-16", ""
                ).replace("-2025-04-14", ""
                ).replace("-0125", ""
                ).replace("-2407", ""
                ).replace("-2501", ""
                ).replace("-0324", ""
                ).replace("-2409", ""
                ).replace("-2410", ""
                ).replace("-2411", ""
                ).replace("-02-24", ""
                ).replace("-03-25", ""
                ).replace("-03-26", ""
                ).replace("-01-21", ""
                ).replace(".1-", "-"
                ).replace("_", "."
                ).replace("c4ai-", ""
                ).replace("-preview", ""
                ).replace("-experimental", ""
                ).replace("-v1", ""
                ).replace("-fp8", ""
                ).replace("-bf16", ""
                ).replace("-hf", ""
                ).replace("llama3", "llama-3")

        for provider in [HuggingFace, HuggingFaceMedia, LMArenaProvider, LambdaChat, DeepInfraChat]:
            if not provider.working or getattr(provider, "parent", provider.__name__) in ignored:
                continue
            model_map = {clean_name(model): model for model in provider.get_models()}
            provider.model_aliases.update(model_map)
            all_models.extend(list(model_map.keys()))
            cls.image_models.extend([clean_name(model) for model in provider.image_models])
            cls.vision_models.extend([clean_name(model) for model in provider.vision_models])
            cls.video_models.extend([clean_name(model) for model in provider.video_models])
        for provider in [Microsoft_Phi_4, PollinationsAI]:
            if provider.working and getattr(provider, "parent", provider.__name__) not in ignored:
                cls.audio_models.update(provider.audio_models)
        cls.models_count.update({model: all_models.count(model) + cls.models_count.get(model, 0) for model in all_models})
        return list(dict.fromkeys([model if model else "default" for model in all_models]))

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        media: MediaListType = None,
        ignored: list[str] = [],
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        providers = []
        if ":" in model:
            providers = model.split(":")
            model = providers.pop()
            providers = [getattr(Provider, provider) for provider in providers]
        elif not model or model == "default":
            has_image = False
            has_audio = "audio" in kwargs
            if not has_audio and media is not None:
                for media_data, filename in media:
                    if is_data_an_audio(media_data, filename):
                        has_audio = True
                        break
                    has_image = True
            if has_audio:
                providers = [PollinationsAI, Microsoft_Phi_4]
            elif has_image:
                providers = models.default_vision.best_provider.providers
            else:
                providers = models.default.best_provider.providers
        else:
            for provider in [OpenaiChat, HuggingSpace, Cloudflare, LMArenaProvider, PerplexityLabs, Gemini, Grok, DeepSeekAPI, FreeRouter, Blackbox]:
                if provider.working and (model if model else "auto") in provider.get_models():
                    providers.append(provider)
            for provider in [HuggingFace, HuggingFaceMedia, LambdaChat, LMArenaProvider, CopilotAccount, PollinationsAI, DeepInfraChat]:
                if model in provider.model_aliases:
                    providers.append(provider)
            if model in models.__models__:
                for provider in models.__models__[model][1]:
                    providers.append(provider)
        providers = [provider for provider in providers if provider.working and getattr(provider, "parent", provider.__name__) not in ignored]
        if len(providers) == 0:
            raise ModelNotFoundError(f"Model {model} not found in any provider.")
        if len(providers) == 1:
            provider = providers[0]
            if conversation is not None:
                child_conversation = getattr(conversation, provider.__name__, None)
                if child_conversation is not None:
                    kwargs["conversation"] = JsonConversation(**child_conversation)
            yield ProviderInfo(**provider.get_dict(), model=model)
            async for chunk in provider.get_async_create_function()(
                model,
                messages,
                stream=stream,
                media=media,
                **kwargs
            ):
                if isinstance(chunk, JsonConversation):
                    if conversation is None:
                        conversation = JsonConversation()
                    setattr(conversation, provider.__name__, chunk.get_dict())
                    yield conversation
                else:
                    yield chunk
            return
        async for chunk in IterListProvider(providers).get_async_create_function()(
            model,
            messages,
            stream=stream,
            media=media,
            **kwargs
        ):
            yield chunk

setattr(Provider, "AnyProvider", AnyProvider)
Provider.__map__["AnyProvider"] = AnyProvider
Provider.__providers__.append(AnyProvider)

File: g4f/providers/base_provider.py

@@ -342,6 +342,8 @@ class ProviderModelMixin:
     model_aliases: dict[str, str] = {}
     image_models: list = []
     vision_models: list = []
+    video_models: list = []
+    audio_models: dict = {}
     last_model: str = None

     @classmethod

File: g4f/providers/retry_provider.py

@@ -4,10 +4,13 @@ import random
 from ..typing import Type, List, CreateResult, Messages, AsyncResult
 from .types import BaseProvider, BaseRetryProvider, ProviderType
-from .response import MediaResponse, ProviderInfo
+from .response import MediaResponse, AudioResponse, ProviderInfo
 from .. import debug
 from ..errors import RetryProviderError, RetryNoProviderError

+def is_content(chunk):
+    return isinstance(chunk, (str, MediaResponse, AudioResponse))

 class IterListProvider(BaseRetryProvider):
     def __init__(
         self,
@@ -59,7 +62,7 @@ class IterListProvider(BaseRetryProvider):
                 for chunk in response:
                     if chunk:
                         yield chunk
-                        if isinstance(chunk, (str, MediaResponse)):
+                        if is_content(chunk):
                             started = True
                 if started:
                     return
@@ -94,7 +97,7 @@ class IterListProvider(BaseRetryProvider):
                     async for chunk in response:
                         if chunk:
                             yield chunk
-                            if isinstance(chunk, (str, MediaResponse)):
+                            if is_content(chunk):
                                 started = True
                 elif response:
                     response = await response
@@ -173,8 +176,8 @@ class RetryProvider(IterListProvider):
print(f"Using {provider.__name__} provider (attempt {attempt + 1})") print(f"Using {provider.__name__} provider (attempt {attempt + 1})")
response = provider.get_create_function()(model, messages, stream=stream, **kwargs) response = provider.get_create_function()(model, messages, stream=stream, **kwargs)
for chunk in response: for chunk in response:
if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
yield chunk yield chunk
if is_content(chunk):
started = True started = True
if started: if started:
return return
@@ -207,8 +210,8 @@ class RetryProvider(IterListProvider):
             response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
             if hasattr(response, "__aiter__"):
                 async for chunk in response:
-                    if isinstance(chunk, str) or isinstance(chunk, ImageResponse):
-                        yield chunk
-                        started = True
+                    yield chunk
+                    if is_content(chunk):
+                        started = True
             else:
                 response = await response
@@ -237,6 +240,6 @@ def raise_exceptions(exceptions: dict) -> None:
     if exceptions:
         raise RetryProviderError("RetryProvider failed:\n" + "\n".join([
             f"{p}: {type(exception).__name__}: {exception}" for p, exception in exceptions.items()
-        ]))
+        ])) from list(exceptions.values())[0]
     raise RetryNoProviderError("No provider found")
raise RetryNoProviderError("No provider found") raise RetryNoProviderError("No provider found")