From 1dac52a19128858d4250aeb79237a5c4afba8158 Mon Sep 17 00:00:00 2001
From: hlohaus <983577+hlohaus@users.noreply.github.com>
Date: Mon, 10 Nov 2025 13:58:07 +0100
Subject: [PATCH] Refactor PollinationsAI model selection logic; update default
 system in AIModel; clean up unused provider imports

---
 g4f/Provider/PollinationsAI.py |  2 +-
 g4f/integration/pydantic_ai.py | 51 ++++++++++------------------------
 g4f/models.py                  |  6 ----
 g4f/providers/any_provider.py  |  2 +-
 4 files changed, 16 insertions(+), 45 deletions(-)

diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 23ce1677..53263dc7 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -206,7 +206,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                     if is_data_an_audio(media_data, filename):
                         has_audio = True
                         break
-            model = cls.default_audio_model if has_audio else model
+            model = cls.default_audio_model if has_audio else cls.default_model
         elif cls._models_loaded or cls.get_models():
             if model in cls.model_aliases:
                 model = cls.model_aliases[model]
diff --git a/g4f/integration/pydantic_ai.py b/g4f/integration/pydantic_ai.py
index a65420af..2ad14fdc 100644
--- a/g4f/integration/pydantic_ai.py
+++ b/g4f/integration/pydantic_ai.py
@@ -6,8 +6,9 @@ from dataclasses import dataclass, field
 
 from pydantic_ai import ModelResponsePart, ThinkingPart, ToolCallPart
 from pydantic_ai.models import Model, ModelResponse, KnownModelName, infer_model
-from pydantic_ai.models.openai import OpenAIChatModel, UnexpectedModelBehavior
-from pydantic_ai.models.openai import OpenAISystemPromptRole, _CHAT_FINISH_REASON_MAP, _map_usage, _now_utc, number_to_datetime, split_content_into_text_and_thinking, replace
+from pydantic_ai.usage import RequestUsage
+from pydantic_ai.models.openai import OpenAIChatModel
+from pydantic_ai.models.openai import OpenAISystemPromptRole, _now_utc, split_content_into_text_and_thinking, replace
 
 import pydantic_ai.models.openai
 pydantic_ai.models.openai.NOT_GIVEN = None
@@ -31,7 +32,7 @@ class AIModel(OpenAIChatModel):
         provider: str | None = None,
         *,
         system_prompt_role: OpenAISystemPromptRole | None = None,
-        system: str | None = 'openai',
+        system: str | None = 'g4f',
         **kwargs
     ):
         """Initialize an AI model.
@@ -46,7 +47,7 @@
             customize the `base_url` and `api_key` to use a different provider.
         """
         self._model_name = model_name
-        self._provider = provider
+        self._provider = getattr(provider, '__name__', provider)
         self.client = AsyncClient(provider=provider, **kwargs)
         self.system_prompt_role = system_prompt_role
         self._system = system
@@ -58,36 +59,12 @@ class AIModel(OpenAIChatModel):
 
     def _process_response(self, response: ChatCompletion | str) -> ModelResponse:
         """Process a non-streamed response, and prepare a message to return."""
-        # Although the OpenAI SDK claims to return a Pydantic model (`ChatCompletion`) from the chat completions function:
-        # * it hasn't actually performed validation (presumably they're creating the model with `model_construct` or something?!)
-        # * if the endpoint returns plain text, the return type is a string
-        # Thus we validate it fully here.
-        if not isinstance(response, ChatCompletion):
-            raise UnexpectedModelBehavior('Invalid response from OpenAI chat completions endpoint, expected JSON data')
-
-        if response.created:
-            timestamp = number_to_datetime(response.created)
-        else:
-            timestamp = _now_utc()
-            response.created = int(timestamp.timestamp())
-
-        # Workaround for local Ollama which sometimes returns a `None` finish reason.
-        if response.choices and (choice := response.choices[0]) and choice.finish_reason is None:  # pyright: ignore[reportUnnecessaryComparison]
-            choice.finish_reason = 'stop'
-
         choice = response.choices[0]
         items: list[ModelResponsePart] = []
 
-        # The `reasoning` field is only present in gpt-oss via Ollama and OpenRouter.
-        # - https://cookbook.openai.com/articles/gpt-oss/handle-raw-cot#chat-completions-api
-        # - https://openrouter.ai/docs/use-cases/reasoning-tokens#basic-usage-with-reasoning-tokens
         if reasoning := getattr(choice.message, 'reasoning', None):
             items.append(ThinkingPart(id='reasoning', content=reasoning, provider_name=self.system))
 
-        # NOTE: We don't currently handle OpenRouter `reasoning_details`:
-        # - https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks
-        # If you need this, please file an issue.
-
         if choice.message.content:
             items.extend(
                 (replace(part, id='content', provider_name=self.system) if isinstance(part, ThinkingPart) else part)
@@ -95,20 +72,21 @@ class AIModel(OpenAIChatModel):
                 for part in split_content_into_text_and_thinking(choice.message.content)
             )
         if choice.message.tool_calls is not None:
             for c in choice.message.tool_calls:
-                items.append(ToolCallPart(c.get("function").get("name"), c.get("function").get("arguments"), tool_call_id=c.get("id")))
-
-        raw_finish_reason = choice.finish_reason
-        finish_reason = _CHAT_FINISH_REASON_MAP.get(raw_finish_reason)
+                items.append(ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id))
+        usage = RequestUsage(
+            input_tokens=response.usage.prompt_tokens,
+            output_tokens=response.usage.completion_tokens,
+        )
         return ModelResponse(
             parts=items,
-            usage=_map_usage(response, self._provider, "", self._model_name),
+            usage=usage,
             model_name=response.model,
-            timestamp=timestamp,
+            timestamp=_now_utc(),
             provider_details=None,
             provider_response_id=response.id,
             provider_name=self._provider,
-            finish_reason=finish_reason,
+            finish_reason=choice.finish_reason,
         )
 
 def new_infer_model(model: Model | KnownModelName, api_key: str = None) -> Model:
@@ -125,5 +103,4 @@ def new_infer_model(model: Model | KnownModelName, api_key: str = None) -> Model
 
 def patch_infer_model(api_key: str | None = None):
     import pydantic_ai.models
-    pydantic_ai.models.infer_model = partial(new_infer_model, api_key=api_key)
-    pydantic_ai.models.OpenAIChatModel = AIModel
\ No newline at end of file
+    pydantic_ai.models.infer_model = partial(new_infer_model, api_key=api_key)
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 7c625abb..17b1aca4 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -6,7 +6,6 @@ from typing import Dict, List, Optional
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     ### No Auth Required ###
-    Blackbox,
     Chatai,
     Cloudflare,
     Copilot,
@@ -17,7 +16,6 @@ from .Provider import (
     GLM,
     Kimi,
     LambdaChat,
-    Mintlify,
     OIVSCodeSer2,
     OIVSCodeSer0501,
     OperaAria,
@@ -27,7 +25,6 @@ from .Provider import (
     PollinationsAI,
     PollinationsImage,
     Qwen,
-    StringableInference,
     TeachAnything,
     Together,
     WeWordle,
@@ -155,7 +152,6 @@ default = Model(
     name = "",
     base_provider = "",
     best_provider = IterListProvider([
-        StringableInference,
         OIVSCodeSer0501,
         OIVSCodeSer2,
         Copilot,
@@ -168,7 +164,6 @@ default = Model(
         Together,
         Chatai,
         WeWordle,
-        Mintlify,
         TeachAnything,
         OpenaiChat,
         Cloudflare,
@@ -179,7 +174,6 @@ default_vision = VisionModel(
     name = "",
     base_provider = "",
     best_provider = IterListProvider([
-        StringableInference,
         DeepInfra,
         OIVSCodeSer0501,
         OIVSCodeSer2,
diff --git a/g4f/providers/any_provider.py b/g4f/providers/any_provider.py
index 8f808846..fdee466d 100644
--- a/g4f/providers/any_provider.py
+++ b/g4f/providers/any_provider.py
@@ -348,7 +348,7 @@ class AnyProvider(AsyncGeneratorProvider, AnyModelProviderMixin):
                         has_audio = True
                         break
                 has_image = True
-        if "tools" in kwargs:
+        if kwargs.get("tools", None):
             providers = [PollinationsAI]
         elif "audio" in kwargs or "audio" in kwargs.get("modalities", []):
             if kwargs.get("audio", {}).get("language") is None:
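
Note on the any_provider.py hunk: `"tools" in kwargs` is true even when the
caller passes `tools=None` or `tools=[]`, so every such request was pinned to
PollinationsAI; `kwargs.get("tools", None)` only forces that route when a
non-empty tool list is actually supplied. A minimal sketch of the difference
(the kwargs values are illustrative, not taken from the patch):

    kwargs = {"tools": []}

    # Old check: true on mere presence of the key, even for an empty list.
    assert ("tools" in kwargs) is True

    # New check: None or an empty list is falsy, so no forced tool routing.
    assert not kwargs.get("tools", None)

    kwargs = {"tools": [{"type": "function", "function": {"name": "f"}}]}
    assert kwargs.get("tools", None)  # a non-empty list still routes to tools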
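
For context, a minimal usage sketch of the refactored pydantic_ai integration;
the model name and provider choice below are assumptions for illustration, not
part of the patch:

    from g4f.Provider import PollinationsAI
    from g4f.integration.pydantic_ai import AIModel, patch_infer_model

    # A provider class is accepted directly; __init__ now stores only its
    # __name__ (via getattr(provider, '__name__', provider)), so the
    # ModelResponse.provider_name field carries a plain string.
    model = AIModel("gpt-4o-mini", provider=PollinationsAI)

    # Optional: route pydantic_ai's infer_model through g4f. Note the patch
    # drops the old OpenAIChatModel override and only replaces infer_model.
    patch_infer_model(api_key=None)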