diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index 7755c930..aae45837 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -3,42 +3,45 @@ from __future__ import annotations
 import json
 import random
 import requests
-from urllib.parse import quote
+from urllib.parse import quote_plus
 from typing import Optional
 from aiohttp import ClientSession
 
+from .helper import filter_none
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages, ImagesType
+from ..image import to_data_uri
 from ..requests.raise_for_status import raise_for_status
-from ..typing import AsyncResult, Messages
-from ..image import ImageResponse
+from ..requests.aiohttp import get_connector
+from ..providers.response import ImageResponse, FinishReason, Usage
+
+DEFAULT_HEADERS = {
+    'Accept': '*/*',
+    'Accept-Language': 'en-US,en;q=0.9',
+    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
+}
 
 class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI"
     url = "https://pollinations.ai"
-    
+
     working = True
     supports_stream = False
     supports_system_message = True
     supports_message_history = True
 
-    # API endpoints base
-    api_base = "https://text.pollinations.ai/openai"
-
     # API endpoints
-    text_api_endpoint = "https://text.pollinations.ai/"
+    text_api_endpoint = "https://text.pollinations.ai/openai"
     image_api_endpoint = "https://image.pollinations.ai/"
 
     # Models configuration
     default_model = "openai"
     default_image_model = "flux"
-
-    image_models = []
-    models = []
-
-    additional_models_image = ["midjourney", "dall-e-3"]
-    additional_models_text = ["claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral"]
+    default_vision_model = "gpt-4o"
+    extra_image_models = ["midjourney", "dall-e-3"]
+    vision_models = [default_vision_model, "gpt-4o-mini"]
+    extra_text_models = [*vision_models, "claude", "karma", "command-r", "llamalight", "mistral-large", "sur", "sur-mistral"]
 
     model_aliases = {
-        "gpt-4o": default_model,
         "qwen-2-72b": "qwen",
         "qwen-2.5-coder-32b": "qwen-coder",
         "llama-3.3-70b": "llama",
@@ -50,22 +53,17 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         "deepseek-chat": "deepseek",
         "llama-3.2-3b": "llamalight",
     }
+    text_models = []
 
     @classmethod
     def get_models(cls, **kwargs):
-        # Initialize model lists if not exists
-        if not hasattr(cls, 'image_models'):
-            cls.image_models = []
-        if not hasattr(cls, 'text_models'):
-            cls.text_models = []
-
         # Fetch image models if not cached
        if not cls.image_models:
             url = "https://image.pollinations.ai/models"
             response = requests.get(url)
             raise_for_status(response)
             cls.image_models = response.json()
-            cls.image_models.extend(cls.additional_models_image)
+            cls.image_models.extend(cls.extra_image_models)
 
         # Fetch text models if not cached
         if not cls.text_models:
@@ -73,7 +71,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             response = requests.get(url)
             raise_for_status(response)
             cls.text_models = [model.get("name") for model in response.json()]
-            cls.text_models.extend(cls.additional_models_text)
+            cls.text_models.extend(cls.extra_text_models)
 
         # Return combined models
         return cls.text_models + cls.image_models
@@ -94,22 +92,27 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         enhance: bool = False,
         safe: bool = False,
         # Text specific parameters
-        temperature: float = 0.5,
-        presence_penalty: float = 0,
+        images: ImagesType = None,
+        temperature: float = None,
+        presence_penalty: float = None,
         top_p: float = 1,
-        frequency_penalty: float = 0,
-        stream: bool = False,
+        frequency_penalty: float = None,
+        response_format: Optional[dict] = None,
+        cache: bool = False,
         **kwargs
     ) -> AsyncResult:
+        if images is not None and not model:
+            model = cls.default_vision_model
         model = cls.get_model(model)
+        if not cache and seed is None:
+            seed = random.randint(0, 100000)
 
         # Check if models
         # Image generation
         if model in cls.image_models:
-            async for result in cls._generate_image(
+            yield await cls._generate_image(
                 model=model,
-                messages=messages,
-                prompt=prompt,
+                prompt=messages[-1]["content"] if prompt is None else prompt,
                 proxy=proxy,
                 width=width,
                 height=height,
@@ -118,19 +121,21 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 private=private,
                 enhance=enhance,
                 safe=safe
-            ):
-                yield result
+            )
         else:
             # Text generation
             async for result in cls._generate_text(
                 model=model,
                 messages=messages,
+                images=images,
                 proxy=proxy,
                 temperature=temperature,
                 presence_penalty=presence_penalty,
                 top_p=top_p,
                 frequency_penalty=frequency_penalty,
-                stream=stream
+                response_format=response_format,
+                seed=seed,
+                cache=cache,
             ):
                 yield result
 
@@ -138,7 +143,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     async def _generate_image(
         cls,
         model: str,
-        messages: Messages,
         prompt: str,
         proxy: str,
         width: int,
@@ -148,16 +152,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         private: bool,
         enhance: bool,
         safe: bool
-    ) -> AsyncResult:
-        if seed is None:
-            seed = random.randint(0, 10000)
-
-        headers = {
-            'Accept': '*/*',
-            'Accept-Language': 'en-US,en;q=0.9',
-            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
-        }
-
+    ) -> ImageResponse:
         params = {
             "seed": seed,
             "width": width,
@@ -168,42 +163,47 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             "enhance": enhance,
             "safe": safe
         }
-        params = {k: v for k, v in params.items() if v is not None}
-
-        async with ClientSession(headers=headers) as session:
-            prompt = messages[-1]["content"] if prompt is None else prompt
-            param_string = "&".join(f"{k}={v}" for k, v in params.items())
-            url = f"{cls.image_api_endpoint}/prompt/{quote(prompt)}?{param_string}"
-
-            async with session.head(url, proxy=proxy) as response:
-                if response.status == 200:
-                    image_response = ImageResponse(images=url, alt=prompt)
-                    yield image_response
+        params = {k: json.dumps(v) if isinstance(v, bool) else v for k, v in params.items() if v is not None}
+        async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
+            async with session.head(f"{cls.image_api_endpoint}prompt/{quote_plus(prompt)}", params=params) as response:
+                await raise_for_status(response)
+                return ImageResponse(str(response.url), prompt)
 
     @classmethod
     async def _generate_text(
         cls,
         model: str,
         messages: Messages,
+        images: Optional[ImagesType],
         proxy: str,
         temperature: float,
         presence_penalty: float,
         top_p: float,
         frequency_penalty: float,
-        stream: bool,
-        seed: Optional[int] = None
-    ) -> AsyncResult:
-        headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
-        }
-
-        if seed is None:
-            seed = random.randint(0, 10000)
-
-        async with ClientSession(headers=headers) as session:
+        response_format: Optional[dict],
+        seed: Optional[int],
+        cache: bool
+    ) -> AsyncResult:
+        jsonMode = False
+        if response_format is not None and "type" in response_format:
+            if response_format["type"] == "json_object":
+                jsonMode = True
+
+        if images is not None and messages:
+            last_message = messages[-1].copy()
+            last_message["content"] = [
+                *[{
+                    "type": "image_url",
+                    "image_url": {"url": to_data_uri(image)}
+                } for image, _ in images],
+                {
+                    "type": "text",
+                    "text": messages[-1]["content"]
+                }
+            ]
+            messages[-1] = last_message
+
+        async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
             data = {
                 "messages": messages,
                 "model": model,
@@ -211,42 +211,33 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 "presence_penalty": presence_penalty,
                 "top_p": top_p,
                 "frequency_penalty": frequency_penalty,
-                "jsonMode": False,
-                "stream": stream,
+                "jsonMode": jsonMode,
+                "stream": False, # To get more information like Usage and FinishReason
                 "seed": seed,
-                "cache": False
+                "cache": cache
             }
-
-            async with session.post(cls.text_api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for chunk in response.content:
-                    if chunk:
-                        decoded_chunk = chunk.decode()
-
-                        # Skip [DONE].
-                        if "data: [DONE]" in decoded_chunk:
-                            continue
-
-                        # Processing plain text
-                        if not decoded_chunk.startswith("data:"):
-                            clean_text = decoded_chunk.strip()
-                            if clean_text:
-                                yield clean_text
-                            continue
-
-                        # Processing JSON format
-                        try:
-                            # Remove the prefix “data: “ and parse JSON
-                            json_str = decoded_chunk.replace("data:", "").strip()
-                            json_response = json.loads(json_str)
-
-                            if "choices" in json_response and json_response["choices"]:
-                                if "delta" in json_response["choices"][0]:
-                                    content = json_response["choices"][0]["delta"].get("content")
-                                    if content:
-                                        # Remove escaped slashes before parentheses
-                                        clean_content = content.replace("\\(", "(").replace("\\)", ")")
-                                        yield clean_content
-                        except json.JSONDecodeError:
-                            # If JSON could not be parsed, skip
-                            continue
+            async with session.post(cls.text_api_endpoint, json=filter_none(**data)) as response:
+                await raise_for_status(response)
+                async for line in response.content:
+                    decoded_chunk = line.decode(errors="replace")
+                    # Stop at [DONE].
+ if "data: [DONE]" in decoded_chunk: + break + # Processing JSON format + try: + # Remove the prefix “data: “ and parse JSON + json_str = decoded_chunk.replace("data:", "").strip() + data = json.loads(json_str) + choice = data["choices"][0] + if "usage" in data: + yield Usage(**data["usage"]) + if "message" in choice and "content" in choice["message"] and choice["message"]["content"]: + yield choice["message"]["content"].replace("\\(", "(").replace("\\)", ")") + elif "delta" in choice and "content" in choice["delta"] and choice["delta"]["content"]: + yield choice["delta"]["content"].replace("\\(", "(").replace("\\)", ")") + if "finish_reason" in choice and choice["finish_reason"] is not None: + yield FinishReason(choice["finish_reason"]) + break + except json.JSONDecodeError: + yield decoded_chunk.strip() + continue \ No newline at end of file diff --git a/g4f/Provider/hf_space/Qwen_QVQ_72B.py b/g4f/Provider/hf_space/Qwen_QVQ_72B.py index a9d224ea..81c89166 100644 --- a/g4f/Provider/hf_space/Qwen_QVQ_72B.py +++ b/g4f/Provider/hf_space/Qwen_QVQ_72B.py @@ -18,6 +18,7 @@ class Qwen_QVQ_72B(AsyncGeneratorProvider, ProviderModelMixin): default_model = "qwen-qvq-72b-preview" models = [default_model] + vision_models = models model_aliases = {"qwq-32b": default_model} @classmethod diff --git a/g4f/Provider/hf_space/__init__.py b/g4f/Provider/hf_space/__init__.py index 98856218..989ed9ab 100644 --- a/g4f/Provider/hf_space/__init__.py +++ b/g4f/Provider/hf_space/__init__.py @@ -33,12 +33,18 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin): def get_models(cls, **kwargs) -> list[str]: if not cls.models: models = [] + image_models = [] + vision_models = [] for provider in cls.providers: models.extend(provider.get_models(**kwargs)) models.extend(provider.model_aliases.keys()) + image_models.extend(provider.image_models) + vision_models.extend(provider.vision_models) models = list(set(models)) models.sort() cls.models = models + cls.image_models = list(set(image_models)) + cls.vision_models = list(set(vision_models)) return cls.models @classmethod diff --git a/g4f/Provider/needs_auth/HuggingChat.py b/g4f/Provider/needs_auth/HuggingChat.py index b5a7c410..7cd54f89 100644 --- a/g4f/Provider/needs_auth/HuggingChat.py +++ b/g4f/Provider/needs_auth/HuggingChat.py @@ -1,6 +1,8 @@ from __future__ import annotations import json +import re +import requests try: from curl_cffi.requests import Session, CurlMime @@ -13,14 +15,13 @@ from ..helper import format_prompt from ...typing import CreateResult, Messages, Cookies from ...errors import MissingRequirementsError from ...requests.raise_for_status import raise_for_status -from ...providers.response import JsonConversation, ImageResponse, Sources +from ...providers.response import JsonConversation, ImageResponse, Sources, TitleGeneration, Reasoning from ...cookies import get_cookies from ... 
 
 class Conversation(JsonConversation):
-    def __init__(self, conversation_id: str, message_id: str):
-        self.conversation_id: str = conversation_id
-        self.message_id: str = message_id
+    def __init__(self, models: dict):
+        self.models: dict = models
 
 class HuggingChat(AbstractProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
@@ -32,11 +33,11 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
     default_model = "Qwen/Qwen2.5-72B-Instruct"
     default_image_model = "black-forest-labs/FLUX.1-dev"
     image_models = [
-        "black-forest-labs/FLUX.1-dev",
+        default_image_model,
         "black-forest-labs/FLUX.1-schnell",
     ]
-    models = [
-        'Qwen/Qwen2.5-Coder-32B-Instruct',
+    fallback_models = [
+        default_model,
         'meta-llama/Llama-3.3-70B-Instruct',
         'CohereForAI/c4ai-command-r-plus-08-2024',
         'Qwen/QwQ-32B-Preview',
@@ -63,12 +64,33 @@
         "flux-schnell": "black-forest-labs/FLUX.1-schnell",
     }
 
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            try:
+                text = requests.get(cls.url).text
+                text = re.sub(r',parameters:{[^}]+?}', '', text)
+                text = re.search(r'models:(\[.+?\]),oldModels:', text).group(1)
+                text = text.replace('void 0', 'null')
+                def add_quotation_mark(match):
+                    return f'{match.group(1)}"{match.group(2)}":'
+                text = re.sub(r'([{,])([A-Za-z0-9_]+?):', add_quotation_mark, text)
+                models = json.loads(text)
+                cls.text_models = [model["id"] for model in models]
+                cls.models = cls.text_models + cls.image_models
+                cls.vision_models = [model["id"] for model in models if model["multimodal"]]
+            except Exception as e:
+                debug.log(f"HuggingChat: Error reading models: {type(e).__name__}: {e}")
+                cls.models = [*cls.fallback_models]
+        return cls.models
+
     @classmethod
     def create_completion(
         cls,
         model: str,
         messages: Messages,
         stream: bool,
+        prompt: str = None,
         return_conversation: bool = False,
         conversation: Conversation = None,
         web_search: bool = False,
@@ -99,22 +121,26 @@
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
         }
 
-        if conversation is None:
+        if conversation is None or not hasattr(conversation, "models"):
+            conversation = Conversation({})
+
+        if model not in conversation.models:
             conversationId = cls.create_conversation(session, model)
             messageId = cls.fetch_message_id(session, conversationId)
-            conversation = Conversation(conversationId, messageId)
+            conversation.models[model] = {"conversationId": conversationId, "message_id": messageId}
             if return_conversation:
                 yield conversation
             inputs = format_prompt(messages)
         else:
-            conversation.message_id = cls.fetch_message_id(session, conversation.conversation_id)
+            conversationId = conversation.models[model]["conversationId"]
+            conversation.models[model]["message_id"] = cls.fetch_message_id(session, conversationId)
             inputs = messages[-1]["content"]
 
-        debug.log(f"Use conversation: {conversation.conversation_id} Use message: {conversation.message_id}")
+        debug.log(f"Use model {model}: {json.dumps(conversation.models[model])}")
 
         settings = {
             "inputs": inputs,
-            "id": conversation.message_id,
+            "id": conversation.models[model]["message_id"],
             "is_retry": False,
             "is_continue": False,
             "web_search": web_search,
@@ -128,7 +154,7 @@
             'origin': 'https://huggingface.co',
             'pragma': 'no-cache',
             'priority': 'u=1, i',
-            'referer': f'https://huggingface.co/chat/conversation/{conversation.conversation_id}',
+            'referer': f'https://huggingface.co/chat/conversation/{conversationId}',
             'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
             'sec-ch-ua-mobile': '?0',
             'sec-ch-ua-platform': '"macOS"',
@@ -142,7 +168,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         data.addpart('data', data=json.dumps(settings, separators=(',', ':')))
 
         response = session.post(
-            f'https://huggingface.co/chat/conversation/{conversation.conversation_id}',
+            f'https://huggingface.co/chat/conversation/{conversationId}',
             cookies=session.cookies,
             headers=headers,
             multipart=data,
@@ -170,10 +196,17 @@
             elif line["type"] == "finalAnswer":
                 break
             elif line["type"] == "file":
-                url = f"https://huggingface.co/chat/conversation/{conversation.conversation_id}/output/{line['sha']}"
-                yield ImageResponse(url, alt=messages[-1]["content"], options={"cookies": cookies})
+                url = f"https://huggingface.co/chat/conversation/{conversationId}/output/{line['sha']}"
+                prompt = messages[-1]["content"] if prompt is None else prompt
+                yield ImageResponse(url, alt=prompt, options={"cookies": cookies})
             elif line["type"] == "webSearch" and "sources" in line:
                 sources = Sources(line["sources"])
+            elif line["type"] == "title":
+                yield TitleGeneration(line["title"])
+            elif line["type"] == "reasoning":
+                yield Reasoning(line.get("token"), line.get("status"))
+            else:
+                pass  # print(line)
 
         full_response = full_response.replace('<|im_end|', '').strip()
         if not stream:
diff --git a/g4f/Provider/needs_auth/HuggingFace.py b/g4f/Provider/needs_auth/HuggingFace.py
index 02220e78..b9ef5418 100644
--- a/g4f/Provider/needs_auth/HuggingFace.py
+++ b/g4f/Provider/needs_auth/HuggingFace.py
@@ -143,7 +143,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                         else:
                             is_special = True
                         debug.log(f"Special token: {is_special}")
-                        yield FinishReason("stop" if is_special else "length", actions=["variant"] if is_special else ["continue", "variant"])
+                        yield FinishReason("stop" if is_special else "length")
                 else:
                     if response.headers["content-type"].startswith("image/"):
                         base64_data = base64.b64encode(b"".join([chunk async for chunk in response.iter_content()]))
diff --git a/g4f/Provider/needs_auth/HuggingFaceAPI.py b/g4f/Provider/needs_auth/HuggingFaceAPI.py
index a3817b15..1c1466d7 100644
--- a/g4f/Provider/needs_auth/HuggingFaceAPI.py
+++ b/g4f/Provider/needs_auth/HuggingFaceAPI.py
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
 from .HuggingChat import HuggingChat
+from ...providers.types import Messages
 
 class HuggingFaceAPI(OpenaiAPI):
     label = "HuggingFace (Inference API)"
@@ -11,6 +12,23 @@ class HuggingFaceAPI(OpenaiAPI):
     working = True
     default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
     default_vision_model = default_model
-    models = [
-        *HuggingChat.models
-    ]
\ No newline at end of file
+
+    @classmethod
+    def get_models(cls, **kwargs):
+        HuggingChat.get_models()
+        cls.models = HuggingChat.text_models
+        cls.vision_models = HuggingChat.vision_models
+        return cls.models
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        api_base: str = None,
+        **kwargs
+    ):
+        if api_base is None:
+            api_base = f"https://api-inference.huggingface.co/models/{model}/v1"
+        async for chunk in super().create_async_generator(model, messages, api_base=api_base, **kwargs):
+            yield chunk
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
index 1ca256d4..8e7e0677 100644
--- a/g4f/Provider/needs_auth/OpenaiAPI.py
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -73,10 +73,11 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
             raise MissingAuthError('Add a "api_key"')
         if api_base is None:
             api_base = cls.api_base
-        if images is not None:
+        if images is not None and messages:
             if not model and hasattr(cls, "default_vision_model"):
                 model = cls.default_vision_model
-            messages[-1]["content"] = [
+            last_message = messages[-1].copy()
+            last_message["content"] = [
                 *[{
                     "type": "image_url",
                     "image_url": {"url": to_data_uri(image)}
@@ -86,6 +87,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
                     "text": messages[-1]["content"]
                 }
             ]
+            messages[-1] = last_message
         async with StreamSession(
             proxy=proxy,
             headers=cls.get_headers(stream, api_key, headers),
@@ -117,9 +119,9 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
                     yield ToolCalls(choice["message"]["tool_calls"])
                 if "usage" in data:
                     yield Usage(**data["usage"])
-                finish = cls.read_finish_reason(choice)
-                if finish is not None:
-                    yield finish
+                if "finish_reason" in choice and choice["finish_reason"] is not None:
+                    yield FinishReason(choice["finish_reason"])
+                    return
             else:
                 first = True
                 async for line in response.iter_lines():
@@ -137,16 +139,10 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin):
                         if delta:
                             first = False
                             yield delta
-                    finish = cls.read_finish_reason(choice)
-                    if finish is not None:
-                        yield finish
+                    if "finish_reason" in choice and choice["finish_reason"] is not None:
+                        yield FinishReason(choice["finish_reason"])
                         break
 
-    @staticmethod
-    def read_finish_reason(choice: dict) -> Optional[FinishReason]:
-        if "finish_reason" in choice and choice["finish_reason"] is not None:
-            return FinishReason(choice["finish_reason"])
-
     @classmethod
     def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
         return {
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 38d9e5de..75396a87 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -495,8 +495,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 "headers": cls._headers,
                 "web_search": web_search,
             })
-            actions = ["variant", "continue"] if conversation.finish_reason == "max_tokens" else ["variant"]
-            yield FinishReason(conversation.finish_reason, actions=actions)
+            yield FinishReason(conversation.finish_reason)
 
     @classmethod
     async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation, sources: Sources) -> AsyncIterator:
diff --git a/g4f/gui/client/static/css/style.css b/g4f/gui/client/static/css/style.css
index 997e4c22..307fdc3a 100644
--- a/g4f/gui/client/static/css/style.css
+++ b/g4f/gui/client/static/css/style.css
@@ -376,6 +376,29 @@ body:not(.white) a:visited{
     display: flex;
 }
 
+.message .reasoning_text.final:not(.hidden), .message .reasoning_title {
+    margin-bottom: var(--inner-gap);
+    padding-bottom: var(--inner-gap);
+    border-bottom: 1px solid var(--colour-3);
+    overflow: hidden;
+}
+
+.message .reasoning_text.final {
+    max-height: 1000px;
+    transition: max-height 0.25s ease-in;
+}
+
+.message .reasoning_text.final.hidden {
+    transition: max-height 0.15s ease-out;
+    max-height: 0;
+    display: block;
+    overflow: hidden;
+}
+
+.message .reasoning_title {
+    cursor: pointer;
+}
+
 .message .user i {
     position: absolute;
     bottom: -6px;
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index dfbffc5c..10c4eeb3 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -35,6 +35,7 @@ let title_storage = {};
 let parameters_storage = {};
 let finish_storage = {};
 let usage_storage = {};
+let reasoning_storage = {};
 
 messageInput.addEventListener("blur", () => {
     window.scrollTo(0, 0);
@@ -70,6 +71,17 @@ if (window.markdownit) {
     }
 }
 
+function render_reasoning(reasoning, final = false) {
+    return `