Files
gpt4free/g4f/gui/server/api.py
hlohaus 06546649db feat: add LM Arena provider, async‑ify Copilot & surface follow‑up suggestions
* **Provider/Blackbox.py**
  * Raise `RateLimitError` when `"You have reached your request limit for the hour"` substring is detected
* **Provider/Copilot.py**
  * Convert class to `AsyncGeneratorProvider`; rename `create_completion` → `create_async_generator` (flow sketched after this list)
  * Swap `curl_cffi.requests.Session` for `AsyncSession`; reduce default timeout to **30 s**
  * Fully async websocket flow (`await session.ws_connect`, `await wss.send/recv/close`)
  * Emit new response types: `TitleGeneration`, `SourceLink`, aggregated `Sources`
  * Track request completion with `done` flag; collect citations in `sources` dict
* **Provider/DuckDuckGo.py**
  * Replace `duckduckgo_search.DDGS` with `duckai.DuckAI`
  * Change base class to `AbstractProvider`; drop nodriver‑based auth
* **Provider/PollinationsAI.py**
  * Re‑build text/audio model lists ensuring uniqueness; remove unused `extra_text_models`
  * Fix image seed logic (`i==1` for first retry); propagate streaming `error` field via `ResponseError`
* **Provider/hf_space**
  * **New file** `LMArenaProvider.py` implementing async queue/stream client
  * Register `LMArenaProvider` in `hf_space/__init__.py`; delete `G4F` import
* **Provider/needs_auth/CopilotAccount.py**
  * Inheritance order changed to `Copilot, AsyncAuthedProvider`
  * Refactor token & cookie propagation; add `cookies_to_dict` helper
* **Provider/needs_auth/OpenaiChat.py**
  * Parse reasoning thoughts/summary; yield `Reasoning` responses
  * Tighten access‑token validation and nodriver JS evaluations (`return_by_value`)
  * Extend `Conversation` with `p` and `thoughts_summary`
* **providers/response.py**
  * Add `SourceLink` response class returning a single formatted citation link (see the sketch after this list)
* **providers/base_provider.py**
  * Serialize `AuthResult` with custom `json.dump` to handle non-serializable fields (pattern sketched after this list)
  * Gracefully skip empty cache files when loading auth data
* **image/copy_images.py**
  * Ignore file extensions longer than 4 chars when inferring type
* **requests/__init__.py**
  * Use `return_by_value=True` for `navigator.userAgent` extraction
* **models.py**
  * Remove `G4F` from model provider lists; update `janus_pro_7b` best providers
* **GUI server/api.py**
  * Stream `SuggestedFollowups` to client (`"suggestions"` event)
* **GUI static assets**
  * **style.css**: bold chat title, add `.suggestions` styles, remove padding from `.chat-body`
  * **chat.v1.js**
    * Capture `suggestions` packets, render buttons, and send as quick replies
    * Re‑order finish‑reason logic; adjust token count placement and system‑prompt toggling
  * **chat-top-panel / footer** interactions updated accordingly
* **gui/client/static/js/chat.v1.js** & **css**: further UI refinements (scroll handling, token counting, hide-prompt toggle)
* Minor updates across multiple files to match new async interfaces and headers (`userAgent`, `raise_for_status`)
2025-04-17 01:21:58 +02:00
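
The Copilot rewrite described above replaces the blocking session with an async generator driving a websocket. The sketch below shows the general shape of that flow; the endpoint, payload fields, and event names are placeholders (only `AsyncSession`, `ws_connect`, `send`/`recv`/`close`, the `done` flag, and the `sources` dict come from the change notes), so treat it as an outline rather than the actual `Provider/Copilot.py` code.

```python
# Hedged skeleton only: the endpoint URL, payload fields, and event names are
# placeholders; the real protocol handling lives in Provider/Copilot.py.
import json
from curl_cffi.requests import AsyncSession

class CopilotFlowSketch:
    @classmethod
    async def create_async_generator(cls, model: str, messages: list, timeout: int = 30, **kwargs):
        async with AsyncSession(timeout=timeout) as session:
            wss = await session.ws_connect("wss://example.invalid/chat")  # placeholder endpoint
            await wss.send(json.dumps({"event": "send", "text": messages[-1]["content"]}).encode())
            done = False        # set once the service signals completion
            sources = {}        # collected citations, keyed by URL to deduplicate
            while not done:
                data, _ = await wss.recv()          # curl_cffi returns (payload, flags)
                msg = json.loads(data)
                event = msg.get("event")
                if event == "appendText":
                    yield msg["text"]               # plain text chunk
                elif event == "citation":
                    sources[msg["url"]] = msg       # the real provider yields SourceLink here
                elif event == "done":
                    done = True
            if sources:
                # The real provider wraps these in a Sources response object.
                yield "\n\n" + "\n".join(f"[{s.get('title', url)}]({url})" for url, s in sources.items())
            await wss.close()
```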
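
`SourceLink` and the aggregated `Sources` follow the usual pattern in `providers/response.py`: response objects whose string form is what gets streamed to the client. A minimal sketch of that pattern, assuming hypothetical constructor signatures and field names rather than the shipped implementation:

```python
# Illustrative sketch; the shipped classes in providers/response.py may differ.
class SourceLink:
    """A single citation, rendered as one formatted markdown link."""
    def __init__(self, title: str, url: str):
        self.title = title
        self.url = url

    def __str__(self) -> str:
        return f" [[{self.title}]({self.url})]"

class Sources:
    """Aggregated citations, rendered as a numbered link list."""
    def __init__(self, links: list[dict]):
        self.links = links

    def __str__(self) -> str:
        return "\n\n" + "\n".join(
            f"{i + 1}. [{link.get('title', link['url'])}]({link['url']})"
            for i, link in enumerate(self.links)
        )
```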
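
For the `AuthResult` cache in `providers/base_provider.py`, the change amounts to tolerating non-serializable fields on write and empty files on read. A minimal sketch of that pattern, with made-up function names; the `vars()`/`default=repr` dump is only an assumption about how such a fallback could work:

```python
import json
from pathlib import Path

def cache_auth_result(auth_result, cache_file: Path) -> None:
    # default=repr keeps json.dumps from failing on non-serializable fields
    # (cookie jars, raw bytes, etc.); those values are stored as strings.
    cache_file.write_text(json.dumps(vars(auth_result), default=repr, indent=2))

def load_auth_result(auth_cls, cache_file: Path):
    # Gracefully skip empty or missing cache files instead of raising on json.loads("").
    if not cache_file.is_file() or cache_file.stat().st_size == 0:
        return None
    return auth_cls(**json.loads(cache_file.read_text()))
```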

from __future__ import annotations
import logging
import os
import asyncio
from typing import Iterator
from flask import send_from_directory
from inspect import signature
from ...errors import VersionNotFoundError, MissingAuthError
from ...image.copy_images import copy_media, ensure_images_dir, images_dir
from ...tools.run_tools import iter_run_tools
from ...Provider import ProviderUtils, __providers__
from ...providers.base_provider import ProviderModelMixin
from ...providers.retry_provider import BaseRetryProvider
from ...providers.helper import format_image_prompt
from ...providers.response import *
from ... import version, models
from ... import ChatCompletion, get_model_and_provider
from ... import debug
logger = logging.getLogger(__name__)
# In-memory conversation cache: provider name -> conversation id -> conversation.
conversations: dict[str, dict[str, BaseConversation]] = {}

class Api:
    @staticmethod
    def get_models():
        return [{
            "name": model.name,
            "image": isinstance(model, models.ImageModel),
            "vision": isinstance(model, models.VisionModel),
            "audio": isinstance(model, models.AudioModel),
            "video": isinstance(model, models.VideoModel),
            "providers": [
                getattr(provider, "parent", provider.__name__)
                for provider in providers
                if provider.working
            ]
        }
        for model, providers in models.__models__.values()]

    @staticmethod
    def get_provider_models(provider: str, api_key: str = None, api_base: str = None):
        if provider in ProviderUtils.convert:
            provider = ProviderUtils.convert[provider]
            if issubclass(provider, ProviderModelMixin):
                if "api_key" in signature(provider.get_models).parameters:
                    models = provider.get_models(api_key=api_key, api_base=api_base)
                else:
                    models = provider.get_models()
                return [
                    {
                        "model": model,
                        "default": model == provider.default_model,
                        "vision": getattr(provider, "default_vision_model", None) == model or model in getattr(provider, "vision_models", []),
                        "audio": getattr(provider, "default_audio_model", None) == model or model in getattr(provider, "audio_models", []),
                        "video": getattr(provider, "default_video_model", None) == model or model in getattr(provider, "video_models", []),
                        "image": False if provider.image_models is None else model in provider.image_models,
                        "task": None if not hasattr(provider, "task_mapping") else provider.task_mapping[model] if model in provider.task_mapping else None
                    }
                    for model in models
                ]
        return []

    @staticmethod
    def get_providers() -> list[dict]:
        return [{
            "name": provider.__name__,
            "label": provider.label if hasattr(provider, "label") else provider.__name__,
            "parent": getattr(provider, "parent", None),
            "image": bool(getattr(provider, "image_models", False)),
            "audio": getattr(provider, "audio_models", None) is not None,
            "video": getattr(provider, "video_models", None) is not None,
            "vision": getattr(provider, "default_vision_model", None) is not None,
            "nodriver": getattr(provider, "use_nodriver", False),
            "hf_space": getattr(provider, "hf_space", False),
            "auth": provider.needs_auth,
            "login_url": getattr(provider, "login_url", None),
        } for provider in __providers__ if provider.working]

    @staticmethod
    def get_version() -> dict:
        current_version = None
        latest_version = None
        try:
            current_version = version.utils.current_version
            latest_version = version.utils.latest_version
        except VersionNotFoundError:
            pass
        return {
            "version": current_version,
            "latest_version": latest_version,
        }

    def serve_images(self, name):
        ensure_images_dir()
        return send_from_directory(os.path.abspath(images_dir), name)

    def _prepare_conversation_kwargs(self, json_data: dict):
        kwargs = {**json_data}
        model = json_data.get('model')
        provider = json_data.get('provider')
        messages = json_data.get('messages')
        kwargs["tool_calls"] = [{
            "function": {
                "name": "bucket_tool"
            },
            "type": "function"
        }]
        action = json_data.get('action')
        if action == "continue":
            kwargs["tool_calls"].append({
                "function": {
                    "name": "continue_tool"
                },
                "type": "function"
            })
        conversation = json_data.get("conversation")
        if isinstance(conversation, dict):
            kwargs["conversation"] = JsonConversation(**conversation)
        else:
            conversation_id = json_data.get("conversation_id")
            if conversation_id and provider:
                if provider in conversations and conversation_id in conversations[provider]:
                    kwargs["conversation"] = conversations[provider][conversation_id]
        return {
            "model": model,
            "provider": provider,
            "messages": messages,
            "stream": True,
            "ignore_stream": True,
            "return_conversation": True,
            **kwargs
        }

    def _create_response_stream(self, kwargs: dict, conversation_id: str, provider: str, download_media: bool = True) -> Iterator:
        # Collect debug output so it can be streamed back to the client as "log" events.
        def decorated_log(text: str, file = None):
            debug.logs.append(text)
            if debug.logging:
                debug.log_handler(text, file=file)
        debug.log = decorated_log
        proxy = os.environ.get("G4F_PROXY")
        provider = kwargs.get("provider")
        try:
            model, provider_handler = get_model_and_provider(
                kwargs.get("model"), provider,
                stream=True,
                ignore_stream=True,
                logging=False,
                has_images="media" in kwargs,
            )
        except Exception as e:
            debug.error(e)
            yield self._format_json('error', type(e).__name__, message=get_error_message(e))
            return
        if not isinstance(provider_handler, BaseRetryProvider):
            if not provider:
                provider = provider_handler.__name__
            yield self.handle_provider(provider_handler, model)
            if hasattr(provider_handler, "get_parameters"):
                yield self._format_json("parameters", provider_handler.get_parameters(as_json=True))
        try:
            result = iter_run_tools(ChatCompletion.create, **{**kwargs, "model": model, "provider": provider_handler, "download_media": download_media})
            # Translate typed response chunks into JSON events for the GUI.
            for chunk in result:
                if isinstance(chunk, ProviderInfo):
                    yield self.handle_provider(chunk, model)
                    provider = chunk.name
                elif isinstance(chunk, BaseConversation):
                    if provider is not None:
                        if hasattr(provider, "__name__"):
                            provider = provider.__name__
                        if provider not in conversations:
                            conversations[provider] = {}
                        conversations[provider][conversation_id] = chunk
                        if isinstance(chunk, JsonConversation):
                            yield self._format_json("conversation", {
                                provider: chunk.get_dict()
                            })
                        else:
                            yield self._format_json("conversation_id", conversation_id)
                elif isinstance(chunk, Exception):
                    logger.exception(chunk)
                    debug.error(chunk)
                    yield self._format_json('message', get_error_message(chunk), error=type(chunk).__name__)
                elif isinstance(chunk, RequestLogin):
                    yield self._format_json("preview", chunk.to_string())
                elif isinstance(chunk, PreviewResponse):
                    yield self._format_json("preview", chunk.to_string())
                elif isinstance(chunk, ImagePreview):
                    yield self._format_json("preview", chunk.to_string(), urls=chunk.urls, alt=chunk.alt)
                elif isinstance(chunk, MediaResponse):
                    media = chunk
                    if download_media or chunk.get("cookies"):
                        chunk.alt = format_image_prompt(kwargs.get("messages"), chunk.alt)
                        tags = [model, kwargs.get("aspect_ratio"), kwargs.get("resolution"), kwargs.get("width"), kwargs.get("height")]
                        media = asyncio.run(copy_media(chunk.get_list(), chunk.get("cookies"), chunk.get("headers"), proxy=proxy, alt=chunk.alt, tags=tags))
                        media = ImageResponse(media, chunk.alt) if isinstance(chunk, ImageResponse) else VideoResponse(media, chunk.alt)
                    yield self._format_json("content", str(media), urls=chunk.urls, alt=chunk.alt)
                elif isinstance(chunk, SynthesizeData):
                    yield self._format_json("synthesize", chunk.get_dict())
                elif isinstance(chunk, TitleGeneration):
                    yield self._format_json("title", chunk.title)
                elif isinstance(chunk, RequestLogin):
                    yield self._format_json("login", str(chunk))
                elif isinstance(chunk, Parameters):
                    yield self._format_json("parameters", chunk.get_dict())
                elif isinstance(chunk, FinishReason):
                    yield self._format_json("finish", chunk.get_dict())
                elif isinstance(chunk, Usage):
                    yield self._format_json("usage", chunk.get_dict())
                elif isinstance(chunk, Reasoning):
                    yield self._format_json("reasoning", **chunk.get_dict())
                elif isinstance(chunk, YouTube):
                    yield self._format_json("content", chunk.to_string())
                elif isinstance(chunk, AudioResponse):
                    yield self._format_json("content", str(chunk))
                elif isinstance(chunk, SuggestedFollowups):
                    yield self._format_json("suggestions", chunk.suggestions)
                elif isinstance(chunk, DebugResponse):
                    yield self._format_json("log", chunk.log)
                elif isinstance(chunk, RawResponse):
                    yield self._format_json(chunk.type, **chunk.get_dict())
                else:
                    yield self._format_json("content", str(chunk))
        except MissingAuthError as e:
            yield self._format_json('auth', type(e).__name__, message=get_error_message(e))
        except Exception as e:
            logger.exception(e)
            debug.error(e)
            yield self._format_json('error', type(e).__name__, message=get_error_message(e))
        finally:
            yield from self._yield_logs()

    def _yield_logs(self):
        if debug.logs:
            for log in debug.logs:
                yield self._format_json("log", log)
            debug.logs = []

    def _format_json(self, response_type: str, content = None, **kwargs):
        # Events are dicts whose "type" value doubles as the payload key.
        if content is not None and isinstance(response_type, str):
            return {
                'type': response_type,
                response_type: content,
                **kwargs
            }
        return {
            'type': response_type,
            **kwargs
        }

    def handle_provider(self, provider_handler, model):
        if isinstance(provider_handler, BaseRetryProvider) and provider_handler.last_provider is not None:
            provider_handler = provider_handler.last_provider
        if model:
            return self._format_json("provider", {**provider_handler.get_dict(), "model": model})
        return self._format_json("provider", provider_handler.get_dict())

def get_error_message(exception: Exception) -> str:
    return f"{type(exception).__name__}: {exception}"