Merge branch 'xtekky:main' into main

Authored by Ammar on 2025-12-06 16:36:30 +02:00, committed by GitHub
8 changed files with 275 additions and 4 deletions


@@ -0,0 +1,107 @@
from __future__ import annotations

import json

from ..typing import AsyncResult, Messages
from ..providers.response import Reasoning, JsonResponse
from ..requests import StreamSession
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class GradientNetwork(AsyncGeneratorProvider, ProviderModelMixin):
    """
    Provider for chat.gradient.network
    Supports streaming text generation with Qwen and GPT OSS models.
    """
    label = "Gradient Network"
    url = "https://chat.gradient.network"
    api_endpoint = "https://chat.gradient.network/api/generate"
    working = True
    needs_auth = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "GPT OSS 120B"
    models = [
        default_model,
        "Qwen3 235B",
    ]
    model_aliases = {
        "qwen-3-235b": "Qwen3 235B",
        "qwen3-235b": "Qwen3 235B",
        "gpt-oss-120b": "GPT OSS 120B",
    }
    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        enable_thinking: bool = True,
        **kwargs
    ) -> AsyncResult:
        """
        Create an async generator for streaming chat responses.

        Args:
            model: The model name to use
            messages: List of message dictionaries
            proxy: Optional proxy URL
            enable_thinking: Enable the thinking/analysis channel (maps to enableThinking in the API)
            **kwargs: Additional arguments

        Yields:
            str: Content chunks from the response
            Reasoning: Reasoning content when enable_thinking is True
        """
        model = cls.get_model(model)
        headers = {
            "Accept": "application/x-ndjson",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Referer": f"{cls.url}/",
        }
        payload = {
            "clusterMode": "nvidia" if "GPT OSS" in model else "hybrid",
            "model": model,
            "messages": messages,
        }
        if enable_thinking:
            payload["enableThinking"] = enable_thinking
        async with StreamSession(headers=headers, proxy=proxy, impersonate="chrome") as session:
            async with session.post(
                cls.api_endpoint,
                json=payload,
            ) as response:
                response.raise_for_status()
                async for line in response.iter_lines():
                    if not line:
                        continue
                    try:
                        data = json.loads(line)
                        yield JsonResponse.from_dict(data)
                        msg_type = data.get("type")
                        if msg_type == "reply":
                            # Response chunks with content or reasoningContent
                            reply_data = data.get("data", {})
                            content = reply_data.get("content")
                            reasoning_content = reply_data.get("reasoningContent")
                            if reasoning_content:
                                yield Reasoning(reasoning_content)
                            if content:
                                yield content
                        # Skip clusterInfo and blockUpdate GPU visualization messages
                    except json.JSONDecodeError:
                        # Skip non-JSON lines (may be partial data or empty)
                        continue
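
For reference, a minimal usage sketch (not part of the committed files), assuming the provider is exported from g4f.Provider as the import hunk later in this commit suggests. Plain string chunks are answer text; Reasoning and JsonResponse chunks carry the thinking channel and the raw NDJSON metadata.

import asyncio
from g4f.Provider import GradientNetwork

async def main():
    stream = GradientNetwork.create_async_generator(
        model="gpt-oss-120b",  # alias, resolved to "GPT OSS 120B" via model_aliases
        messages=[{"role": "user", "content": "Hello"}],
        enable_thinking=True,
    )
    async for chunk in stream:
        # Keep only plain text chunks; Reasoning / JsonResponse objects carry metadata.
        if isinstance(chunk, str):
            print(chunk, end="", flush=True)

asyncio.run(main())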

g4f/Provider/ItalyGPT.py

@@ -0,0 +1,46 @@
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests import DEFAULT_HEADERS
from aiohttp import ClientSession


class ItalyGPT(AsyncGeneratorProvider, ProviderModelMixin):
    label = "ItalyGPT"
    url = "https://italygpt.it"
    working = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o"
    models = [default_model]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            **DEFAULT_HEADERS,
            "content-type": "application/json",
            "origin": "https://italygpt.it",
            "referer": "https://italygpt.it/",
        }
        payload = {
            "messages": messages,
            "stream": stream,
        }
        async with ClientSession() as session:
            async with session.post(
                f"{cls.url}/api/chat",
                json=payload,
                headers=headers,
                proxy=proxy,
            ) as resp:
                resp.raise_for_status()
                async for chunk in resp.content.iter_any():
                    if chunk:
                        yield chunk.decode()
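
A minimal sketch of driving this provider directly, for reference only: it yields the decoded chunks of the upstream /api/chat stream as-is, so whatever framing the upstream applies is passed through to the caller.

import asyncio
from g4f.Provider import ItalyGPT

async def main():
    async for chunk in ItalyGPT.create_async_generator(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Ciao!"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())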


@@ -48,6 +48,8 @@ from .Copilot import Copilot
from .DeepInfra import DeepInfra
from .EasyChat import EasyChat
from .GLM import GLM
from .GradientNetwork import GradientNetwork
from .ItalyGPT import ItalyGPT
from .LambdaChat import LambdaChat
from .Mintlify import Mintlify
from .OIVSCodeSer import OIVSCodeSer2, OIVSCodeSer0501


@@ -0,0 +1,112 @@
from __future__ import annotations

import aiohttp
import json
import uuid

from ...typing import AsyncResult, Messages
from ...providers.response import JsonConversation
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message, get_system_prompt
from ... import debug


class BAAI_Ling(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Ling & Ring Playground"
    url = "https://cafe3310-ling-playground.hf.space"
    api_endpoint = f"{url}/gradio_api/queue/join"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = False

    default_model = "ling-1t"
    model_aliases = {
        "ling": default_model,
    }
    models = ['ling-mini-2.0', 'ling-1t', 'ling-flash-2.0', 'ring-1t', 'ring-flash-2.0', 'ring-mini-2.0']

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        conversation: JsonConversation = None,
        **kwargs
    ) -> AsyncResult:
        is_new_conversation = conversation is None or not hasattr(conversation, 'session_hash')
        if is_new_conversation:
            conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace('-', '')[:12])

        model = cls.get_model(model)
        prompt = format_prompt(messages) if is_new_conversation else get_last_user_message(messages)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': cls.url,
            'referer': f'{cls.url}/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
        }
        payload = {
            "data": [
                prompt,
                [
                    [
                        None,
                        "Hello! I'm Ling. Try selecting a scenario and a message example below to get started."
                    ]
                ],
                get_system_prompt(messages),
                1,
                model
            ],
            "event_data": None,
            "fn_index": 11,
            "trigger_id": 14,
            "session_hash": conversation.session_hash
        }

        async with aiohttp.ClientSession() as session:
            async with session.post(cls.api_endpoint, headers=headers, json=payload, proxy=proxy) as response:
                await raise_for_status(response)
                # Response body must be consumed for the request to complete
                await response.json()

            data_url = f'{cls.url}/gradio_api/queue/data?session_hash={conversation.session_hash}'
            headers_data = {
                'accept': 'text/event-stream',
                'referer': f'{cls.url}/',
                'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/132.0.0.0 Safari/537.36'
            }
            async with session.get(data_url, headers=headers_data, proxy=proxy) as response:
                full_response = ""
                async for line in response.content:
                    decoded_line = line.decode('utf-8')
                    if decoded_line.startswith('data: '):
                        try:
                            json_data = json.loads(decoded_line[6:])
                            if json_data.get('msg') == 'process_generating':
                                if 'output' in json_data and 'data' in json_data['output']:
                                    output_data = json_data['output']['data']
                                    if output_data and len(output_data) > 0:
                                        parts = output_data[0][0]
                                        if len(parts) == 2:
                                            new_text = output_data[0][1].pop()
                                            full_response += new_text
                                            yield new_text
                                        if len(parts) > 2:
                                            new_text = parts[2]
                                            full_response += new_text
                                            yield new_text
                            elif json_data.get('msg') == 'process_completed':
                                break
                        except json.JSONDecodeError:
                            debug.log("Could not parse JSON:", decoded_line)
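
A minimal sketch of exercising this Space provider, for reference: it joins the Gradio queue with a session_hash, then reads the /gradio_api/queue/data event stream for that hash until process_completed. The import path below is an assumption based on where the HuggingSpace package registers BAAI_Ling.

import asyncio
from g4f.Provider.hf_space.BAAI_Ling import BAAI_Ling  # assumed module path

async def main():
    async for chunk in BAAI_Ling.create_async_generator(
        model="ling-1t",
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())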


@@ -6,6 +6,7 @@ from ...typing import AsyncResult, Messages, MediaListType
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .BAAI_Ling import BAAI_Ling
from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .BlackForestLabs_Flux1KontextDev import BlackForestLabs_Flux1KontextDev
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
@@ -27,6 +28,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
    default_image_model = BlackForestLabs_Flux1Dev.default_model
    default_vision_model = Microsoft_Phi_4_Multimodal.default_model
    providers = [
        BAAI_Ling,
        BlackForestLabs_Flux1Dev,
        BlackForestLabs_Flux1KontextDev,
        CohereForAI_C4AI_Command,


@@ -71,6 +71,7 @@ models = {
"gemini-2.0-flash-thinking": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"9c17b1863f581b8a"]'},
"gemini-2.0-flash-thinking-with-apps": {"x-goog-ext-525001261-jspb": '[null,null,null,null,"f8f8f5ea629f5d37"]'},
# Currently used models
"gemini-3-pro": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9d8ca3786ebdfbea",null,null,0,[4]]'},
"gemini-2.5-pro": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"61530e79959ab139",null,null,null,[4]]'},
"gemini-2.5-flash": {"x-goog-ext-525001261-jspb": '[1,null,null,null,"9ec249fc9ad08861",null,null,null,[4]]'},
"gemini-audio": {}
@@ -89,7 +90,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
    default_vision_model = default_model
    image_models = [default_image_model]
    models = [
        default_model, "gemini-2.5-flash", "gemini-2.5-pro"
        default_model, "gemini-3-pro", "gemini-2.5-flash", "gemini-2.5-pro"
    ]
    synthesize_content_type = "audio/vnd.wav"


@@ -500,6 +500,7 @@ class GeminiCLI(AsyncGeneratorProvider, ProviderModelMixin):
    models = [
        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-3-pro-preview"
    ]
    working = True


@@ -440,7 +440,7 @@ class Backend_Api(Api):
os.remove(copyfile)
continue
if not is_media and result:
with open(os.path.join(bucket_dir, f"{filename}.md"), 'w') as f:
with open(os.path.join(bucket_dir, f"{filename}.md"), 'w', encoding="utf-8") as f:
f.write(f"{result}\n")
filenames.append(f"{filename}.md")
if is_media:
@@ -477,7 +477,7 @@ class Backend_Api(Api):
except OSError:
shutil.copyfile(copyfile, newfile)
os.remove(copyfile)
with open(os.path.join(bucket_dir, "files.txt"), 'w') as f:
with open(os.path.join(bucket_dir, "files.txt"), 'w', encoding="utf-8") as f:
for filename in filenames:
f.write(f"{filename}\n")
return {"bucket_id": bucket_id, "files": filenames, "media": media}
@@ -572,7 +572,7 @@ class Backend_Api(Api):
share_id = secure_filename(share_id)
bucket_dir = get_bucket_dir(share_id)
os.makedirs(bucket_dir, exist_ok=True)
with open(os.path.join(bucket_dir, "chat.json"), 'w') as f:
with open(os.path.join(bucket_dir, "chat.json"), 'w', encoding="utf-8") as f:
json.dump(chat_data, f)
self.chat_cache[share_id] = updated
return {"share_id": share_id}