Merge pull request #3290 from xtekky/copilot/remove-deprecated-providers

Remove deprecated and not_working providers
H Lohaus
2025-12-11 19:17:58 +01:00
committed by GitHub
56 changed files with 12 additions and 8187 deletions
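For reference, after this change the GUI helper in the first hunk below filters providers on `provider.url` alone; `Provider.deprecated` no longer needs to be consulted. A minimal standalone sketch of the simplified listing (the import paths are assumptions, not part of this diff):

```python
# Sketch only: enumerating usable providers once the deprecated package is gone.
# Import paths are assumed from the library layout, not shown in this commit.
from g4f.Provider import __providers__
from g4f.providers.types import ProviderType

def get_providers() -> list[ProviderType]:
    # A provider is listed as long as it advertises a URL; the old
    # dir(Provider.deprecated) membership check is no longer needed.
    return [provider for provider in __providers__ if provider.url is not None]

if __name__ == "__main__":
    for provider in get_providers():
        print(provider.__name__, provider.url)
```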

View File

@@ -34,8 +34,7 @@ def get_providers() -> list[ProviderType]:
     return [
         provider
         for provider in __providers__
-        if provider.__name__ not in dir(Provider.deprecated)
-        and provider.url is not None
+        if provider.url is not None
     ]
 
 def create_response(provider: ProviderType) -> str:

View File

@@ -69,6 +69,7 @@ class Perplexity(AsyncGeneratorProvider, ProviderModelMixin):
     model_aliases = {
         "gpt-5": "gpt5",
         "gpt-5-thinking": "gpt5_thinking",
+        "r1-1776": "r1",
     }
 
     @classmethod

View File

@@ -12,10 +12,6 @@ try:
     from .needs_auth.mini_max import HailuoAI, MiniMax
 except ImportError as e:
     debug.error("MiniMax providers not loaded:", e)
-try:
-    from .not_working import *
-except ImportError as e:
-    debug.error("Not working providers not loaded:", e)
 try:
     from .local import *
 except ImportError as e:
@@ -35,11 +31,6 @@ except ImportError as e:
 from .template import OpenaiTemplate, BackendApi
 from .qwen.QwenCode import QwenCode
-from .deprecated.ARTA import ARTA
-from .deprecated.Blackbox import Blackbox
-from .deprecated.DuckDuckGo import DuckDuckGo
-from .deprecated.Kimi import Kimi
-from .deprecated.PerplexityLabs import PerplexityLabs
 from .ApiAirforce import ApiAirforce
 from .Chatai import Chatai

View File

@@ -1,249 +0,0 @@
from __future__ import annotations
import os
import time
import json
import random
from pathlib import Path
from aiohttp import ClientSession, ClientResponse
import asyncio
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse, Reasoning
from ...errors import ResponseError, ModelNotFoundError
from ...cookies import get_cookies_dir
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_media_prompt
from ... import debug
class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-arta.com"
auth_url = "https://www.googleapis.com/identitytoolkit/v3/relyingparty/signupNewUser?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
token_refresh_url = "https://securetoken.googleapis.com/v1/token?key=AIzaSyB3-71wG0fIt0shj0ee4fvx1shcjJHGrrQ"
image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
status_check_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
working = False  # Disabled due to a takedown request
default_model = "flux"
default_image_model = default_model
model_aliases = {
"anything-xl": "Anything-xl",
"high-gpt4o": "High GPT4o",
"on-limbs-black": "On limbs black",
"f-dev": "F Dev",
"flux-dev": "F Dev", # Added
"sdxl-1.0": "SDXL 1.0", # Added
"old-school": "Old School",
"vincent-van-gogh": "Vincent Van Gogh",
"cor-epica-xl": "Cor-epica-xl",
"professional": "Professional",
"cheyenne-xl": "Cheyenne-xl",
"chicano": "Chicano",
"sdxl-l": "SDXL L", # Added
"black-ink": "Black Ink",
"juggernaut-xl": "Juggernaut-xl",
"cinematic-art": "Cinematic Art",
"dreamshaper-xl": "Dreamshaper-xl",
"fantasy-art": "Fantasy Art",
"neo-traditional": "Neo-traditional",
"realistic-stock-xl": "Realistic-stock-xl",
"flame-design": "Flame design",
"japanese-2": "Japanese_2",
"medieval": "Medieval",
"surrealism": "Surrealism",
"dotwork": "Dotwork",
"graffiti": "Graffiti",
"revanimated": "RevAnimated",
"on-limbs-color": "On limbs color",
"old-school-colored": "Old school colored",
"gpt4o-ghibli": "GPT4o Ghibli",
"low-poly": "Low Poly",
"gpt4o": "GPT4o",
"gpt-image": ["GPT4o", "High GPT4o", "GPT4o Ghibli"],
"no-style": "No Style",
"anime": "Anime",
"tattoo": "tattoo",
"embroidery-tattoo": "Embroidery tattoo",
"mini-tattoo": "Mini tattoo",
"realistic-tattoo": "Realistic tattoo",
"playground-xl": "Playground-xl",
"Watercolor": "Watercolor",
"f-pro": "F Pro",
"flux-pro": "F Pro", # Added
"kawaii": "Kawaii",
"photographic": "Photographic",
"katayama-mix-xl": "Katayama-mix-xl",
"death-metal": "Death metal",
"new-school": "New School",
"pony-xl": "Pony-xl",
"anima-pencil-xl": "Anima-pencil-xl",
default_image_model: "Flux", # Added
"biomech": "Biomech",
"yamers-realistic-xl": "Yamers-realistic-xl",
"trash-polka": "Trash Polka",
"red-and-black": "Red and Black",
}
image_models = list(model_aliases.keys())
models = image_models
@classmethod
def get_model(cls, model: str) -> str:
"""Get the internal model name from the user-provided model name."""
if not model:
return cls.model_aliases[cls.default_model]
# Always check aliases first to get the proper API name
if model in cls.model_aliases:
alias = cls.model_aliases[model]
# If the alias is a list, randomly select one of the options
if isinstance(alias, list):
selected_model = random.choice(alias)
debug.log(f"ARTA: Selected model '{selected_model}' from alias '{model}'")
return selected_model
debug.log(f"ARTA: Using model '{alias}' for alias '{model}'")
return alias
# If not in aliases, check if it's a direct API model name
api_model_names = [v for v in cls.model_aliases.values() if isinstance(v, str)]
if model in api_model_names:
return model
raise ModelNotFoundError(f"Model {model} not found")
@classmethod
def get_auth_file(cls):
path = Path(get_cookies_dir())
path.mkdir(exist_ok=True)
filename = f"auth_{cls.__name__}.json"
return path / filename
@classmethod
async def create_token(cls, path: Path, proxy: str | None = None):
async with ClientSession() as session:
# Step 1: Generate Authentication Token
auth_payload = {"clientType": "CLIENT_TYPE_ANDROID"}
async with session.post(cls.auth_url, json=auth_payload, proxy=proxy) as auth_response:
await raise_error(f"Failed to obtain authentication token", auth_response)
auth_data = await auth_response.json()
auth_token = auth_data.get("idToken")
#refresh_token = auth_data.get("refreshToken")
if not auth_token:
raise ResponseError("Failed to obtain authentication token.")
json.dump(auth_data, path.open("w"))
return auth_data
@classmethod
async def refresh_token(cls, refresh_token: str, proxy: str = None) -> tuple[str, str]:
async with ClientSession() as session:
payload = {
"grant_type": "refresh_token",
"refresh_token": refresh_token,
}
async with session.post(cls.token_refresh_url, data=payload, proxy=proxy) as response:
await raise_error(f"Failed to refresh token", response)
response_data = await response.json()
return response_data.get("id_token"), response_data.get("refresh_token")
@classmethod
async def read_and_refresh_token(cls, proxy: str | None = None) -> str:
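# Reuse the cached Firebase auth data; refresh the token once it is past half of its lifetime, and create a new one if it is missing or expired.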
path = cls.get_auth_file()
if path.is_file():
auth_data = json.load(path.open("rb"))
diff = time.time() - os.path.getmtime(path)
expiresIn = int(auth_data.get("expiresIn"))
if diff < expiresIn:
if diff > expiresIn / 2:
auth_data["idToken"], auth_data["refreshToken"] = await cls.refresh_token(auth_data.get("refreshToken"), proxy)
json.dump(auth_data, path.open("w"))
return auth_data
return await cls.create_token(path, proxy)
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
prompt: str = None,
negative_prompt: str = "blurry, deformed hands, ugly",
n: int = 1,
guidance_scale: int = 7,
num_inference_steps: int = 30,
aspect_ratio: str = None,
seed: int = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
prompt = format_media_prompt(messages, prompt)
# Generate a random seed if not provided
if seed is None:
seed = random.randint(9999, 99999999) # Common range for random seeds
# Step 1: Get Authentication Token
auth_data = await cls.read_and_refresh_token(proxy)
auth_token = auth_data.get("idToken")
async with ClientSession() as session:
# Step 2: Generate Images
# Create a form data structure as the API might expect form data instead of JSON
form_data = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"style": model,
"images_num": str(n),
"cfg_scale": str(guidance_scale),
"steps": str(num_inference_steps),
"aspect_ratio": "1:1" if aspect_ratio is None else aspect_ratio,
"seed": str(seed),
}
headers = {
"Authorization": auth_token,
# No Content-Type header for multipart/form-data, aiohttp sets it automatically
}
# Try with form data instead of JSON
async with session.post(cls.image_generation_url, data=form_data, headers=headers, proxy=proxy) as image_response:
await raise_error(f"Failed to initiate image generation", image_response)
image_data = await image_response.json()
record_id = image_data.get("record_id")
if not record_id:
raise ResponseError(f"Failed to initiate image generation: {image_data}")
# Step 3: Check Generation Status
status_url = cls.status_check_url.format(record_id=record_id)
start_time = time.time()
last_status = None
while True:
async with session.get(status_url, headers=headers, proxy=proxy) as status_response:
await raise_error(f"Failed to check image generation status", status_response)
status_data = await status_response.json()
status = status_data.get("status")
if status == "DONE":
image_urls = [image["url"] for image in status_data.get("response", [])]
duration = time.time() - start_time
yield Reasoning(label="Generated", status=f"{n} image in {duration:.2f}s" if n == 1 else f"{n} images in {duration:.2f}s")
yield ImageResponse(urls=image_urls, alt=prompt)
return
elif status in ("IN_QUEUE", "IN_PROGRESS"):
if last_status != status:
last_status = status
if status == "IN_QUEUE":
yield Reasoning(label="Waiting")
else:
yield Reasoning(label="Generating")
await asyncio.sleep(2) # Poll every 2 seconds
else:
raise ResponseError(f"Image generation failed with status: {status}")
async def raise_error(message: str, response: ClientResponse):
if response.ok:
return
error_text = await response.text()
content_type = response.headers.get('Content-Type', 'unknown')
raise ResponseError(f"{message}. Content-Type: {content_type}, Response: {error_text}")

View File

@@ -1,311 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import os
import re
import json
import random
import string
from pathlib import Path
from typing import Optional
from ...typing import AsyncResult, Messages, MediaListType
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...image import to_data_uri
from ..helper import render_messages
from ...providers.response import JsonConversation
from ...tools.media import merge_media
from ... import debug
class Conversation(JsonConversation):
validated_value: str = None
chat_id: str = None
message_history: Messages = []
def __init__(self, model: str):
self.model = model
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
label = "Blackbox AI"
url = "https://www.blackbox.ai"
api_endpoint = "https://www.blackbox.ai/api/chat"
working = False
active_by_default = True
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "blackboxai"
default_vision_model = default_model
models = [
default_model,
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4",
"gpt-4o",
"gpt-4o-mini",
# Trending agent modes
'Python Agent',
'HTML Agent',
'Builder Agent',
'Java Agent',
'JavaScript Agent',
'React Agent',
'Android Agent',
'Flutter Agent',
'Next.js Agent',
'AngularJS Agent',
'Swift Agent',
'MongoDB Agent',
'PyTorch Agent',
'Xcode Agent',
'Azure Agent',
'Bitbucket Agent',
'DigitalOcean Agent',
'Docker Agent',
'Electron Agent',
'Erlang Agent',
'FastAPI Agent',
'Firebase Agent',
'Flask Agent',
'Git Agent',
'Gitlab Agent',
'Go Agent',
'Godot Agent',
'Google Cloud Agent',
'Heroku Agent'
]
vision_models = [default_vision_model]
# Trending agent modes
trendingAgentMode = {
'Python Agent': {'mode': True, 'id': "python"},
'HTML Agent': {'mode': True, 'id': "html"},
'Builder Agent': {'mode': True, 'id': "builder"},
'Java Agent': {'mode': True, 'id': "java"},
'JavaScript Agent': {'mode': True, 'id': "javascript"},
'React Agent': {'mode': True, 'id': "react"},
'Android Agent': {'mode': True, 'id': "android"},
'Flutter Agent': {'mode': True, 'id': "flutter"},
'Next.js Agent': {'mode': True, 'id': "next.js"},
'AngularJS Agent': {'mode': True, 'id': "angularjs"},
'Swift Agent': {'mode': True, 'id': "swift"},
'MongoDB Agent': {'mode': True, 'id': "mongodb"},
'PyTorch Agent': {'mode': True, 'id': "pytorch"},
'Xcode Agent': {'mode': True, 'id': "xcode"},
'Azure Agent': {'mode': True, 'id': "azure"},
'Bitbucket Agent': {'mode': True, 'id': "bitbucket"},
'DigitalOcean Agent': {'mode': True, 'id': "digitalocean"},
'Docker Agent': {'mode': True, 'id': "docker"},
'Electron Agent': {'mode': True, 'id': "electron"},
'Erlang Agent': {'mode': True, 'id': "erlang"},
'FastAPI Agent': {'mode': True, 'id': "fastapi"},
'Firebase Agent': {'mode': True, 'id': "firebase"},
'Flask Agent': {'mode': True, 'id': "flask"},
'Git Agent': {'mode': True, 'id': "git"},
'Gitlab Agent': {'mode': True, 'id': "gitlab"},
'Go Agent': {'mode': True, 'id': "go"},
'Godot Agent': {'mode': True, 'id': "godot"},
'Google Cloud Agent': {'mode': True, 'id': "googlecloud"},
'Heroku Agent': {'mode': True, 'id': "heroku"},
}
# Complete list of all models (for authorized users)
_all_models = list(dict.fromkeys([
*models,
*list(trendingAgentMode.keys())
]))
@classmethod
async def fetch_validated(cls, url: str = "https://www.blackbox.ai", force_refresh: bool = False) -> Optional[str]:
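# Resolve the "validated" UUID token the chat API expects: use the cached value under ~/.g4f/cache if present, otherwise scrape it from the site's Next.js chunk scripts and cache it.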
cache_path = Path(os.path.expanduser("~")) / ".g4f" / "cache"
cache_file = cache_path / 'blackbox.json'
if not force_refresh and cache_file.exists():
try:
with open(cache_file, 'r') as f:
data = json.load(f)
if data.get('validated_value'):
return data['validated_value']
except Exception as e:
debug.log(f"Blackbox: Error reading cache: {e}")
js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js'
uuid_pattern = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'
def is_valid_context(text: str) -> bool:
return any(char + '=' in text for char in 'abcdefghijklmnopqrstuvwxyz')
async with ClientSession() as session:
try:
async with session.get(url) as response:
if response.status != 200:
return None
page_content = await response.text()
js_files = re.findall(js_file_pattern, page_content)
for js_file in js_files:
js_url = f"{url}/_next/{js_file}"
async with session.get(js_url) as js_response:
if js_response.status == 200:
js_content = await js_response.text()
for match in re.finditer(uuid_pattern, js_content):
start = max(0, match.start() - 10)
end = min(len(js_content), match.end() + 10)
context = js_content[start:end]
if is_valid_context(context):
validated_value = match.group(1)
cache_file.parent.mkdir(exist_ok=True, parents=True)
try:
with open(cache_file, 'w') as f:
json.dump({'validated_value': validated_value}, f)
except Exception as e:
debug.log(f"Blackbox: Error writing cache: {e}")
return validated_value
except Exception as e:
debug.log(f"Blackbox: Error retrieving validated_value: {e}")
return None
@classmethod
def generate_id(cls, length: int = 7) -> str:
chars = string.ascii_letters + string.digits
return ''.join(random.choice(chars) for _ in range(length))
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
prompt: str = None,
proxy: str = None,
media: MediaListType = None,
top_p: float = None,
temperature: float = None,
max_tokens: int = None,
conversation: Conversation = None,
return_conversation: bool = True,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'application/json',
'origin': 'https://www.blackbox.ai',
'referer': 'https://www.blackbox.ai/',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
}
async with ClientSession(headers=headers) as session:
if conversation is None or not hasattr(conversation, "chat_id"):
conversation = Conversation(model)
conversation.validated_value = await cls.fetch_validated()
conversation.chat_id = cls.generate_id()
conversation.message_history = []
current_messages = []
for i, msg in enumerate(render_messages(messages)):
msg_id = conversation.chat_id if i == 0 and msg["role"] == "user" else cls.generate_id()
current_msg = {
"id": msg_id,
"content": msg["content"],
"role": msg["role"]
}
current_messages.append(current_msg)
media = list(merge_media(media, messages))
if media:
current_messages[-1]['data'] = {
"imagesData": [
{
"filePath": f"/{image_name}",
"contents": to_data_uri(image)
}
for image, image_name in media
],
"fileText": "",
"title": ""
}
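# Build the chat payload mirroring the web client's request body, including the scraped "validated" token and the generated chat id.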
data = {
"messages": current_messages,
"agentMode": {},
"id": conversation.chat_id,
"previewToken": None,
"userId": None,
"codeModelMode": True,
"trendingAgentMode": cls.trendingAgentMode.get(model, {}) if model in cls.trendingAgentMode else {},
"isMicMode": False,
"userSystemPrompt": None,
"maxTokens": max_tokens,
"playgroundTopP": top_p,
"playgroundTemperature": temperature,
"isChromeExt": False,
"githubToken": "",
"clickedAnswer2": False,
"clickedAnswer3": False,
"clickedForceWebSearch": False,
"visitFromDelta": False,
"isMemoryEnabled": False,
"mobileClient": False,
"userSelectedModel": None,
"validated": conversation.validated_value,
"imageGenerationMode": False,
"webSearchModePrompt": False,
"deepSearchMode": False,
"designerMode": False,
"domains": None,
"vscodeClient": False,
"codeInterpreterMode": False,
"customProfile": {
"additionalInfo": "",
"enableNewChats": False,
"name": "",
"occupation": "",
"traits": []
},
"webSearchModeOption": {
"autoMode": False,
"webMode": False,
"offlineMode": False
},
"session": None,
"isPremium": True,
"subscriptionCache": None,
"beastMode": False,
"reasoningMode": False,
"workspaceId": "",
"asyncMode": False,
"webSearchMode": False
}
# Continue with the API request and async generator behavior
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
await raise_for_status(response)
# Collect the full response
full_response = []
async for chunk in response.content.iter_any():
if chunk:
chunk_text = chunk.decode()
if chunk_text != "Login to continue using":
full_response.append(chunk_text)
yield chunk_text
full_response_text = ''.join(full_response)
# Handle conversation history
if return_conversation:
conversation.message_history.append({"role": "assistant", "content": full_response_text})
yield conversation

View File

@@ -1,51 +0,0 @@
from __future__ import annotations
import asyncio
try:
from duckai import DuckAI
has_requirements = True
except ImportError:
has_requirements = False
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider, ProviderModelMixin
from ..helper import get_last_user_message
class DuckDuckGo(AbstractProvider, ProviderModelMixin):
label = "Duck.ai (duckduckgo_search)"
url = "https://duckduckgo.com/aichat"
api_base = "https://duckduckgo.com/duckchat/v1/"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini"
models = [default_model, "meta-llama/Llama-3.3-70B-Instruct-Turbo", "claude-3-haiku-20240307", "o3-mini", "mistralai/Mistral-Small-24B-Instruct-2501"]
duck_ai: DuckAI = None
model_aliases = {
"gpt-4": "gpt-4o-mini",
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"claude-3-haiku": "claude-3-haiku-20240307",
"mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
}
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 60,
**kwargs
) -> CreateResult:
if not has_requirements:
raise ImportError("duckai is not installed. Install it with `pip install -U duckai`.")
if cls.duck_ai is None:
cls.duck_ai = DuckAI(proxy=proxy, timeout=timeout)
model = cls.get_model(model)
yield cls.duck_ai.chat(get_last_user_message(messages), model, timeout)

View File

@@ -1,108 +0,0 @@
from __future__ import annotations
import random
from typing import AsyncIterator
from ..base_provider import AsyncAuthedProvider, ProviderModelMixin
from ...providers.helper import get_last_user_message
from ...requests import StreamSession, sse_stream, raise_for_status
from ...providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason
from ...typing import AsyncResult, Messages
from ...errors import MissingAuthError
class Kimi(AsyncAuthedProvider, ProviderModelMixin):
url = "https://www.kimi.com"
working = False
active_by_default = True
default_model = "kimi-k2"
models = [default_model]
model_aliases = {"moonshotai/Kimi-K2-Instruct": default_model}
@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
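# Anonymous auth: register a random device id against Kimi's device endpoint and use the returned access_token as the API key.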
device_id = str(random.randint(1000000000000000, 9999999999999999))
async with StreamSession(proxy=proxy, impersonate="chrome") as session:
async with session.post(
"https://www.kimi.com/api/device/register",
json={},
headers={
"x-msh-device-id": device_id,
"x-msh-platform": "web",
"x-traffic-id": device_id
}
) as response:
await raise_for_status(response)
data = await response.json()
if not data.get("access_token"):
raise Exception("No access token received")
yield AuthResult(
api_key=data.get("access_token"),
device_id=device_id,
)
@classmethod
async def create_authed(
cls,
model: str,
messages: Messages,
auth_result: AuthResult,
proxy: str = None,
conversation: JsonConversation = None,
web_search: bool = False,
**kwargs
) -> AsyncResult:
async with StreamSession(
proxy=proxy,
impersonate="chrome",
headers={
"Authorization": f"Bearer {auth_result.api_key}",
}
) as session:
if conversation is None:
async with session.post("https://www.kimi.com/api/chat", json={
"name":"未命名会话",  # "Untitled session"
"born_from":"home",
"kimiplus_id":"kimi",
"is_example":False,
"source":"web",
"tags":[]
}) as response:
try:
await raise_for_status(response)
except Exception as e:
if "匿名聊天使用次数超过" in str(e):
raise MissingAuthError("Anonymous chat usage limit exceeded")
raise e
chat_data = await response.json()
conversation = JsonConversation(chat_id=chat_data.get("id"))
yield conversation
data = {
"kimiplus_id": "kimi",
"extend": {"sidebar": True},
"model": "k2",
"use_search": web_search,
"messages": [
{
"role": "user",
"content": get_last_user_message(messages)
}
],
"refs": [],
"history": [],
"scene_labels": [],
"use_semantic_memory": False,
"use_deep_research": False
}
async with session.post(
f"https://www.kimi.com/api/chat/{conversation.chat_id}/completion/stream",
json=data
) as response:
await raise_for_status(response)
async for line in sse_stream(response):
if line.get("event") == "cmpl":
yield line.get("text")
elif line.get("event") == "rename":
yield TitleGeneration(line.get("text"))
elif line.get("event") == "all_done":
yield FinishReason("stop")
break

View File

@@ -1,126 +0,0 @@
from __future__ import annotations
import random
import json
from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...errors import ResponseError
from ...providers.response import FinishReason, Sources
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
API_URL = "https://www.perplexity.ai/socket.io/"
WS_URL = "wss://www.perplexity.ai/socket.io/"
class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
label = "Perplexity Labs"
url = "https://labs.perplexity.ai"
working = False
active_by_default = True
default_model = "r1-1776"
models = [
default_model,
"sonar-pro",
"sonar",
"sonar-reasoning",
"sonar-reasoning-pro",
]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"Origin": cls.url,
"Referer": f"{cls.url}/",
}
async with StreamSession(headers=headers, proxy=proxy, impersonate="chrome") as session:
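# Socket.IO handshake: open an Engine.IO polling transport to get a sid, post the anonymous JWT, confirm the session, then upgrade to a websocket (2probe/3probe, 5/6) before emitting the perplexity_labs event.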
t = format(random.getrandbits(32), "08x")
async with session.get(
f"{API_URL}?EIO=4&transport=polling&t={t}"
) as response:
await raise_for_status(response)
text = await response.text()
assert text.startswith("0")
sid = json.loads(text[1:])["sid"]
post_data = '40{"jwt":"anonymous-ask-user"}'
async with session.post(
f"{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}",
data=post_data
) as response:
await raise_for_status(response)
assert await response.text() == "OK"
async with session.get(
f"{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}",
data=post_data
) as response:
await raise_for_status(response)
assert (await response.text()).startswith("40")
async with session.ws_connect(f"{WS_URL}?EIO=4&transport=websocket&sid={sid}", autoping=False) as ws:
await ws.send_str("2probe")
assert(await ws.receive_str() == "3probe")
await ws.send_str("5")
assert(await ws.receive_str() == "6")
format_messages = []
last_is_assistant = False
for message in messages:
if message["role"] == "assistant":
if last_is_assistant:
continue
last_is_assistant = True
else:
last_is_assistant = False
if isinstance(message["content"], str):
format_messages.append({
"role": message["role"],
"content": message["content"]
})
message_data = {
"version": "2.18",
"source": "default",
"model": model,
"messages": format_messages
}
await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
last_message = 0
while True:
message = await ws.receive_str()
if message == "2":
if last_message == 0:
raise RuntimeError("Unknown error")
await ws.send_str("3")
continue
try:
if not message.startswith("42"):
continue
parsed_data = json.loads(message[2:])
message_type = parsed_data[0]
data = parsed_data[1]
# Handle error responses
if message_type.endswith("_query_progress") and data.get("status") == "failed":
error_message = data.get("text", "Unknown API error")
raise ResponseError(f"API Error: {error_message}\n")
# Handle normal responses
if "output" in data:
if last_message == 0 and model == cls.default_model:
yield "<think>"
yield data["output"][last_message:]
last_message = len(data["output"])
if data["final"]:
if data["citations"]:
yield Sources(data["citations"])
yield FinishReason("stop")
break
except ResponseError as e:
# Re-raise ResponseError directly
raise e
except Exception as e:
raise ResponseError(f"Error processing message: {message}") from e

View File

@@ -1,69 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat.ai365vip.com"
api_endpoint = "/api/chat"
working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'gpt-3.5-turbo-16k',
'gpt-4o',
]
model_aliases = {
"gpt-3.5-turbo": "gpt-3.5-turbo-16k",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": cls.url,
"referer": f"{cls.url}/en",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-arch": '"x86"',
"sec-ch-ua-bitness": '"64"',
"sec-ch-ua-full-version": '"127.0.6533.119"',
"sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-model": '""',
"sec-ch-ua-platform": '"Linux"',
"sec-ch-ua-platform-version": '"4.19.276"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
}
async with ClientSession(headers=headers) as session:
data = {
"model": {
"id": model,
"name": "GPT-3.5",
"maxLength": 3000,
"tokenLimit": 2048
},
"messages": [{"role": "user", "content": format_prompt(messages)}],
"key": "",
"prompt": "You are a helpful assistant.",
"temperature": 1
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
yield chunk.decode()

View File

@@ -1,116 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import time
import hmac
import hashlib
import json
import random
from ...typing import AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...providers.response import FinishReason
class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.aiuncensored.info/ai_uncensored"
api_key = "62852b00cb9e44bca86f0ec7e7455dc6"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "hermes3-70b"
models = [default_model]
model_aliases = {"hermes-3": "hermes3-70b"}
@staticmethod
def calculate_signature(timestamp: str, json_dict: dict) -> str:
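# HMAC-SHA256 over "<timestamp><json payload>" using the hard-coded shared secret; the digest is sent as the x-signature header.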
message = f"{timestamp}{json.dumps(json_dict)}"
secret_key = b'your-super-secret-key-replace-in-production'
signature = hmac.new(
secret_key,
message.encode('utf-8'),
hashlib.sha256
).hexdigest()
return signature
@staticmethod
def get_server_url() -> str:
servers = [
"https://llm-server-nov24-ibak.onrender.com",
"https://llm-server-nov24-qv2w.onrender.com",
"https://llm-server-nov24.onrender.com"
]
return random.choice(servers)
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
stream: bool = False,
proxy: str = None,
api_key: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
timestamp = str(int(time.time()))
json_dict = {
"messages": [{"role": "user", "content": format_prompt(messages)}],
"model": model,
"stream": stream
}
signature = cls.calculate_signature(timestamp, json_dict)
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'application/json',
'origin': 'https://www.aiuncensored.info',
'referer': 'https://www.aiuncensored.info/',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
'x-api-key': cls.api_key,
'x-timestamp': timestamp,
'x-signature': signature
}
url = f"{cls.get_server_url()}/api/chat"
async with ClientSession(headers=headers) as session:
async with session.post(url, json=json_dict, proxy=proxy) as response:
await raise_for_status(response)
if stream:
full_response = ""
async for line in response.content:
if line:
try:
line_text = line.decode('utf-8')
if line_text.startswith('data: '):
data = line_text[6:]
if data == '[DONE]':
yield FinishReason("stop")
break
try:
json_data = json.loads(data)
if 'data' in json_data:
yield json_data['data']
full_response += json_data['data']
except json.JSONDecodeError:
continue
except UnicodeDecodeError:
continue
if full_response:
yield FinishReason("length")
else:
response_json = await response.json()
if 'content' in response_json:
yield response_json['content']
yield FinishReason("length")

View File

@@ -1,61 +0,0 @@
from __future__ import annotations
import json
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_random_string, format_prompt
class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
site_url = "https://aichatonline.org"
url = "https://aichatonlineorg.erweima.ai"
api_endpoint = "/aichatonline/api/chat/gpt"
working = False
default_model = 'gpt-4o-mini'
@classmethod
async def grab_token(
cls,
session: ClientSession,
proxy: str
):
async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
response.raise_for_status()
return (await response.json())['data']
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/chatgpt/chat/",
"Content-Type": "application/json",
"Origin": cls.url,
"Alt-Used": "aichatonline.org",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"TE": "trailers"
}
async with ClientSession(headers=headers) as session:
data = {
"conversationId": get_random_string(),
"prompt": format_prompt(messages),
}
headers['UniqueId'] = await cls.grab_token(session, proxy)
async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
try:
yield json.loads(chunk)['data']['message']
except:
continue

View File

@@ -1,105 +0,0 @@
from __future__ import annotations
import json
import base64
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...providers.response import ImageResponse
from ..helper import format_prompt
class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://ai-chats.org"
api_endpoint = "https://ai-chats.org/chat/send2/"
working = False
supports_message_history = True
default_model = 'gpt-4'
models = ['gpt-4', 'dalle']
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"content-type": "application/json",
"origin": cls.url,
"pragma": "no-cache",
"referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/",
"sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D',
}
async with ClientSession(headers=headers) as session:
if model == 'dalle':
prompt = messages[-1]['content'] if messages else ""
else:
prompt = format_prompt(messages)
data = {
"type": "image" if model == 'dalle' else "chat",
"messagesHistory": [
{
"from": "you",
"content": prompt
}
]
}
try:
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
if model == 'dalle':
response_json = await response.json()
if 'data' in response_json and response_json['data']:
image_url = response_json['data'][0].get('url')
if image_url:
async with session.get(image_url) as img_response:
img_response.raise_for_status()
image_data = await img_response.read()
base64_image = base64.b64encode(image_data).decode('utf-8')
base64_url = f"data:image/png;base64,{base64_image}"
yield ImageResponse(image_url, prompt)
else:
yield f"Error: No image URL found in the response. Full response: {response_json}"
else:
yield f"Error: Unexpected response format. Full response: {response_json}"
else:
full_response = await response.text()
message = ""
for line in full_response.split('\n'):
if line.startswith('data: ') and line != 'data: ':
message += line[6:]
message = message.strip()
yield message
except Exception as e:
yield f"Error occurred: {str(e)}"
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> str:
async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
if isinstance(response, ImageResponse):
return response.images[0]
return response

View File

@@ -1,64 +0,0 @@
from __future__ import annotations
from ...typing import Messages
from ..base_provider import AsyncProvider, format_prompt
from ..helper import get_cookies
from ...requests import StreamSession
class Aichat(AsyncProvider):
url = "https://chat-gpt.org/chat"
working = False
supports_gpt_35_turbo = True
@staticmethod
async def create_async(
model: str,
messages: Messages,
proxy: str = None, **kwargs) -> str:
cookies = get_cookies('chat-gpt.org') if not kwargs.get('cookies') else kwargs.get('cookies')
if not cookies:
raise RuntimeError(
"g4f.provider.Aichat requires cookies, [refresh https://chat-gpt.org on chrome]"
)
headers = {
'authority': 'chat-gpt.org',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'content-type': 'application/json',
'origin': 'https://chat-gpt.org',
'referer': 'https://chat-gpt.org/chat',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(headers=headers,
cookies=cookies,
timeout=6,
proxies={"https": proxy} if proxy else None,
impersonate="chrome110", verify=False) as session:
json_data = {
"message": format_prompt(messages),
"temperature": kwargs.get('temperature', 0.5),
"presence_penalty": 0,
"top_p": kwargs.get('top_p', 1),
"frequency_penalty": 0,
}
async with session.post("https://chat-gpt.org/api/text",
json=json_data) as response:
response.raise_for_status()
result = await response.json()
if not result['response']:
raise Exception(f"Error Response: {result}")
return result["message"]

View File

@@ -1,90 +0,0 @@
from __future__ import annotations
import hashlib
import time
import uuid
import json
from datetime import datetime
from aiohttp import ClientSession
from ...typing import SHA256, AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
class Ails(AsyncGeneratorProvider):
url = "https://ai.ls"
working = False
supports_message_history = True
supports_gpt_35_turbo = True
@staticmethod
async def create_async_generator(
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"authority": "api.caipacity.com",
"accept": "*/*",
"accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
"authorization": "Bearer free",
"client-id": str(uuid.uuid4()),
"client-v": "0.1.278",
"content-type": "application/json",
"origin": "https://ai.ls",
"referer": "https://ai.ls/",
"sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Windows"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"from-url": "https://ai.ls/?chat=1"
}
async with ClientSession(
headers=headers
) as session:
timestamp = _format_timestamp(int(time.time() * 1000))
json_data = {
"model": "gpt-3.5-turbo",
"temperature": kwargs.get("temperature", 0.6),
"stream": True,
"messages": messages,
"d": datetime.now().strftime("%Y-%m-%d"),
"t": timestamp,
"s": _hash({"t": timestamp, "m": messages[-1]["content"]}),
}
async with session.post(
"https://api.caipacity.com/v1/chat/completions",
proxy=proxy,
json=json_data
) as response:
response.raise_for_status()
start = "data: "
async for line in response.content:
line = line.decode('utf-8')
if line.startswith(start) and line != "data: [DONE]":
line = line[len(start):-1]
line = json.loads(line)
token = line["choices"][0]["delta"].get("content")
if token:
if "ai.ls" in token or "ai.ci" in token:
raise Exception(f"Response Error: {token}")
yield token
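# Request-signing helpers: _hash digests the timestamp, the last message, a fixed salt, and the message length with SHA-256; _format_timestamp forces the millisecond timestamp to end in an odd digit.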
def _hash(json_data: dict[str, str]) -> SHA256:
base_string: str = f'{json_data["t"]}:{json_data["m"]}:WI,2rU#_r:r~aF4aJ36[.Z(/8Rv93Rf:{len(json_data["m"])}'
return SHA256(hashlib.sha256(base_string.encode()).hexdigest())
def _format_timestamp(timestamp: int) -> str:
e = timestamp
n = e % 10
r = n + 1 if n % 2 == 0 else n
return str(e - n + r)

View File

@@ -1,216 +0,0 @@
from __future__ import annotations
import json
from uuid import uuid4
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages, MediaListType
from ...image import to_bytes, is_accepted_format, to_data_uri
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...requests.raise_for_status import raise_for_status
from ...providers.response import FinishReason, JsonConversation
from ..helper import format_prompt, get_last_user_message, format_media_prompt
from ...tools.media import merge_media
class Conversation(JsonConversation):
x_anonymous_user_id: str = None
def __init__(self, model: str):
super().__init__() # Ensure parent class is initialized
self.model = model
self.messages = [] # Instance-specific list
self.parent = None # Initialize parent as instance attribute
if not self.x_anonymous_user_id:
self.x_anonymous_user_id = str(uuid4())
class AllenAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Ai2 Playground"
url = "https://playground.allenai.org"
login_url = None
api_endpoint = "https://olmo-api.allen.ai/v4/message/stream"
working = False
needs_auth = False
use_nodriver = False
supports_stream = True
supports_system_message = False
supports_message_history = True
default_model = 'tulu3-405b'
default_vision_model = 'mm-olmo-uber-model-v4-synthetic'
vision_models = [default_vision_model]
# Map models to their required hosts
model_hosts = {
default_model: "inferd",
"OLMo-2-1124-13B-Instruct": "modal",
"tulu-3-1-8b": "modal",
"Llama-3-1-Tulu-3-70B": "modal",
"olmoe-0125": "modal",
"olmo-2-0325-32b-instruct": "modal",
"mm-olmo-uber-model-v4-synthetic": "modal",
}
models = list(model_hosts.keys())
model_aliases = {
"tulu-3-405b": default_model,
"olmo-1-7b": "olmoe-0125",
"olmo-2-13b": "OLMo-2-1124-13B-Instruct",
"olmo-2-32b": "olmo-2-0325-32b-instruct",
"tulu-3-1-8b": "tulu-3-1-8b",
"tulu-3-70b": "Llama-3-1-Tulu-3-70B",
"llama-3.1-405b": "tulu3-405b",
"llama-3.1-8b": "tulu-3-1-8b",
"llama-3.1-70b": "Llama-3-1-Tulu-3-70B",
"olmo-4-synthetic": "mm-olmo-uber-model-v4-synthetic",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
host: str = None,
private: bool = True,
top_p: float = None,
temperature: float = None,
conversation: Conversation = None,
return_conversation: bool = True,
media: MediaListType = None,
**kwargs
) -> AsyncResult:
actual_model = cls.get_model(model)
prompt = format_prompt(messages) if conversation is None else get_last_user_message(messages)
# Determine the correct host for the model
if host is None:
# Use model-specific host from model_hosts dictionary
host = cls.model_hosts[actual_model]
# Initialize or update conversation
# For mm-olmo-uber-model-v4-synthetic, always create a new conversation
if conversation is None or actual_model == 'mm-olmo-uber-model-v4-synthetic':
conversation = Conversation(actual_model)
# Generate new boundary for each request
boundary = f"----WebKitFormBoundary{uuid4().hex}"
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": f"multipart/form-data; boundary={boundary}",
"origin": cls.url,
"referer": f"{cls.url}/",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
"x-anonymous-user-id": conversation.x_anonymous_user_id,
}
# Build multipart form data
form_data = [
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="model"\r\n\r\n{cls.get_model(model)}\r\n',
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="host"\r\n\r\n{host}\r\n',
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="content"\r\n\r\n{prompt}\r\n',
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="private"\r\n\r\n{str(private).lower()}\r\n'
]
# Add the parent id if the conversation has one
if hasattr(conversation, 'parent') and conversation.parent:
form_data.append(
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="parent"\r\n\r\n{conversation.parent}\r\n'
)
# Add optional parameters
if temperature is not None:
form_data.append(
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="temperature"\r\n\r\n{temperature}\r\n'
)
if top_p is not None:
form_data.append(
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="top_p"\r\n\r\n{top_p}\r\n'
)
# Always create a new conversation when an image is attached to avoid 403 errors
if media is not None and len(media) > 0:
conversation = Conversation(actual_model)
# For each image in the media list (using merge_media to handle different formats)
for image, image_name in merge_media(media, messages):
image_bytes = to_bytes(image)
form_data.extend([
f'--{boundary}\r\n'
f'Content-Disposition: form-data; name="files"; filename="{image_name}"\r\n'
f'Content-Type: {is_accepted_format(image_bytes)}\r\n\r\n'
])
form_data.append(image_bytes.decode('latin1'))
form_data.append('\r\n')
form_data.append(f'--{boundary}--\r\n')
data = "".join(form_data).encode('latin1')
async with ClientSession(headers=headers) as session:
async with session.post(
cls.api_endpoint,
data=data,
proxy=proxy,
) as response:
await raise_for_status(response)
current_parent = None
async for line in response.content:
line = line.strip()
if not line:
continue
try:
data = json.loads(line)
except json.JSONDecodeError:
continue
if isinstance(data, dict):
# Update the parent ID
if data.get("children"):
for child in data["children"]:
if child.get("role") == "assistant":
current_parent = child.get("id")
break
# Only process content from the assistant
if "message" in data and data.get("content"):
content = data["content"]
# Skip empty content blocks
if content.strip():
yield content
# Processing the final response
if data.get("final") or data.get("finish_reason") == "stop":
if current_parent:
# Ensure the parent attribute exists before setting it
if not hasattr(conversation, 'parent'):
setattr(conversation, 'parent', None)
conversation.parent = current_parent
# Append the exchange to the conversation history
conversation.messages.extend([
{"role": "user", "content": prompt},
{"role": "assistant", "content": content}
])
if return_conversation:
yield conversation
yield FinishReason("stop")
return

View File

@@ -1,251 +0,0 @@
from __future__ import annotations
import json
import uuid
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...providers.response import ImageResponse
from ...requests import StreamSession, raise_for_status
from ...errors import ResponseStatusError
MODELS = {
'chat': {
'gpt-4o-2024-11-20': {'persona_id': "gpt"},
'gpt-4o': {'persona_id': "summarizer"},
'gpt-4o-mini': {'persona_id': "amigo"},
'o1-preview-': {'persona_id': "openai-o-one"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'o1-preview-2024-09-12-': {'persona_id': "orion"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'o1-mini-': {'persona_id': "openai-o-one-mini"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo': {'persona_id': "llama-three-point-one"},
'meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo': {'persona_id': "llama-3-2"},
'codellama/CodeLlama-34b-Instruct-hf': {'persona_id': "codellama-CodeLlama-34b-Instruct-hf"},
'gemini-1.5-pro': {'persona_id': "gemini-1-5-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'gemini-1.5-flash': {'persona_id': "gemini-1.5-flash"},
'claude-3-5-sonnet-20240620': {'persona_id': "claude"},
'claude-3-5-sonnet-20241022': {'persona_id': "clude-claude-3-5-sonnet-20241022"},
'claude-3-5-haiku-latest': {'persona_id': "3-5-haiku"},
'Qwen/Qwen2.5-72B-Instruct-Turbo': {'persona_id': "qwen-2-5"},
'google/gemma-2b-it': {'persona_id': "google-gemma-2b-it"},
'google/gemma-7b': {'persona_id': "google-gemma-7b"}, # Error handling AIML chat completion stream
'Gryphe/MythoMax-L2-13b': {'persona_id': "Gryphe-MythoMax-L2-13b"},
'mistralai/Mistral-7B-Instruct-v0.3': {'persona_id': "mistralai-Mistral-7B-Instruct-v0.1"},
'mistralai/mistral-tiny': {'persona_id': "mistralai-mistral-tiny"},
'mistralai/mistral-nemo': {'persona_id': "mistralai-mistral-nemo"},
'deepseek-ai/deepseek-llm-67b-chat': {'persona_id': "deepseek-ai-deepseek-llm-67b-chat"},
'databricks/dbrx-instruct': {'persona_id': "databricks-dbrx-instruct"},
'NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO': {'persona_id': "NousResearch-Nous-Hermes-2-Mixtral-8x7B-DPO"},
'x-ai/grok-beta': {'persona_id': "x-ai-grok-beta"},
'anthracite-org/magnum-v4-72b': {'persona_id': "anthracite-org-magnum-v4-72b"},
'cohere/command-r-plus': {'persona_id': "cohere-command-r-plus"},
'ai21/jamba-1-5-mini': {'persona_id': "ai21-jamba-1-5-mini"},
'zero-one-ai/Yi-34B': {'persona_id': "zero-one-ai-Yi-34B"} # Error handling AIML chat completion stream
},
'image': {
'flux-pro/v1.1': {'persona_id': "flux-1-1-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'flux-realism': {'persona_id': "flux-realism"},
'flux-pro': {'persona_id': "flux-pro"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'flux-pro/v1.1-ultra': {'persona_id': "flux-pro-v1.1-ultra"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'flux-pro/v1.1-ultra-raw': {'persona_id': "flux-pro-v1.1-ultra-raw"}, # Amigo, your balance is not enough to make the request, wait until 12 UTC or upgrade your plan
'flux/dev': {'persona_id': "flux-dev"},
'dall-e-3': {'persona_id': "dalle-three"},
'recraft-v3': {'persona_id': "recraft"}
}
}
class AmigoChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://amigochat.io/chat/"
chat_api_endpoint = "https://api.amigochat.io/v1/chat/completions"
image_api_endpoint = "https://api.amigochat.io/v1/images/generations"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'gpt-4o-mini'
chat_models = list(MODELS['chat'].keys())
image_models = list(MODELS['image'].keys())
models = chat_models + image_models
model_aliases = {
### chat ###
"gpt-4o": "gpt-4o-2024-11-20",
"gpt-4o-mini": "gpt-4o-mini",
"llama-3.1-405b": "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
"llama-3.2-90b": "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
"codellama-34b": "codellama/CodeLlama-34b-Instruct-hf",
"gemini-flash": "gemini-1.5-flash",
"claude-3.5-sonnet": "claude-3-5-sonnet-20240620",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"claude-3.5-haiku": "claude-3-5-haiku-latest",
"qwen-2.5-72b": "Qwen/Qwen2.5-72B-Instruct-Turbo",
"gemma-2b": "google/gemma-2b-it",
"mythomax-13b": "Gryphe/MythoMax-L2-13b",
"mixtral-7b": "mistralai/Mistral-7B-Instruct-v0.3",
"mistral-nemo": "mistralai/mistral-nemo",
"deepseek-chat": "deepseek-ai/deepseek-llm-67b-chat",
"dbrx-instruct": "databricks/dbrx-instruct",
"mixtral-8x7b-dpo": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
"grok-beta": "x-ai/grok-beta",
"magnum-72b": "anthracite-org/magnum-v4-72b",
"command-r-plus": "cohere/command-r-plus",
"jamba-mini": "ai21/jamba-1-5-mini",
### image ###
"flux-dev": "flux/dev",
}
@classmethod
def get_personaId(cls, model: str) -> str:
if model in cls.chat_models:
return MODELS['chat'][model]['persona_id']
elif model in cls.image_models:
return MODELS['image'][model]['persona_id']
else:
raise ValueError(f"Unknown model: {model}")
@staticmethod
def generate_chat_id() -> str:
"""Generate a chat ID in format: 8-4-4-4-12 hexadecimal digits"""
return str(uuid.uuid4())
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
stream: bool = False,
timeout: int = 300,
frequency_penalty: float = 0,
max_tokens: int = 4000,
presence_penalty: float = 0,
temperature: float = 0.5,
top_p: float = 0.95,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
device_uuid = str(uuid.uuid4())
max_retries = 3
retry_count = 0
while retry_count < max_retries:
try:
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"authorization": "Bearer",
"cache-control": "no-cache",
"content-type": "application/json",
"origin": cls.url,
"pragma": "no-cache",
"priority": "u=1, i",
"referer": f"{cls.url}/",
"sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
"x-device-language": "en-US",
"x-device-platform": "web",
"x-device-uuid": device_uuid,
"x-device-version": "1.0.45"
}
async with StreamSession(headers=headers, proxy=proxy) as session:
if model not in cls.image_models:
data = {
"chatId": cls.generate_chat_id(),
"frequency_penalty": frequency_penalty,
"max_tokens": max_tokens,
"messages": messages,
"model": model,
"personaId": cls.get_personaId(model),
"presence_penalty": presence_penalty,
"stream": stream,
"temperature": temperature,
"top_p": top_p
}
async with session.post(cls.chat_api_endpoint, json=data, timeout=timeout) as response:
await raise_for_status(response)
async for line in response.iter_lines():
line = line.decode('utf-8').strip()
if line.startswith('data: '):
if line == 'data: [DONE]':
break
try:
chunk = json.loads(line[6:]) # Remove 'data: ' prefix
if 'choices' in chunk and len(chunk['choices']) > 0:
choice = chunk['choices'][0]
if 'delta' in choice:
content = choice['delta'].get('content')
elif 'text' in choice:
content = choice['text']
else:
content = None
if content:
yield content
except json.JSONDecodeError:
pass
else:
# Image generation
prompt = messages[-1]['content']
data = {
"prompt": prompt,
"model": model,
"personaId": cls.get_personaId(model)
}
async with session.post(cls.image_api_endpoint, json=data) as response:
await raise_for_status(response)
response_data = await response.json()
if "data" in response_data:
image_urls = []
for item in response_data["data"]:
if "url" in item:
image_url = item["url"]
image_urls.append(image_url)
if image_urls:
yield ImageResponse(image_urls, prompt)
else:
yield None
break
except (ResponseStatusError, Exception) as e:
retry_count += 1
if retry_count >= max_retries:
raise e
device_uuid = str(uuid.uuid4())

View File

@@ -1,47 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ...requests import get_args_from_browser  # assumed import path for the helper used below; not shown in this hunk
class Aura(AsyncGeneratorProvider):
url = "https://openchat.team"
working = False
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
temperature: float = 0.5,
max_tokens: int = 8192,
webdriver = None,
**kwargs
) -> AsyncResult:
args = get_args_from_browser(cls.url, webdriver, proxy)
async with ClientSession(**args) as session:
new_messages = []
system_message = []
for message in messages:
if message["role"] == "system":
system_message.append(message["content"])
else:
new_messages.append(message)
data = {
"model": {
"id": "openchat_3.6",
"name": "OpenChat 3.6 (latest)",
"maxLength": 24576,
"tokenLimit": max_tokens
},
"messages": new_messages,
"key": "",
"prompt": "\n".join(system_message),
"temperature": temperature
}
async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")

View File

@@ -1,233 +0,0 @@
from __future__ import annotations
from ...typing import Messages, CreateResult
from ...providers.base_provider import AbstractProvider, ProviderModelMixin
import time
import uuid
import random
import json
from requests import Session
from ..openai.new import (
get_config,
get_answer_token,
process_turnstile,
get_requirements_token
)
def format_conversation(messages: list):
conversation = []
for message in messages:
conversation.append({
'id': str(uuid.uuid4()),
'author': {
'role': message['role'],
},
'content': {
'content_type': 'text',
'parts': [
message['content'],
],
},
'metadata': {
'serialization_metadata': {
'custom_symbol_offsets': [],
},
},
'create_time': round(time.time(), 3),
})
return conversation
def init_session(user_agent):
session = Session()
cookies = {
'_dd_s': '',
}
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.8',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'priority': 'u=0, i',
'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
'sec-ch-ua-arch': '"arm"',
'sec-ch-ua-bitness': '"64"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-model': '""',
'sec-ch-ua-platform': '"macOS"',
'sec-ch-ua-platform-version': '"14.4.0"',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': user_agent,
}
session.get('https://chatgpt.com/', cookies=cookies, headers=headers)
return session
class ChatGpt(AbstractProvider, ProviderModelMixin):
label = "ChatGpt"
url = "https://chatgpt.com"
working = False
supports_message_history = True
supports_system_message = True
supports_stream = True
default_model = 'auto'
models = [
default_model,
'gpt-3.5-turbo',
'gpt-4o',
'gpt-4o-mini',
'gpt-4',
'gpt-4-turbo',
'chatgpt-4o-latest',
]
model_aliases = {
"gpt-4o": "chatgpt-4o-latest",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
**kwargs
) -> CreateResult:
model = cls.get_model(model)
if model not in cls.models:
raise ValueError(f"Model '{model}' is not available. Available models: {', '.join(cls.models)}")
user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36'
session: Session = init_session(user_agent)
config = get_config(user_agent)
pow_req = get_requirements_token(config)
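# Anonymous-backend handshake: fetch the chat requirements, then answer the proof-of-work and, if requested, the Turnstile challenge before posting the conversation.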
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.8',
'content-type': 'application/json',
'oai-device-id': f'{uuid.uuid4()}',
'oai-language': 'en-US',
'origin': 'https://chatgpt.com',
'priority': 'u=1, i',
'referer': 'https://chatgpt.com/',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'sec-gpc': '1',
'user-agent': f'{user_agent}'
}
response = session.post('https://chatgpt.com/backend-anon/sentinel/chat-requirements',
headers=headers, json={'p': pow_req})
if response.status_code != 200:
return
response_data = response.json()
if "detail" in response_data and "Unusual activity" in response_data["detail"]:
return
turnstile = response_data.get('turnstile', {})
turnstile_required = turnstile.get('required')
pow_conf = response_data.get('proofofwork', {})
if turnstile_required:
turnstile_dx = turnstile.get('dx')
turnstile_token = process_turnstile(turnstile_dx, pow_req)
headers = {**headers,
'openai-sentinel-turnstile-token': turnstile_token,
'openai-sentinel-chat-requirements-token': response_data.get('token'),
'openai-sentinel-proof-token': get_answer_token(
pow_conf.get('seed'), pow_conf.get('difficulty'), config
)}
json_data = {
'action': 'next',
'messages': format_conversation(messages),
'parent_message_id': str(uuid.uuid4()),
'model': model,
'timezone_offset_min': -120,
'suggestions': [
'Can you help me create a personalized morning routine that would help increase my productivity throughout the day? Start by asking me about my current habits and what activities energize me in the morning.',
'Could you help me plan a relaxing day that focuses on activities for rejuvenation? To start, can you ask me what my favorite forms of relaxation are?',
'I have a photoshoot tomorrow. Can you recommend me some colors and outfit options that will look good on camera?',
'Make up a 5-sentence story about "Sharky", a tooth-brushing shark superhero. Make each sentence a bullet point.',
],
'history_and_training_disabled': False,
'conversation_mode': {
'kind': 'primary_assistant',
},
'force_paragen': False,
'force_paragen_model_slug': '',
'force_nulligen': False,
'force_rate_limit': False,
'reset_rate_limits': False,
'websocket_request_id': str(uuid.uuid4()),
'system_hints': [],
'force_use_sse': True,
'conversation_origin': None,
'client_contextual_info': {
'is_dark_mode': True,
'time_since_loaded': random.randint(22, 33),
'page_height': random.randint(600, 900),
'page_width': random.randint(500, 800),
'pixel_ratio': 2,
'screen_height': random.randint(800, 1200),
'screen_width': random.randint(1200, 2000),
},
}
time.sleep(2)
response = session.post('https://chatgpt.com/backend-anon/conversation',
headers=headers, json=json_data, stream=True)
response.raise_for_status()
replace = ''
for line in response.iter_lines():
if line:
decoded_line = line.decode()
if decoded_line.startswith('data:'):
json_string = decoded_line[6:].strip()
if json_string == '[DONE]':
break
if json_string:
try:
data = json.loads(json_string)
except json.JSONDecodeError:
continue
if data.get('message') and data['message'].get('author'):
role = data['message']['author'].get('role')
if role == 'assistant':
tokens = data['message']['content'].get('parts', [])
if tokens:
yield tokens[0].replace(replace, '')
replace = tokens[0]

View File

@@ -1,137 +0,0 @@
from __future__ import annotations
import os
import re
import json
try:
from curl_cffi.requests import Session
has_curl_cffi = True
except ImportError:
has_curl_cffi = False
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...errors import MissingRequirementsError
class ChatGptEs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgpt.es"
api_endpoint = "https://chatgpt.es/wp-admin/admin-ajax.php"
working = False
supports_stream = True
supports_system_message = False
supports_message_history = False
default_model = 'gpt-4o'
models = ['gpt-4', default_model, 'gpt-4o-mini']
SYSTEM_PROMPT = "Your default language is English. Always respond in English unless the user's message is in a different language. If the user's message is not in English, respond in the language of the user's message. Maintain this language behavior throughout the conversation unless explicitly instructed otherwise. User input:"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
if not has_curl_cffi:
raise MissingRequirementsError('Install or update "curl_cffi" package | pip install -U curl_cffi')
model = cls.get_model(model)
prompt = f"{cls.SYSTEM_PROMPT} {format_prompt(messages)}"
# Use curl_cffi with automatic Cloudflare bypass
session = Session()
session.headers.update({
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
"referer": cls.url,
"origin": cls.url,
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/x-www-form-urlencoded; charset=UTF-8",
"x-requested-with": "XMLHttpRequest",
})
if proxy:
session.proxies = {"https": proxy, "http": proxy}
# First request to get nonce and post_id
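# The site appears to run the WordPress "AI Power" (wpaicg) chat plugin, so the _wpnonce and post_id must be scraped from the page before posting to admin-ajax.php.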
initial_response = session.get(cls.url, impersonate="chrome110")
initial_text = initial_response.text
# More comprehensive nonce extraction
nonce_patterns = [
r'<input\s+type=[\'"]hidden[\'"]\s+name=[\'"]_wpnonce[\'"]\s+value=[\'"]([^\'"]+)[\'"]',
r'"_wpnonce":"([^"]+)"',
r'var\s+wpaicg_nonce\s*=\s*[\'"]([^\'"]+)[\'"]',
r'wpaicg_nonce\s*:\s*[\'"]([^\'"]+)[\'"]'
]
nonce_ = None
for pattern in nonce_patterns:
match = re.search(pattern, initial_text)
if match:
nonce_ = match.group(1)
break
if not nonce_:
# Try to find any nonce-like pattern as a last resort
general_nonce = re.search(r'nonce[\'"]?\s*[=:]\s*[\'"]([a-zA-Z0-9]+)[\'"]', initial_text)
if general_nonce:
nonce_ = general_nonce.group(1)
else:
# Fallback, but this likely won't work
nonce_ = "8cf9917be2"
# Look for post_id in HTML
post_id_patterns = [
r'<input\s+type=[\'"]hidden[\'"]\s+name=[\'"]post_id[\'"]\s+value=[\'"]([^\'"]+)[\'"]',
r'"post_id":"([^"]+)"',
r'var\s+post_id\s*=\s*[\'"]?(\d+)[\'"]?'
]
post_id = None
for pattern in post_id_patterns:
match = re.search(pattern, initial_text)
if match:
post_id = match.group(1)
break
if not post_id:
post_id = "106" # Default from curl example
client_id = os.urandom(5).hex()
# Prepare data
data = {
'_wpnonce': nonce_,
'post_id': post_id,
'url': cls.url,
'action': 'wpaicg_chat_shortcode_message',
'message': prompt,
'bot_id': '0',
'chatbot_identity': 'shortcode',
'wpaicg_chat_client_id': client_id,
'wpaicg_chat_history': json.dumps([f"Human: {prompt}"])
}
# Execute POST request
response = session.post(
cls.api_endpoint,
data=data,
impersonate="chrome110"
)
if response.status_code != 200:
raise ValueError(f"Error: {response.status_code} - {response.text}")
result = response.json()
if "data" in result:
if isinstance(result['data'], str) and "Du musst das Kästchen anklicken!" in result['data']:
raise ValueError(result['data'])
yield result['data']
else:
raise ValueError(f"Unexpected response format: {result}")

View File

@@ -1,75 +0,0 @@
from __future__ import annotations
import os
import re
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ...requests.raise_for_status import raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class ChatGptt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptt.me"
api_endpoint = "https://chatgptt.me/wp-admin/admin-ajax.php"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'gpt-4o'
models = ['gpt-4', default_model, 'gpt-4o-mini']
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"authority": "chatgptt.me",
"accept": "application/json",
"origin": cls.url,
"referer": f"{cls.url}/chat",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
}
async with ClientSession(headers=headers) as session:
# Get initial page content
initial_response = await session.get(cls.url)
await raise_for_status(initial_response)
html = await initial_response.text()
# Extract nonce and post ID with error handling
nonce_match = re.search(r'data-nonce=["\']([^"\']+)["\']', html)
post_id_match = re.search(r'data-post-id=["\']([^"\']+)["\']', html)
if not nonce_match or not post_id_match:
raise RuntimeError("Required authentication tokens not found in page HTML")
nonce_ = nonce_match.group(1)
post_id = post_id_match.group(1)
# Prepare payload with session data
payload = {
'_wpnonce': nonce_,
'post_id': post_id,
'url': cls.url,
'action': 'wpaicg_chat_shortcode_message',
'message': format_prompt(messages),
'bot_id': '0',
'chatbot_identity': 'shortcode',
'wpaicg_chat_client_id': os.urandom(5).hex(),
'wpaicg_chat_history': None
}
# Stream the response
async with session.post(cls.api_endpoint, headers=headers, data=payload, proxy=proxy) as response:
await raise_for_status(response)
result = await response.json()
yield result['data']

View File

@@ -1,88 +0,0 @@
from __future__ import annotations
import re
from ...requests import StreamSession, raise_for_status
from ...typing import Messages
from ..base_provider import AsyncProvider, ProviderModelMixin
from ..helper import format_prompt
class Chatgpt4o(AsyncProvider, ProviderModelMixin):
url = "https://chatgpt4o.one"
working = False
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
models = [
'gpt-4o-mini-2024-07-18',
]
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
}
@classmethod
async def create_async(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
cookies: dict = None,
**kwargs
) -> str:
headers = {
'authority': 'chatgpt4o.one',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'origin': 'https://chatgpt4o.one',
'referer': 'https://chatgpt4o.one',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(
headers=headers,
cookies=cookies,
impersonate="chrome",
proxies={"all": proxy},
timeout=timeout
) as session:
if not cls._post_id or not cls._nonce:
async with session.get(f"{cls.url}/") as response:
await raise_for_status(response)
response_text = await response.text()
post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
if not post_id_match:
raise RuntimeError("No post ID found")
cls._post_id = post_id_match.group(1)
if not nonce_match:
raise RuntimeError("No nonce found")
cls._nonce = nonce_match.group(1)
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,
"post_id": cls._post_id,
"url": cls.url,
"action": "wpaicg_chat_shortcode_message",
"message": prompt,
"bot_id": "0"
}
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
await raise_for_status(response)
response_json = await response.json()
if "data" not in response_json:
raise RuntimeError("Unexpected response structure: 'data' field missing")
return response_json["data"]

View File

@@ -1,106 +0,0 @@
from __future__ import annotations
import re
import json
import asyncio
from ...requests import StreamSession, raise_for_status
from ...typing import Messages, AsyncGenerator
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chatgptfree.ai"
working = False
_post_id = None
_nonce = None
default_model = 'gpt-4o-mini-2024-07-18'
models = [default_model]
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
cookies: dict = None,
**kwargs
) -> AsyncGenerator[str, None]:
headers = {
'authority': 'chatgptfree.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'origin': 'https://chatgptfree.ai',
'referer': 'https://chatgptfree.ai/chat/',
'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
async with StreamSession(
headers=headers,
cookies=cookies,
impersonate="chrome",
proxies={"all": proxy},
timeout=timeout
) as session:
if not cls._nonce:
async with session.get(f"{cls.url}/") as response:
await raise_for_status(response)
response_text = await response.text()
result = re.search(r'data-post-id="([0-9]+)"', response_text)
if not result:
raise RuntimeError("No post id found")
cls._post_id = result.group(1)
result = re.search(r'data-nonce="(.*?)"', response_text)
if result:
cls._nonce = result.group(1)
else:
raise RuntimeError("No nonce found")
prompt = format_prompt(messages)
data = {
"_wpnonce": cls._nonce,
"post_id": cls._post_id,
"url": cls.url,
"action": "wpaicg_chat_shortcode_message",
"message": prompt,
"bot_id": "0"
}
async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
await raise_for_status(response)
buffer = ""
async for line in response.iter_lines():
line = line.decode('utf-8').strip()
if line.startswith('data: '):
data = line[6:]
if data == '[DONE]':
break
try:
json_data = json.loads(data)
content = json_data['choices'][0]['delta'].get('content', '')
if content:
yield content
except json.JSONDecodeError:
continue
elif line:
buffer += line
if buffer:
try:
json_response = json.loads(buffer)
if 'data' in json_response:
yield json_response['data']
except json.JSONDecodeError:
print(f"Failed to decode final JSON. Buffer content: {buffer}")

View File

@@ -1,202 +0,0 @@
from __future__ import annotations
import json
import base64
import time
import random
import hashlib
import asyncio
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ...errors import ResponseError
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt, get_last_user_message
from ...providers.response import FinishReason, JsonConversation
class Conversation(JsonConversation):
message_history: Messages = []
def __init__(self, model: str):
self.model = model
self.message_history = []
class DDG(AsyncGeneratorProvider, ProviderModelMixin):
label = "DuckDuckGo AI Chat"
url = "https://duckduckgo.com"
api_endpoint = "https://duckduckgo.com/duckchat/v1/chat"
status_url = "https://duckduckgo.com/duckchat/v1/status"
working = False
needs_auth = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = "gpt-4o-mini"
model_aliases = {
"gpt-4": default_model,
"gpt-4o": default_model,
"llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
"claude-3-haiku": "claude-3-haiku-20240307",
"mistral-small": "mistralai/Mistral-Small-24B-Instruct-2501",
"mistral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
}
models = [default_model, "o3-mini"] + list(model_aliases.keys())
@staticmethod
def generate_user_agent() -> str:
return f"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.{random.randint(1000,9999)}.0 Safari/537.36"
@staticmethod
def generate_fe_signals() -> str:
current_time = int(time.time() * 1000)
signals_data = {
"start": current_time - 35000,
"events": [
{"name": "onboarding_impression_1", "delta": 383},
{"name": "onboarding_impression_2", "delta": 6004},
{"name": "onboarding_finish", "delta": 9690},
{"name": "startNewChat", "delta": 10082},
{"name": "initSwitchModel", "delta": 16586}
],
"end": 35163
}
return base64.b64encode(json.dumps(signals_data).encode()).decode()
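# x-fe-version is rebuilt from the __DDG_FE_CHAT_HASH__ value embedded in the chat page HTML; a hardcoded build string is used as a fallback.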
@staticmethod
def generate_fe_version(page_content: str = "") -> str:
try:
fe_hash = page_content.split('__DDG_FE_CHAT_HASH__="', 1)[1].split('"', 1)[0]
return f"serp_20250510_052906_ET-{fe_hash}"
except Exception:
return "serp_20250510_052906_ET-ed4f51dc2e106020bc4b"
@staticmethod
def generate_x_vqd_hash_1(vqd: str, fe_version: str) -> str:
# Placeholder logic; in reality DuckDuckGo uses dynamic JS challenge
concat = f"{vqd}#{fe_version}"
hash_digest = hashlib.sha256(concat.encode()).digest()
b64 = base64.b64encode(hash_digest).decode()
return base64.b64encode(json.dumps({
"server_hashes": [],
"client_hashes": [b64],
"signals": {},
"meta": {
"v": "1",
"challenge_id": hashlib.md5(concat.encode()).hexdigest(),
"origin": "https://duckduckgo.com",
"stack": "Generated in Python"
}
}).encode()).decode()
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
conversation: Conversation = None,
return_conversation: bool = True,
retry_count: int = 0,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
if conversation is None:
conversation = Conversation(model)
conversation.message_history = messages.copy()
else:
last_message = next((m for m in reversed(messages) if m["role"] == "user"), None)
if last_message and last_message not in conversation.message_history:
conversation.message_history.append(last_message)
base_headers = {
"accept-language": "en-US,en;q=0.9",
"dnt": "1",
"origin": "https://duckduckgo.com",
"referer": "https://duckduckgo.com/",
"sec-ch-ua": '"Chromium";v="135", "Not-A.Brand";v="8"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": cls.generate_user_agent(),
}
cookies = {'dcs': '1', 'dcm': '3'}
formatted_prompt = format_prompt(conversation.message_history) if len(conversation.message_history) > 1 else get_last_user_message(messages)
data = {"model": model, "messages": [{"role": "user", "content": formatted_prompt}], "canUseTools": False}
async with ClientSession(cookies=cookies) as session:
try:
# Step 1: Initial page load
async with session.get(f"{cls.url}/?q=DuckDuckGo+AI+Chat&ia=chat&duckai=1",
headers={**base_headers, "accept": "text/html"}, proxy=proxy) as r:
r.raise_for_status()
page = await r.text()
fe_version = cls.generate_fe_version(page)
# Step 2: Get VQD
status_headers = {**base_headers, "accept": "*/*", "cache-control": "no-store", "x-vqd-accept": "1"}
async with session.get(cls.status_url, headers=status_headers, proxy=proxy) as r:
r.raise_for_status()
vqd = r.headers.get("x-vqd-4", "") or f"4-{random.randint(10**29, 10**30 - 1)}"
x_vqd_hash_1 = cls.generate_x_vqd_hash_1(vqd, fe_version)
# Step 3: Actual chat request
chat_headers = {
**base_headers,
"accept": "text/event-stream",
"content-type": "application/json",
"x-fe-signals": cls.generate_fe_signals(),
"x-fe-version": fe_version,
"x-vqd-4": vqd,
"x-vqd-hash-1": x_vqd_hash_1,
}
async with session.post(cls.api_endpoint, json=data, headers=chat_headers, proxy=proxy) as response:
if response.status != 200:
error_text = await response.text()
if "ERR_BN_LIMIT" in error_text:
raise ResponseError("Blocked by DuckDuckGo: Bot limit exceeded (ERR_BN_LIMIT).")
if "ERR_INVALID_VQD" in error_text and retry_count < 3:
await asyncio.sleep(random.uniform(2.5, 5.5))
async for chunk in cls.create_async_generator(
model, messages, proxy, conversation, return_conversation, retry_count + 1, **kwargs
):
yield chunk
return
raise ResponseError(f"HTTP {response.status} - {error_text}")
full_message = ""
async for line in response.content:
line_text = line.decode("utf-8").strip()
if line_text.startswith("data:"):
payload = line_text[5:].strip()
if payload == "[DONE]":
if full_message:
conversation.message_history.append({"role": "assistant", "content": full_message})
if return_conversation:
yield conversation
yield FinishReason("stop")
break
try:
msg = json.loads(payload)
if msg.get("action") == "error":
raise ResponseError(f"Error: {msg.get('type', 'unknown')}")
if "message" in msg:
content = msg["message"]
yield content
full_message += content
except json.JSONDecodeError:
continue
except Exception as e:
if retry_count < 3:
await asyncio.sleep(random.uniform(2.5, 5.5))
async for chunk in cls.create_async_generator(
model, messages, proxy, conversation, return_conversation, retry_count + 1, **kwargs
):
yield chunk
else:
raise ResponseError(f"Error: {str(e)}")

View File

@@ -1,71 +0,0 @@
from __future__ import annotations
import json
from abc import ABC, abstractmethod
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class Equing(AbstractProvider):
url: str = 'https://next.eqing.tech/'
working = False
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = False
@staticmethod
@abstractmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
headers = {
'authority' : 'next.eqing.tech',
'accept' : 'text/event-stream',
'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control' : 'no-cache',
'content-type' : 'application/json',
'origin' : 'https://next.eqing.tech',
'plugins' : '0',
'pragma' : 'no-cache',
'referer' : 'https://next.eqing.tech/',
'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
'sec-ch-ua-mobile' : '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest' : 'empty',
'sec-fetch-mode' : 'cors',
'sec-fetch-site' : 'same-origin',
'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'usesearch' : 'false',
'x-requested-with' : 'XMLHttpRequest'
}
json_data = {
'messages' : messages,
'stream' : stream,
'model' : model,
'temperature' : kwargs.get('temperature', 0.5),
'presence_penalty' : kwargs.get('presence_penalty', 0),
'frequency_penalty' : kwargs.get('frequency_penalty', 0),
'top_p' : kwargs.get('top_p', 1),
}
response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
headers=headers, json=json_data, stream=stream)
if not stream:
yield response.json()["choices"][0]["message"]["content"]
return
for line in response.iter_content(chunk_size=1024):
if line:
if b'content' in line:
line_json = json.loads(line.decode('utf-8').split('data: ')[1])
token = line_json['choices'][0]['delta'].get('content')
if token:
yield token

View File

@@ -1,101 +0,0 @@
from __future__ import annotations
import json
import time
import hashlib
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_random_hex, get_random_string
from ...requests.raise_for_status import raise_for_status
class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://flowgpt.com/chat"
working = False
supports_message_history = True
supports_system_message = True
default_model = "gpt-3.5-turbo"
models = [
"gpt-3.5-turbo",
"gpt-3.5-long",
"gpt-4-turbo",
"google-gemini",
"claude-instant",
"claude-v1",
"claude-v2",
"llama2-13b",
"mythalion-13b",
"pygmalion-13b",
"chronos-hermes-13b",
"Mixtral-8x7B",
"Dolphin-2.6-8x7B",
]
model_aliases = {
"gemini": "google-gemini",
"gemini-pro": "google-gemini"
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
temperature: float = 0.7,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
timestamp = str(int(time.time()))
auth = "Bearer null"
nonce = get_random_hex()
data = f"{timestamp}-{nonce}-{auth}"
signature = hashlib.md5(data.encode()).hexdigest()
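# x-signature is the MD5 hex digest of "timestamp-nonce-auth"; the timestamp and nonce are sent alongside it in their own headers.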
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "*/*",
"Accept-Language": "en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": "https://flowgpt.com/",
"Content-Type": "application/json",
"Authorization": "Bearer null",
"Origin": "https://flowgpt.com",
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-site",
"TE": "trailers",
"Authorization": auth,
"x-flow-device-id": f"f-{get_random_string(19)}",
"x-nonce": nonce,
"x-signature": signature,
"x-timestamp": timestamp
}
async with ClientSession(headers=headers) as session:
history = [message for message in messages[:-1] if message["role"] != "system"]
system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
if not system_message:
system_message = "You are helpful assistant. Follow the user's instructions carefully."
data = {
"model": model,
"nsfw": False,
"question": messages[-1]["content"],
"history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
"system": system_message,
"temperature": temperature,
"promptId": f"model-{model}",
"documentIds": [],
"chatFileDocumentIds": [],
"generateImage": False,
"generateAudio": False
}
async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in response.content:
if chunk.strip():
message = json.loads(chunk)
if "event" not in message:
continue
if message["event"] == "text":
yield message["data"]

View File

@@ -1,67 +0,0 @@
from __future__ import annotations
import time
from hashlib import sha256
from aiohttp import BaseConnector, ClientSession
from ...errors import RateLimitError
from ...requests import raise_for_status
from ...requests.aiohttp import get_connector
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat10.free2gpt.xyz"
working = False
supports_message_history = True
default_model = 'gemini-1.5-pro'
models = [default_model, 'gemini-1.5-flash']
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs,
) -> AsyncResult:
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
"Accept": "*/*",
"Accept-Language": "en-US,en;q=0.9",
"Accept-Encoding": "gzip, deflate, br",
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
) as session:
timestamp = int(time.time() * 1e3)
data = {
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, messages[-1]["content"]),
}
async with session.post(
f"{cls.url}/api/generate", json=data, proxy=proxy
) as response:
if response.status == 500:
if "Quota exceeded" in await response.text():
raise RateLimitError(
f"Response {response.status}: Rate limit reached"
)
await raise_for_status(response)
async for chunk in response.content.iter_any():
yield chunk.decode(errors="ignore")
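# The "sign" field is a SHA-256 hex digest of "timestamp:last_message:secret" (secret is empty here); see generate_signature below.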
def generate_signature(time: int, text: str, secret: str = ""):
message = f"{time}:{text}:{secret}"
return sha256(message.encode()).hexdigest()

View File

@@ -1,72 +0,0 @@
from __future__ import annotations
import time
import hashlib
import random
from typing import AsyncGenerator, Optional, Dict, Any
from ...typing import Messages
from ...requests import StreamSession, raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...errors import RateLimitError
# Constants
DOMAINS = [
"https://s.aifree.site",
"https://v.aifree.site/",
"https://al.aifree.site/",
"https://u4.aifree.site/"
]
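# The string below is Chinese for "today's quota for the current region has been used up"; it marks a rate-limited response.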
RATE_LIMIT_ERROR_MESSAGE = "当前地区当日额度已消耗完"
class FreeGpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://freegptsnav.aifree.site"
working = False
supports_message_history = True
supports_system_message = True
default_model = 'gemini-1.5-pro'
models = [default_model, 'gemini-1.5-flash']
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: Optional[str] = None,
timeout: int = 120,
**kwargs: Any
) -> AsyncGenerator[str, None]:
prompt = messages[-1]["content"]
timestamp = int(time.time())
data = cls._build_request_data(messages, prompt, timestamp)
domain = random.choice(DOMAINS)
async with StreamSession(
impersonate="chrome",
timeout=timeout,
proxies={"all": proxy} if proxy else None
) as session:
async with session.post(f"{domain}/api/generate", json=data) as response:
await raise_for_status(response)
async for chunk in response.iter_content():
chunk_decoded = chunk.decode(errors="ignore")
if chunk_decoded == RATE_LIMIT_ERROR_MESSAGE:
raise RateLimitError("Rate limit reached")
yield chunk_decoded
@staticmethod
def _build_request_data(messages: Messages, prompt: str, timestamp: int, secret: str = "") -> Dict[str, Any]:
return {
"messages": messages,
"time": timestamp,
"pass": None,
"sign": generate_signature(timestamp, prompt, secret)
}
def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
data = f"{timestamp}:{message}:{secret}"
return hashlib.sha256(data.encode()).hexdigest()

View File

@@ -1,105 +0,0 @@
from __future__ import annotations
import json
import asyncio
from aiohttp import ClientSession, ClientTimeout, ClientError
from typing import AsyncGenerator
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://free.netfly.top"
api_endpoint = "/api/openai/v1/chat/completions"
working = False
default_model = 'gpt-3.5-turbo'
models = [
'gpt-3.5-turbo',
'gpt-4',
]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": cls.url,
"referer": f"{cls.url}/",
"sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
}
data = {
"messages": messages,
"stream": True,
"model": model,
"temperature": 0.5,
"presence_penalty": 0,
"frequency_penalty": 0,
"top_p": 1
}
max_retries = 5
retry_delay = 2
for attempt in range(max_retries):
try:
async with ClientSession(headers=headers) as session:
timeout = ClientTimeout(total=60)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
response.raise_for_status()
async for chunk in cls._process_response(response):
yield chunk
return # If successful, exit the function
except (ClientError, asyncio.TimeoutError) as e:
if attempt == max_retries - 1:
raise # If all retries failed, raise the last exception
await asyncio.sleep(retry_delay)
retry_delay *= 2 # Exponential backoff
@classmethod
async def _process_response(cls, response) -> AsyncGenerator[str, None]:
buffer = ""
async for line in response.content:
buffer += line.decode('utf-8')
if buffer.endswith('\n\n'):
for subline in buffer.strip().split('\n'):
if subline.startswith('data: '):
if subline == 'data: [DONE]':
return
try:
data = json.loads(subline[6:])
content = data['choices'][0]['delta'].get('content')
if content:
yield content
except json.JSONDecodeError:
print(f"Failed to parse JSON: {subline}")
except KeyError:
print(f"Unexpected JSON structure: {data}")
buffer = ""
# Process any remaining data in the buffer
if buffer:
for subline in buffer.strip().split('\n'):
if subline.startswith('data: ') and subline != 'data: [DONE]':
try:
data = json.loads(subline[6:])
content = data['choices'][0]['delta'].get('content')
if content:
yield content
except (json.JSONDecodeError, KeyError):
pass

View File

@@ -1,9 +0,0 @@
from __future__ import annotations
from ..template import OpenaiTemplate
class FreeRouter(OpenaiTemplate):
label = "CablyAI FreeRouter"
url = "https://freerouter.cablyai.com"
api_base = "https://freerouter.cablyai.com/v1"
working = False

View File

@@ -1,60 +0,0 @@
from __future__ import annotations
import time
import hashlib
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://gprochat.com"
api_endpoint = "https://gprochat.com/api/generate"
working = False
supports_stream = True
supports_message_history = True
default_model = 'gemini-1.5-pro'
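# Requests are signed with SHA-256 over "timestamp:prompt:secret_key", the same scheme as Free2GPT above but with a hardcoded key.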
@staticmethod
def generate_signature(timestamp: int, message: str) -> str:
secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
hash_input = f"{timestamp}:{message}:{secret_key}"
signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
return signature
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
timestamp = int(time.time() * 1000)
prompt = format_prompt(messages)
sign = cls.generate_signature(timestamp, prompt)
headers = {
"accept": "*/*",
"origin": cls.url,
"referer": f"{cls.url}/",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
"content-type": "text/plain;charset=UTF-8"
}
data = {
"messages": [{"role": "user", "parts": [{"text": prompt}]}],
"time": timestamp,
"pass": None,
"sign": sign
}
async with ClientSession(headers=headers) as session:
async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode()

View File

@@ -1,24 +0,0 @@
from __future__ import annotations
from ..template import OpenaiTemplate
class Glider(OpenaiTemplate):
label = "Glider"
url = "https://glider.so"
api_endpoint = "https://glider.so/api/chat"
working = False
default_model = 'chat-llama-3-1-70b'
models = [
'chat-llama-3-1-70b',
'chat-llama-3-1-8b',
'chat-llama-3-2-3b',
'deepseek-ai/DeepSeek-R1'
]
model_aliases = {
"llama-3.1-70b": "chat-llama-3-1-70b",
"llama-3.1-8b": "chat-llama-3-1-8b",
"llama-3.2-3b": "chat-llama-3-2-3b",
"deepseek-r1": "deepseek-ai/DeepSeek-R1",
}

View File

@@ -1,94 +0,0 @@
from __future__ import annotations
from ...typing import AsyncResult, Messages, MediaListType
from ...providers.response import JsonConversation, Reasoning, TitleGeneration
from ...requests import StreamSession, raise_for_status
from ...config import DEFAULT_MODEL
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_last_user_message
class GptOss(AsyncGeneratorProvider, ProviderModelMixin):
label = "gpt-oss (playground)"
url = "https://gpt-oss.com"
api_endpoint = "https://api.gpt-oss.com/chatkit"
working = False
active_by_default = True
default_model = "gpt-oss-120b"
models = [default_model, "gpt-oss-20b"]
model_aliases = {
DEFAULT_MODEL: default_model,
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
media: MediaListType = None,
conversation: JsonConversation = None,
reasoning_effort: str = "high",
proxy: str = None,
**kwargs
) -> AsyncResult:
if media:
raise ValueError("Media is not supported by gpt-oss")
model = cls.get_model(model)
user_message = get_last_user_message(messages)
cookies = {}
if conversation is None:
data = {
"op": "threads.create",
"params": {
"input": {
"text": user_message,
"content": [{"type": "input_text", "text": user_message}],
"quoted_text": "",
"attachments": []
}
}
}
else:
data = {
"op":"threads.addMessage",
"params": {
"input": {
"text": user_message,
"content": [{"type": "input_text", "text": user_message}],
"quoted_text": "",
"attachments": []
},
"threadId": conversation.id
}
}
cookies["user_id"] = conversation.user_id
headers = {
"accept": "text/event-stream",
"x-reasoning-effort": reasoning_effort,
"x-selected-model": model,
"x-show-reasoning": "true"
}
async with StreamSession(
headers=headers,
cookies=cookies,
proxy=proxy,
) as session:
async with session.post(
cls.api_endpoint,
json=data
) as response:
await raise_for_status(response)
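# The chatkit stream emits typed events: thread.created carries the new thread id, thread.item_updated carries reasoning and text deltas, and thread.updated carries the generated title.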
async for chunk in response.sse():
if chunk.get("type") == "thread.created":
yield JsonConversation(id=chunk["thread"]["id"], user_id=response.cookies.get("user_id"))
elif chunk.get("type") == "thread.item_updated":
entry = chunk.get("update", {}).get("entry", chunk.get("update", {}))
if entry.get("type") == "thought":
yield Reasoning(entry.get("content"))
elif entry.get("type") == "recap":
pass #yield Reasoning(status=entry.get("summary"))
elif entry.get("type") == "assistant_message.content_part.text_delta":
yield entry.get("delta")
elif chunk.get("type") == "thread.updated":
yield TitleGeneration(chunk["thread"]["title"])

View File

@@ -1,105 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import time
import asyncio
from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse
from ...image import use_aspect_ratio
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
class ImageLabs(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://editor.imagelabs.net"
api_endpoint = "https://editor.imagelabs.net/txt2img"
working = True
supports_stream = False
supports_system_message = False
supports_message_history = False
default_model = 'sdxl-turbo'
default_image_model = default_model
image_models = [default_image_model]
models = image_models
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
# Image
prompt: str = None,
negative_prompt: str = "",
aspect_ratio: str = "1:1",
width: int = None,
height: int = None,
extra_body: dict = None,
**kwargs
) -> AsyncResult:
if extra_body is None:
extra_body = {}
extra_body = use_aspect_ratio({
"width": width,
"height": height,
**extra_body
}, aspect_ratio)
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'cache-control': 'no-cache',
'content-type': 'application/json',
'origin': cls.url,
'referer': f'{cls.url}/',
'x-requested-with': 'XMLHttpRequest',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
}
async with ClientSession(headers=headers) as session:
prompt = messages[-1]["content"] if prompt is None else prompt
# Generate image
payload = {
"prompt": prompt,
"seed": str(int(time.time())),
"subseed": str(int(time.time() * 1000)),
"attention": 0,
"tiling": False,
"negative_prompt": negative_prompt,
"reference_image": "",
"reference_image_type": None,
"reference_strength": 30,
**extra_body
}
async with session.post(f'{cls.url}/txt2img', json=payload, proxy=proxy) as generate_response:
generate_data = await generate_response.json()
task_id = generate_data.get('task_id')
# Poll for progress
while True:
async with session.post(f'{cls.url}/progress', json={"task_id": task_id}, proxy=proxy) as progress_response:
progress_data = await progress_response.json()
# Check for completion or error states
if progress_data.get('status') == 'Done' or progress_data.get('final_image_url'):
# Yield ImageResponse with the final image URL
yield ImageResponse(
urls=[progress_data.get('final_image_url')],
alt=prompt
)
break
# Check for queue or error states
if 'error' in progress_data.get('status', '').lower():
raise Exception(f"Image generation error: {progress_data}")
# Wait between polls
await asyncio.sleep(1)
@classmethod
def get_model(cls, model: str) -> str:
return cls.default_model

View File

@@ -1,79 +0,0 @@
from __future__ import annotations
import json
from typing import AsyncGenerator, Optional, List, Dict, Union, Any
from aiohttp import ClientSession, BaseConnector, ClientResponse
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_random_string, get_connector
from ...requests import raise_for_status
class Koala(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://koala.sh/chat"
api_endpoint = "https://koala.sh/api/gpt/"
working = False
supports_message_history = True
default_model = 'gpt-4o-mini'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: Optional[str] = None,
connector: Optional[BaseConnector] = None,
**kwargs: Any
) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
if not model:
model = "gpt-4o-mini"
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
"Accept": "text/event-stream",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}",
"Flag-Real-Time-Data": "false",
"Visitor-ID": get_random_string(20),
"Origin": "https://koala.sh",
"Alt-Used": "koala.sh",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"TE": "trailers",
}
async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
input_text = messages[-1]["content"]
system_messages = " ".join(
message["content"] for message in messages if message["role"] == "system"
)
if system_messages:
input_text += f" {system_messages}"
data = {
"input": input_text,
"inputHistory": [
message["content"]
for message in messages[:-1]
if message["role"] == "user"
],
"outputHistory": [
message["content"]
for message in messages
if message["role"] == "assistant"
],
"model": model,
}
async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in cls._parse_event_stream(response):
yield chunk
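# _parse_event_stream decodes each SSE "data: " line into a JSON object and yields it as-is.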
@staticmethod
async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
async for chunk in response.content:
if chunk.startswith(b"data: "):
yield json.loads(chunk[6:])

View File

@@ -1,617 +0,0 @@
from __future__ import annotations
import random
import json
import uuid
import asyncio
from ...typing import AsyncResult, Messages, MediaListType
from ...requests import StreamSession, StreamResponse, FormData, raise_for_status
from ...providers.response import JsonConversation, FinishReason
from ...tools.media import merge_media
from ...image import to_bytes, is_accepted_format
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_last_user_message
from ...errors import ModelNotFoundError, ResponseError
from ... import debug
class LegacyLMArena(AsyncGeneratorProvider, ProviderModelMixin):
label = "LMArena (Legacy)"
url = "https://legacy.lmarena.ai"
api_endpoint = "/queue/join?"
working = False
default_model = "chatgpt-4o-latest-20250326"
models = []
# Models from HAR data (manually added)
har_models = [
"chatgpt-4o-latest-20250326", "gemini-2.5-pro-preview-05-06", "o3-2025-04-16",
"o4-mini-2025-04-16", "qwen3-235b-a22b", "mistral-medium-2505",
"gemini-2.5-flash-preview-04-17", "gpt-4.1-2025-04-14",
"llama-4-maverick-03-26-experimental", "grok-3-preview-02-24",
"claude-3-7-sonnet-20250219", "claude-3-7-sonnet-20250219-thinking-32k",
"deepseek-v3-0324", "llama-4-maverick-17b-128e-instruct",
"llama-4-scout-17b-16e-instruct", "gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14"
]
# Models from JS data (manually added)
js_models = [
"gemini-2.0-flash-001", "gemini-2.0-flash-lite-preview-02-05",
"gemma-3-27b-it", "gemma-3-12b-it", "gemma-3-4b-it",
"deepseek-r1", "claude-3-5-sonnet-20241022", "o3-mini"
]
# Updated vision models list from JS data
vision_models = [
"gemini-2.5-pro-preview-05-06", "o3-2025-04-16", "o4-mini-2025-04-16",
"mistral-medium-2505", "gemini-2.5-flash-preview-04-17", "gpt-4.1-2025-04-14",
"claude-3-7-sonnet-20250219", "claude-3-7-sonnet-20250219-thinking-32k",
"llama-4-maverick-17b-128e-instruct", "llama-4-scout-17b-16e-instruct",
"gpt-4.1-mini-2025-04-14", "gpt-4.1-nano-2025-04-14", "gemini-2.0-flash-001",
"gemini-2.0-flash-lite-preview-02-05", "gemma-3-27b-it", "claude-3-5-sonnet-20241022",
"gpt-4o-mini-2024-07-18", "gpt-4o-2024-11-20", "gpt-4o-2024-08-06",
"gpt-4o-2024-05-13", "mistral-small-3.1-24b-instruct-2503",
"claude-3-5-sonnet-20240620", "amazon-nova-pro-v1.0", "amazon-nova-lite-v1.0",
"qwen2.5-vl-32b-instruct", "qwen2.5-vl-72b-instruct", "gemini-1.5-pro-002",
"gemini-1.5-flash-002", "gemini-1.5-flash-8b-001", "gemini-1.5-pro-001",
"gemini-1.5-flash-001", "pixtral-large-2411", "step-1o-vision-32k-highres",
"claude-3-haiku-20240307", "claude-3-sonnet-20240229", "claude-3-opus-20240229",
"qwen-vl-max-1119", "qwen-vl-max-0809", "reka-core-20240904",
"reka-flash-20240904", "c4ai-aya-vision-32b", "pixtral-12b-2409"
]
model_aliases = {
# Existing aliases
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219",
"claude-3.7-sonnet-thinking": "claude-3-7-sonnet-20250219-thinking-32k",
"gpt-4o": "chatgpt-4o-latest-20250326",
"grok-3": ["early-grok-3", "grok-3-preview-02-24",],
"gemini-2.0-flash-thinking": ["gemini-2.0-flash-thinking-exp-01-21", "gemini-2.0-flash-thinking-exp-1219",],
"gemini-2.0-pro-exp": "gemini-2.0-pro-exp-02-05",
"gemini-2.0-flash": "gemini-2.0-flash-001",
"o1": "o1-2024-12-17",
"qwen-2.5-max": "qwen2.5-max",
"o3": "o3-2025-04-16",
"o4-mini": "o4-mini-2025-04-16",
"gemini-1.5-pro": "gemini-1.5-pro-002",
"grok-2": "grok-2-2024-08-13",
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022",
"qwen-2.5-plus": "qwen2.5-plus-1127",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"gemini-1.5-flash": "gemini-1.5-flash-002",
"llama-3.1-405b": ["llama-3.1-405b-instruct-bf16", "llama-3.1-405b-instruct-fp8",],
"nemotron-70b": "llama-3.1-nemotron-70b-instruct",
"grok-2-mini": "grok-2-mini-2024-08-13",
"qwen-2.5-72b": "qwen2.5-72b-instruct",
"qwen-2.5-vl-32b": "qwen2.5-vl-32b-instruct",
"qwen-2.5-vl-72b": "qwen2.5-vl-72b-instruct",
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"llama-3.3-70b": "llama-3.3-70b-instruct",
"nemotron-49b": "llama-3.3-nemotron-49b-super-v1",
"mistral-large": "mistral-large-2411",
"pixtral-large": "pixtral-large-2411",
"gpt-4": "gpt-4-0613",
"gpt-4.1": "gpt-4.1-2025-04-14",
"gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano": "gpt-4.1-nano-2025-04-14",
"llama-3.1-70b": "llama-3.1-70b-instruct",
"nemotron-253b": "llama-3.1-nemotron-ultra-253b-v1",
"claude-3-opus": "claude-3-opus-20240229",
"tulu-3-70b": "llama-3.1-tulu-3-70b",
"claude-3.5-haiku": "claude-3-5-haiku-20241022",
"reka-core": "reka-core-20240904",
"gemma-2-27b": "gemma-2-27b-it",
"gemma-3-27b": "gemma-3-27b-it",
"gemma-3-12b": "gemma-3-12b-it",
"gemma-3-4b": "gemma-3-4b-it",
"deepseek-v2": "deepseek-v2-api-0628",
"qwen-2.5-coder-32b": "qwen2.5-coder-32b-instruct",
"gemma-2-9b": ["gemma-2-9b-it-simpo", "gemma-2-9b-it",],
"command-a": "command-a-03-2025",
"nemotron-51b": "llama-3.1-nemotron-51b-instruct",
"mistral-small-24b": "mistral-small-24b-instruct-2501",
"mistral-small-3.1-24b": "mistral-small-3.1-24b-instruct-2503",
"glm-4": "glm-4-0520",
"llama-3-70b": "llama-3-70b-instruct",
"llama-4-maverick": "llama-4-maverick-17b-128e-instruct",
"llama-4-scout": "llama-4-scout-17b-16e-instruct",
"reka-flash": "reka-flash-20240904",
"phi-4": "phi-4",
"claude-3-sonnet": "claude-3-sonnet-20240229",
"qwen-2-72b": "qwen2-72b-instruct",
"qwen-3-235b": "qwen3-235b-a22b",
"qwen-3-30b": "qwen3-30b-a3b",
"qwen-3-32b": "qwen3-32b",
"tulu-3-8b": "llama-3.1-tulu-3-8b",
"command-r": ["command-r-08-2024", "command-r",],
"codestral": "codestral-2405",
"claude-3-haiku": "claude-3-haiku-20240307",
"llama-3.1-8b": "llama-3.1-8b-instruct",
"qwen-1.5-110b": "qwen1.5-110b-chat",
"qwq-32b": "qwq-32b-preview",
"llama-3-8b": "llama-3-8b-instruct",
"qwen-1.5-72b": "qwen1.5-72b-chat",
"gemma-2-2b": "gemma-2-2b-it",
"qwen-vl-max": ["qwen-vl-max-1119", "qwen-vl-max-0809"],
"gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
"gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
"mixtral-8x22b": "mixtral-8x22b-instruct-v0.1",
"qwen-1.5-32b": "qwen1.5-32b-chat",
"qwen-1.5-14b": "qwen1.5-14b-chat",
"qwen-1.5-7b": "qwen1.5-7b-chat",
"qwen-1.5-4b": "qwen1.5-4b-chat",
"mistral-next": "mistral-next",
"phi-3-medium": "phi-3-medium-4k-instruct",
"phi-3-small": "phi-3-small-8k-instruct",
"phi-3-mini": ["phi-3-mini-4k-instruct-june-2024", "phi-3-mini-4k-instruct", "phi-3-mini-128k-instruct"],
"tulu-2-70b": "tulu-2-dpo-70b",
"llama-2-70b": ["llama-2-70b-chat", "llama2-70b-steerlm-chat"],
"llama-2-13b": "llama-2-13b-chat",
"llama-2-7b": "llama-2-7b-chat",
"hermes-2-dpo": "nous-hermes-2-mixtral-8x7b-dpo",
"pplx-7b-online":"pplx-7b-online",
"deepseek-67b": "deepseek-llm-67b-chat",
"openhermes-2.5-7b": "openhermes-2.5-mistral-7b",
"mistral-7b": "mistral-7b-instruct-v0.2",
"llama-3.2-3b": "llama-3.2-3b-instruct",
"llama-3.2-1b": "llama-3.2-1b-instruct",
"codellama-34b": "codellama-34b-instruct",
"codellama-70b": "codellama-70b-instruct",
"qwen-14b": "qwen-14b-chat",
"gpt-3.5-turbo": "gpt-3.5-turbo-1106",
"mixtral-8x7b": "mixtral-8x7b-instruct-v0.1",
"dbrx-instruct": "dbrx-instruct-preview",
}
@classmethod
def get_models(cls):
"""Get models with improved fallback sources"""
if cls.models: # Return cached models if already loaded
return cls.models
try:
# Try to fetch models from Google Storage first
url = "https://storage.googleapis.com/public-arena-no-cors/p2l-explorer/data/overall/arena.json"
import requests
response = requests.get(url, timeout=5)
response.raise_for_status()
data = response.json()
leaderboard_models = [model[0] for model in data.get("leaderboard", [])]
# Combine models from all sources and remove duplicates
all_models = list(set(leaderboard_models + cls.har_models + cls.js_models))
if all_models:
# Ensure default model is at index 0
if cls.default_model in all_models:
all_models.remove(cls.default_model)
all_models.insert(0, cls.default_model)
cls.models = all_models
return cls.models
except Exception as e:
# Log the error and fall back to alternative sources
debug.log(f"Failed to fetch models from Google Storage: {str(e)}")
# Fallback: Use combined har_models and js_models
combined_models = list(set(cls.har_models + cls.js_models))
if combined_models:
if cls.default_model in combined_models:
combined_models.remove(cls.default_model)
combined_models.insert(0, cls.default_model)
cls.models = combined_models
return cls.models
# Final fallback: Use vision_models
models = cls.vision_models.copy()
if cls.default_model not in models:
models.insert(0, cls.default_model)
cls.models = models
return cls.models
@classmethod
def get_model(cls, model: str) -> str:
"""Get the internal model name from the user-provided model name."""
if not model:
return cls.default_model
# Ensure models are loaded
if not cls.models:
cls.get_models()
# Check if the model exists directly in our models list
if model in cls.models:
return model
# Check if there's an alias for this model
if model in cls.model_aliases:
alias = cls.model_aliases[model]
# If the alias is a list, randomly select one of the options
if isinstance(alias, list):
selected_model = random.choice(alias)
debug.log(f"LegacyLMArena: Selected model '{selected_model}' from alias '{model}'")
return selected_model
debug.log(f"LegacyLMArena: Using model '{alias}' for alias '{model}'")
return alias
# If model still not found, check in all available model sources directly
all_available_models = list(set(cls.har_models + cls.js_models + cls.vision_models))
if model in all_available_models:
return model
raise ModelNotFoundError(f"LegacyLMArena: Model {model} not found")
@classmethod
def _build_payloads(cls, model_id: str, session_hash: str, text: str, files: list, max_tokens: int, temperature: float, top_p: float):
"""Build payloads for new conversations"""
first_payload = {
"data": [
None,
model_id,
{"text": text, "files": files},
{
"text_models": [model_id],
"all_text_models": [model_id],
"vision_models": [],
"all_vision_models": [],
"image_gen_models": [],
"all_image_gen_models": [],
"search_models": [],
"all_search_models": [],
"models": [model_id],
"all_models": [model_id],
"arena_type": "text-arena"
}
],
"event_data": None,
"fn_index": 119,
"trigger_id": 159,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 120,
"trigger_id": 159,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 121,
"trigger_id": 159,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
def _build_continuation_payloads(cls, model_id: str, session_hash: str, text: str, max_tokens: int, temperature: float, top_p: float):
"""Renamed from _build_second_payloads for clarity"""
first_payload = {
"data":[None,model_id,text,{
"text_models":[model_id],
"all_text_models":[model_id],
"vision_models":[],
"image_gen_models":[],
"all_image_gen_models":[],
"search_models":[],
"all_search_models":[],
"models":[model_id],
"all_models":[model_id],
"arena_type":"text-arena"}],
"event_data": None,
"fn_index": 122,
"trigger_id": 157,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 123,
"trigger_id": 157,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 124,
"trigger_id": 157,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
media: MediaListType = None,
max_tokens: int = 4096,
temperature: float = 0.7,
top_p: float = 1,
conversation: JsonConversation = None,
return_conversation: bool = True,
max_retries: int = 1,
**kwargs
) -> AsyncResult:
async def read_response(response: StreamResponse):
returned_data = ""
async for line in response.iter_lines():
if not line:
continue
# Handle both "data: " prefix and raw JSON
if line.startswith(b"data: "):
line = line[6:]
# Skip empty lines or non-JSON data
line = line.strip()
if not line or line == b"[DONE]":
continue
try:
json_data = json.loads(line)
# Process data based on message type
if json_data.get("msg") == "process_generating":
output_data = json_data.get("output", {}).get("data", [])
if len(output_data) > 1 and output_data[1]:
# Extract content from various response formats
data = output_data[1]
content = None
if isinstance(data, list):
if data and data[0] == "replace" and len(data) > 2:
content = data[2]
elif data and isinstance(data[0], list) and len(data[0]) > 2:
content = data[0][2]
elif isinstance(data, str):
# Handle direct string responses
content = data
if content:
# Clean up content
if isinstance(content, str):
if content.endswith(""):
content = content[:-1]
if content in ['<span class="cursor"></span> ', 'update', '']:
continue
if content.startswith(returned_data):
content = content[len(returned_data):]
if content:
returned_data += content
yield content
# Process completed messages
elif json_data.get("msg") == "process_completed":
output_data = json_data.get("output", {}).get("data", [])
if len(output_data) > 1:
# Handle both list and direct content
if isinstance(output_data[1], list):
for item in output_data[1]:
if isinstance(item, list) and len(item) > 1:
content = item[1]
elif isinstance(item, str):
content = item
else:
continue
if content and content != returned_data and content != '<span class="cursor"></span> ':
if "**NETWORK ERROR DUE TO HIGH TRAFFIC." in content:
raise ResponseError(content)
if content.endswith(""):
content = content[:-1]
new_content = content
if content.startswith(returned_data):
new_content = content[len(returned_data):]
if new_content:
returned_data = content
yield new_content
elif isinstance(output_data[1], str) and output_data[1]:
# Direct string content
content = output_data[1]
if content != returned_data:
if content.endswith(""):
content = content[:-1]
new_content = content
if content.startswith(returned_data):
new_content = content[len(returned_data):]
if new_content:
returned_data = content
yield new_content
# Also check for other message types that might contain content
elif json_data.get("msg") in ["process_starts", "heartbeat"]:
# These are status messages, skip them but don't error
continue
except json.JSONDecodeError:
# Skip non-JSON lines
continue
except Exception as e:
if max_retries == 1:
raise e
debug.log(f"Error parsing response: {str(e)}")
continue
# Get the actual model name
model = cls.get_model(model)
prompt = get_last_user_message(messages)
async with StreamSession(impersonate="chrome") as session:
# Add retry logic for better reliability
retry_count = 0
while retry_count < max_retries:
try:
# Handle new conversation
if conversation is None:
conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace("-", ""))
media_objects = []
# Process media if present
media = list(merge_media(media, messages))
if media:
data = FormData()
for i in range(len(media)):
media[i] = (to_bytes(media[i][0]), media[i][1])
for image, image_name in media:
data.add_field(f"files", image, filename=image_name)
# Upload media files
async with session.post(f"{cls.url}/upload", params={"upload_id": conversation.session_hash}, data=data) as response:
await raise_for_status(response)
image_files = await response.json()
# Format media objects for API request
media_objects = [{
"path": image_file,
"url": f"{cls.url}/file={image_file}",
"orig_name": media[i][1],
"size": len(media[i][0]),
"mime_type": is_accepted_format(media[i][0]),
"meta": {
"_type": "gradio.FileData"
}
} for i, image_file in enumerate(image_files)]
# Build payloads for new conversation
first_payload, second_payload, third_payload = cls._build_payloads(
model, conversation.session_hash, prompt, media_objects,
max_tokens, temperature, top_p
)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# Send the three required requests with small delays
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# Small delay between requests
await asyncio.sleep(0.1)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
await asyncio.sleep(0.1)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
# Small delay before streaming
await asyncio.sleep(0.2)
# Stream the response
stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
await raise_for_status(response)
count = 0
has_content = False
# Add timeout for response
try:
async with asyncio.timeout(30): # 30 second timeout
async for chunk in read_response(response):
count += 1
has_content = True
yield chunk
except asyncio.TimeoutError:
if not has_content:
raise RuntimeError("Response timeout - no data received from server")
# Only raise error if we truly got no content
if count == 0 and not has_content:
retry_count += 1
if retry_count < max_retries:
debug.log(f"No response received, retrying... (attempt {retry_count + 1}/{max_retries})")
await asyncio.sleep(1) # Wait before retry
conversation = None # Reset conversation for retry
continue
else:
raise RuntimeError("No response from server after multiple attempts")
# Success - break retry loop
break
# Handle continuation of existing conversation
else:
# Build payloads for conversation continuation
first_payload, second_payload, third_payload = cls._build_continuation_payloads(
model, conversation.session_hash, prompt, max_tokens, temperature, top_p
)
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
}
# Send the three required requests with delays
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
await asyncio.sleep(0.1)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
await asyncio.sleep(0.1)
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
await asyncio.sleep(0.2)
# Stream the response
stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
await raise_for_status(response)
count = 0
has_content = False
try:
async with asyncio.timeout(30):
async for chunk in read_response(response):
count += 1
has_content = True
yield chunk
except asyncio.TimeoutError:
if not has_content:
raise RuntimeError("Response timeout - no data received from server")
if count == 0 and not has_content:
raise RuntimeError("No response from server in conversation continuation")
# Success - break retry loop
break
except Exception as e:
if retry_count < max_retries - 1:
retry_count += 1
debug.log(f"Error occurred: {str(e)}, retrying... (attempt {retry_count + 1}/{max_retries})")
await asyncio.sleep(1)
conversation = None # Reset for retry
continue
else:
raise
# Return conversation object for future interactions
if return_conversation and conversation:
yield conversation
# Yield finish reason if we hit token limit
if count >= max_tokens:
yield FinishReason("length")

View File

@@ -1,540 +0,0 @@
from __future__ import annotations
import uuid
import json
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector
from ..requests import raise_for_status
from ..errors import RateLimitError
models = {
"claude-3-5-sonnet-20241022": {
"id": "claude-3-5-sonnet-20241022",
"name": "claude-3-5-sonnet-20241022",
"model": "claude-3-5-sonnet-20241022",
"provider": "Anthropic",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 25.366666666666667,
},
"claude-3-5-sonnet-20241022-t": {
"id": "claude-3-5-sonnet-20241022-t",
"name": "claude-3-5-sonnet-20241022-t",
"model": "claude-3-5-sonnet-20241022-t",
"provider": "Anthropic",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 39.820754716981135,
},
"claude-3-7-sonnet-20250219": {
"id": "claude-3-7-sonnet-20250219",
"name": "claude-3-7-sonnet-20250219",
"model": "claude-3-7-sonnet-20250219",
"provider": "Anthropic",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 47.02970297029703,
},
"claude-3-7-sonnet-20250219-t": {
"id": "claude-3-7-sonnet-20250219-t",
"name": "claude-3-7-sonnet-20250219-t",
"model": "claude-3-7-sonnet-20250219-t",
"provider": "Anthropic",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 39.04289693593315,
},
"deepseek-v3": {
"id": "deepseek-v3",
"name": "deepseek-v3",
"model": "deepseek-v3",
"provider": "DeepSeek",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 40.484657419083646,
},
"gemini-1.0-pro-latest-123": {
"id": "gemini-1.0-pro-latest-123",
"name": "gemini-1.0-pro-latest-123",
"model": "gemini-1.0-pro-latest-123",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 10,
},
"gemini-2.0-flash": {
"id": "gemini-2.0-flash",
"name": "gemini-2.0-flash",
"model": "gemini-2.0-flash",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 216.44162436548223,
},
"gemini-2.0-flash-exp": {
"id": "gemini-2.0-flash-exp",
"name": "gemini-2.0-flash-exp",
"model": "gemini-2.0-flash-exp",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 0,
"tps": 0,
},
"gemini-2.0-flash-thinking-exp": {
"id": "gemini-2.0-flash-thinking-exp",
"name": "gemini-2.0-flash-thinking-exp",
"model": "gemini-2.0-flash-thinking-exp",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 0,
"tps": 0,
},
"gemini-2.5-flash-preview-04-17": {
"id": "gemini-2.5-flash-preview-04-17",
"name": "gemini-2.5-flash-preview-04-17",
"model": "gemini-2.5-flash-preview-04-17",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 189.84010840108402,
},
"gemini-2.5-pro-official": {
"id": "gemini-2.5-pro-official",
"name": "gemini-2.5-pro-official",
"model": "gemini-2.5-pro-official",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 91.00613496932516,
},
"gemini-2.5-pro-preview-03-25": {
"id": "gemini-2.5-pro-preview-03-25",
"name": "gemini-2.5-pro-preview-03-25",
"model": "gemini-2.5-pro-preview-03-25",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 99.05660377358491,
"tps": 45.050511247443765,
},
"gemini-2.5-pro-preview-05-06": {
"id": "gemini-2.5-pro-preview-05-06",
"name": "gemini-2.5-pro-preview-05-06",
"model": "gemini-2.5-pro-preview-05-06",
"provider": "Google",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 99.29617834394904,
},
"gpt-4-turbo-2024-04-09": {
"id": "gpt-4-turbo-2024-04-09",
"name": "gpt-4-turbo-2024-04-09",
"model": "gpt-4-turbo-2024-04-09",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 1,
},
"gpt-4.1": {
"id": "gpt-4.1",
"name": "gpt-4.1",
"model": "gpt-4.1",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 42.857142857142854,
"tps": 19.58032786885246,
},
"gpt-4.1-mini": {
"id": "gpt-4.1-mini",
"name": "gpt-4.1-mini",
"model": "gpt-4.1-mini",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 68.75,
"tps": 12.677576601671309,
},
"gpt-4.1-mini-2025-04-14": {
"id": "gpt-4.1-mini-2025-04-14",
"name": "gpt-4.1-mini-2025-04-14",
"model": "gpt-4.1-mini-2025-04-14",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 94.23076923076923,
"tps": 8.297687861271676,
},
"gpt-4o-2024-11-20": {
"id": "gpt-4o-2024-11-20",
"name": "gpt-4o-2024-11-20",
"model": "gpt-4o-2024-11-20",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 73.3955223880597,
},
"gpt-4o-mini-2024-07-18": {
"id": "gpt-4o-mini-2024-07-18",
"name": "gpt-4o-mini-2024-07-18",
"model": "gpt-4o-mini-2024-07-18",
"provider": "OpenAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 26.874455100261553,
},
"grok-3": {
"id": "grok-3",
"name": "grok-3",
"model": "grok-3",
"provider": "xAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 51.110652663165794,
},
"grok-3-reason": {
"id": "grok-3-reason",
"name": "grok-3-reason",
"model": "grok-3-reason",
"provider": "xAI",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 62.81976744186046,
},
"o3-mini-2025-01-31": {
"id": "o3-mini-2025-01-31",
"name": "o3-mini-2025-01-31",
"model": "o3-mini-2025-01-31",
"provider": "Unknown",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 125.31410256410257,
},
"qwen3-235b-a22b": {
"id": "qwen3-235b-a22b",
"name": "qwen3-235b-a22b",
"model": "qwen3-235b-a22b",
"provider": "Alibaba",
"maxLength": 0,
"tokenLimit": 0,
"context": 0,
"success_rate": 100,
"tps": 25.846153846153847,
},
}
class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://liaobots.work"
working = True
supports_message_history = True
supports_system_message = True
default_model = "grok-3"
models = list(models.keys())
model_aliases = {
# Anthropic
"claude-3.5-sonnet": "claude-3-5-sonnet-20241022-t",
"claude-3.7-sonnet": "claude-3-7-sonnet-20250219-t",
# DeepSeek
#"deepseek-v3": "deepseek-v3",
# Google
"gemini-1.0-pro": "gemini-1.0-pro-latest-123",
"gemini-2.0-flash": "gemini-2.0-flash-exp",
"gemini-2.0-flash-thinking": "gemini-2.0-flash-thinking-exp",
"gemini-2.5-flash": "gemini-2.5-flash-preview-04-17",
"gemini-2.5-pro": "gemini-2.5-pro-preview-05-06",
# OpenAI
"gpt-4-turbo": "gpt-4-turbo-2024-04-09",
"gpt-4.1-mini": "gpt-4.1-mini-2025-04-14",
"gpt-4": "gpt-4o-2024-11-20",
"gpt-4o": "gpt-4o-2024-11-20",
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
# xAI
"grok-3-reason": "grok-3-reason",
"o3-mini": "o3-mini-2025-01-31",
"qwen-3-235b": "qwen3-235b-a22b",
}
_auth_code = None
_cookie_jar = None
@classmethod
def is_supported(cls, model: str) -> bool:
"""
Check if the given model is supported.
"""
return model in models or model in cls.model_aliases
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
connector: BaseConnector = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"dnt": "1",
"origin": "https://liaobots.work",
"priority": "u=1, i",
"referer": "https://liaobots.work/en",
"sec-ch-ua": "\"Chromium\";v=\"135\", \"Not-A.Brand\";v=\"8\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Linux\"",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36"
}
async with ClientSession(
headers=headers,
cookie_jar=cls._cookie_jar,
connector=get_connector(connector, proxy, True)
) as session:
# First, get a valid auth code
await cls.get_auth_code(session)
# Create conversation ID
conversation_id = str(uuid.uuid4())
# Prepare request data
data = {
"conversationId": conversation_id,
"models": [{
"modelId": model,
"provider": models[model]["provider"]
}],
"search": "false",
"messages": messages,
"key": "",
"prompt": kwargs.get("system_message", "你是 {{model}},一个由 {{provider}} 训练的大型语言模型,请仔细遵循用户的指示。")
}
# Try to make the chat request
try:
# Make the chat request with the current auth code
async with session.post(
f"{cls.url}/api/chat",
json=data,
headers={"x-auth-code": cls._auth_code},
ssl=False
) as response:
# Check if we got a streaming response
content_type = response.headers.get("Content-Type", "")
if "text/event-stream" in content_type:
async for line in response.content:
if line.startswith(b"data: "):
try:
response_data = json.loads(line[6:])
# Check for error response
if response_data.get("error") is True:
# Raise RateLimitError for payment required or other errors
if "402" in str(response_data.get("res_status", "")):
raise RateLimitError("This model requires payment or credits")
else:
error_msg = response_data.get('message', 'Unknown error')
raise RateLimitError(f"Error: {error_msg}")
# Process normal response
if response_data.get("role") == "assistant" and "content" in response_data:
content = response_data.get("content")
yield content
except json.JSONDecodeError:
continue
else:
# Not a streaming response, might be an error or HTML
response_text = await response.text()
# If we got HTML, we need to bypass CAPTCHA
if response_text.startswith("<!DOCTYPE html>"):
await cls.bypass_captcha(session)
# Get a fresh auth code
await cls.get_auth_code(session)
# Try the request again
async with session.post(
f"{cls.url}/api/chat",
json=data,
headers={"x-auth-code": cls._auth_code},
ssl=False
) as response2:
# Check if we got a streaming response
content_type = response2.headers.get("Content-Type", "")
if "text/event-stream" in content_type:
async for line in response2.content:
if line.startswith(b"data: "):
try:
response_data = json.loads(line[6:])
# Check for error response
if response_data.get("error") is True:
# Raise RateLimitError for payment required or other errors
if "402" in str(response_data.get("res_status", "")):
raise RateLimitError("This model requires payment or credits")
else:
error_msg = response_data.get('message', 'Unknown error')
raise RateLimitError(f"Error: {error_msg}")
# Process normal response
if response_data.get("role") == "assistant" and "content" in response_data:
content = response_data.get("content")
yield content
except json.JSONDecodeError:
continue
else:
raise RateLimitError("Failed to get streaming response")
else:
raise RateLimitError("Failed to connect to the service")
except Exception as e:
# If it's already a RateLimitError, re-raise it
if isinstance(e, RateLimitError):
raise
# Otherwise, wrap it in a RateLimitError
raise RateLimitError(f"Error processing request: {str(e)}")
@classmethod
async def bypass_captcha(cls, session: ClientSession) -> None:
"""
Bypass the CAPTCHA verification by directly making the recaptcha API request.
"""
try:
# First, try the direct recaptcha API request
async with session.post(
f"{cls.url}/recaptcha/api/login",
json={"token": "abcdefghijklmnopqrst"},
ssl=False
) as response:
if response.status == 200:
try:
response_text = await response.text()
# Try to parse as JSON
try:
response_data = json.loads(response_text)
# Check if we got a successful response
if response_data.get("code") == 200:
cls._cookie_jar = session.cookie_jar
except json.JSONDecodeError:
pass
except Exception:
pass
except Exception:
pass
@classmethod
async def get_auth_code(cls, session: ClientSession) -> None:
"""
Get a valid auth code by sending a request with an empty authcode.
"""
try:
# Send request with empty authcode to get a new one
auth_request_data = {
"authcode": "",
"recommendUrl": "https://liaobots.work/zh"
}
async with session.post(
f"{cls.url}/api/user",
json=auth_request_data,
ssl=False
) as response:
if response.status == 200:
response_text = await response.text()
try:
response_data = json.loads(response_text)
if "authCode" in response_data:
cls._auth_code = response_data["authCode"]
cls._cookie_jar = session.cookie_jar
return
except json.JSONDecodeError:
# If we got HTML, it might be the CAPTCHA page
if response_text.startswith("<!DOCTYPE html>"):
await cls.bypass_captcha(session)
# Try again after bypassing CAPTCHA
async with session.post(
f"{cls.url}/api/user",
json=auth_request_data,
ssl=False
) as response2:
if response2.status == 200:
response_text2 = await response2.text()
try:
response_data2 = json.loads(response_text2)
if "authCode" in response_data2:
cls._auth_code = response_data2["authCode"]
cls._cookie_jar = session.cookie_jar
return
except json.JSONDecodeError:
pass
except Exception:
pass
# If we're here, we couldn't get a valid auth code
# Set a default one as a fallback
cls._auth_code = "DvS3A5GTE9f0D" # Fallback to one of the provided auth codes

View File

@@ -1,54 +0,0 @@
from __future__ import annotations
import json
import requests
from ...typing import Any, CreateResult
from ..base_provider import AbstractProvider
class Lockchat(AbstractProvider):
url: str = "http://supertest.lockchat.app"
supports_stream = True
supports_gpt_35_turbo = True
supports_gpt_4 = True
@staticmethod
def create_completion(
model: str,
messages: list[dict[str, str]],
stream: bool, **kwargs: Any) -> CreateResult:
temperature = float(kwargs.get("temperature", 0.7))
payload = {
"temperature": temperature,
"messages" : messages,
"model" : model,
"stream" : True,
}
headers = {
"user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
}
response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
json=payload, headers=headers, stream=True)
response.raise_for_status()
for token in response.iter_lines():
if b"The model: `gpt-4` does not exist" in token:
print("error, retrying...")
Lockchat.create_completion(
model = model,
messages = messages,
stream = stream,
temperature = temperature,
**kwargs)
if b"content" in token:
token = json.loads(token.decode("utf-8").split("data: ")[1])
token = token["choices"][0]["delta"].get("content")
if token:
yield (token)

View File

@@ -1,87 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import hashlib
import time
import random
import re
import json
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class MagickPen(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://magickpen.com"
api_endpoint = "https://api.magickpen.com/ask"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'gpt-4o-mini'
models = ['gpt-4o-mini']
@classmethod
async def fetch_api_credentials(cls) -> tuple:
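# Scrape the site's bundled JS for the X-API-Secret and signing secret, then
# build an MD5 signature over the sorted (seed, timestamp, nonce) triple,
# mirroring what the web client sends with each request.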
url = "https://magickpen.com/_nuxt/bf709a9ce19f14e18116.js"
async with ClientSession() as session:
async with session.get(url) as response:
text = await response.text()
pattern = r'"X-API-Secret":"(\w+)"'
match = re.search(pattern, text)
X_API_SECRET = match.group(1) if match else None
timestamp = str(int(time.time() * 1000))
nonce = str(random.random())
s = ["TGDBU9zCgM", timestamp, nonce]
s.sort()
signature_string = ''.join(s)
signature = hashlib.md5(signature_string.encode()).hexdigest()
pattern = r'secret:"(\w+)"'
match = re.search(pattern, text)
secret = match.group(1) if match else None
if X_API_SECRET and timestamp and nonce and secret:
return X_API_SECRET, signature, timestamp, nonce, secret
else:
raise Exception("Unable to extract all the necessary data from the JavaScript file.")
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
X_API_SECRET, signature, timestamp, nonce, secret = await cls.fetch_api_credentials()
headers = {
'accept': 'application/json, text/plain, */*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'application/json',
'nonce': nonce,
'origin': cls.url,
'referer': f"{cls.url}/",
'secret': secret,
'signature': signature,
'timestamp': timestamp,
'x-api-secret': X_API_SECRET,
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
payload = {
'query': prompt,
'turnstileResponse': '',
'action': 'verify'
}
async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
response.raise_for_status()
async for chunk in response.content:
if chunk:
yield chunk.decode()

View File

@@ -1,50 +0,0 @@
from __future__ import annotations
import requests
from ..template import OpenaiTemplate
from ...requests import raise_for_status
from ... import debug
class PenguinAI(OpenaiTemplate):
label = "PenguinAI"
url = "https://penguinai.tech"
api_base = "https://api.penguinai.tech/v1"
working = False
active_by_default = False
default_model = "gpt-3.5-turbo"
default_vision_model = "gpt-4o"
# in reality, it uses pollinations
image_models = ["flux"]
@classmethod
def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
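# Fetch /models once and cache the result; image models are detected from the
# reported "type" field, while vision models are guessed from the id prefixes below.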
if not cls.models:
try:
headers = {}
if api_base is None:
api_base = cls.api_base
if api_key is None and cls.api_key is not None:
api_key = cls.api_key
if api_key is not None:
headers["authorization"] = f"Bearer {api_key}"
response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
raise_for_status(response)
data = response.json()
data = data.get("data") if isinstance(data, dict) else data
cls.image_models = [model.get("id") for model in data if "image" in model.get("type")]
cls.models = [model.get("id") for model in data]
if cls.sort_models:
cls.models.sort()
cls.vision_models = []
vision_model_prefixes = ["vision", "multimodal", "o1", "o3", "o4", "gpt-4", "claude-3", "claude-opus", "claude-sonnet"]
for model in cls.models:
for tag in vision_model_prefixes:
if tag in model and not "search" in model:
cls.vision_models.append(model)
except Exception as e:
debug.error(e)
return cls.fallback_models
return cls.models

View File

@@ -1,140 +0,0 @@
from __future__ import annotations
import re
import json
from urllib import parse
from datetime import datetime
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider
from ...requests import StreamSession
class Phind(AsyncGeneratorProvider):
url = "https://www.phind.com"
working = False
lockdown = True
supports_stream = True
supports_message_history = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
timeout: int = 120,
creative_mode: bool = False,
**kwargs
) -> AsyncResult:
headers = {
"Accept": "*/*",
"Origin": cls.url,
"Referer": f"{cls.url}/search",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
}
async with StreamSession(
headers=headers,
impersonate="chrome",
proxies={"https": proxy},
timeout=timeout
) as session:
url = "https://www.phind.com/search?home=true"
async with session.get(url) as response:
text = await response.text()
match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(?P<json>[\S\s]+?)</script>', text)
data = json.loads(match.group("json"))
challenge_seeds = data["props"]["pageProps"]["challengeSeeds"]
prompt = messages[-1]["content"]
data = {
"question": prompt,
"question_history": [
message["content"] for message in messages[:-1] if message["role"] == "user"
],
"answer_history": [
message["content"] for message in messages if message["role"] == "assistant"
],
"webResults": [],
"options": {
"date": datetime.now().strftime("%d.%m.%Y"),
"language": "en-US",
"detailed": True,
"anonUserId": "",
"answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B",
"creativeMode": creative_mode,
"customLinks": []
},
"context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
}
data["challenge"] = generate_challenge(data, **challenge_seeds)
async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
if line.startswith(b"data: "):
chunk = line[6:]
if chunk.startswith(b'<PHIND_DONE/>'):
break
if chunk.startswith(b'<PHIND_BACKEND_ERROR>'):
raise RuntimeError(f"Response: {chunk.decode()}")
if chunk.startswith(b'<PHIND_WEBRESULTS>') or chunk.startswith(b'<PHIND_FOLLOWUP>'):
pass
elif chunk.startswith(b"<PHIND_METADATA>") or chunk.startswith(b"<PHIND_INDICATOR>"):
pass
elif chunk.startswith(b"<PHIND_SPAN_BEGIN>") or chunk.startswith(b"<PHIND_SPAN_END>"):
pass
elif chunk:
yield chunk.decode()
elif new_line:
yield "\n"
new_line = False
else:
new_line = True
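# The helpers below replicate the site's client-side anti-bot challenge:
# the payload is deterministically stringified, URL-encoded, hashed with a
# 32-bit rolling hash, and fed through a linear congruential generator whose
# parameters come from the page's __NEXT_DATA__ challengeSeeds.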
def deterministic_stringify(obj):
def handle_value(value):
if isinstance(value, (dict, list)):
if isinstance(value, list):
return '[' + ','.join(sorted(map(handle_value, value))) + ']'
else: # It's a dict
return '{' + deterministic_stringify(value) + '}'
elif isinstance(value, bool):
return 'true' if value else 'false'
elif isinstance(value, (int, float)):
return format(value, '.8f').rstrip('0').rstrip('.')
elif isinstance(value, str):
return f'"{value}"'
else:
return 'null'
items = sorted(obj.items(), key=lambda x: x[0])
return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None])
def prng_general(seed, multiplier, addend, modulus):
a = seed * multiplier + addend
if a < 0:
return ((a%modulus)-modulus)/modulus
else:
return a%modulus/modulus
def generate_challenge_seed(l):
I = deterministic_stringify(l)
d = parse.quote(I, safe='')
return simple_hash(d)
def simple_hash(s):
d = 0
for char in s:
if len(char) > 1 or ord(char) >= 256:
continue
d = ((d << 5) - d + ord(char[0])) & 0xFFFFFFFF
if d > 0x7FFFFFFF: # 2147483647
d -= 0x100000000 # Subtract 2**32
return d
def generate_challenge(obj, **kwargs):
return prng_general(
seed=generate_challenge_seed(obj),
**kwargs
)

View File

@@ -1,49 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
from ...providers.response import FinishReason
class Pizzagpt(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://www.pizzagpt.it"
api_endpoint = "/api/chatx-completion"
working = False
default_model = 'gpt-4o-mini'
models = [default_model]
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
headers = {
"accept": "application/json",
"accept-language": "en-US,en;q=0.9",
"content-type": "application/json",
"origin": cls.url,
"referer": f"{cls.url}/en",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
"x-secret": "Marinara"
}
async with ClientSession(headers=headers) as session:
prompt = format_prompt(messages)
data = {
"question": prompt
}
async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
response_json = await response.json()
content = response_json.get("answer", response_json).get("content")
if content:
if "Misuse detected. please get in touch" in content:
raise ValueError(content)
yield content
yield FinishReason("stop")

View File

@@ -1,115 +0,0 @@
from __future__ import annotations
import time
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider
from ..helper import format_prompt
# Selenium helpers used below; assumed to come from the package's webdriver module.
from ...webdriver import WebDriver, WebDriverSession, element_send_text
models = {
"meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
"meta-llama/Llama-2-13b-chat-hf": {"name": "Llama-2-13b"},
"meta-llama/Llama-2-70b-chat-hf": {"name": "Llama-2-70b"},
"codellama/CodeLlama-7b-Instruct-hf": {"name": "Code-Llama-7b"},
"codellama/CodeLlama-13b-Instruct-hf": {"name": "Code-Llama-13b"},
"codellama/CodeLlama-34b-Instruct-hf": {"name": "Code-Llama-34b"},
"gpt-3.5-turbo": {"name": "GPT-3.5-Turbo"},
"gpt-3.5-turbo-instruct": {"name": "GPT-3.5-Turbo-Instruct"},
"gpt-4": {"name": "GPT-4"},
"palm": {"name": "Google-PaLM"},
}
class Poe(AbstractProvider):
url = "https://poe.com"
working = False
needs_auth = True
supports_stream = True
models = models.keys()
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
webdriver: WebDriver = None,
user_data_dir: str = None,
headless: bool = True,
**kwargs
) -> CreateResult:
if not model:
model = "gpt-3.5-turbo"
elif model not in models:
raise ValueError(f"Model are not supported: {model}")
prompt = format_prompt(messages)
session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
with session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
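# Hook the page's WebSocket so assistant message updates can be read back
# from window._message instead of scraping the DOM.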
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": """
window._message = window._last_message = "";
window._message_finished = false;
class ProxiedWebSocket extends WebSocket {
constructor(url, options) {
super(url, options);
this.addEventListener("message", (e) => {
const data = JSON.parse(JSON.parse(e.data)["messages"][0])["payload"]["data"];
if ("messageAdded" in data) {
if (data["messageAdded"]["author"] != "human") {
window._message = data["messageAdded"]["text"];
if (data["messageAdded"]["state"] == "complete") {
window._message_finished = true;
}
}
}
});
}
}
window.WebSocket = ProxiedWebSocket;
"""
})
try:
driver.get(f"{cls.url}/{models[model]['name']}")
wait = WebDriverWait(driver, 10 if headless else 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
except:
# Reopen browser for login
if not webdriver:
driver = session.reopen()
driver.get(f"{cls.url}/{models[model]['name']}")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
else:
raise RuntimeError("Prompt textarea not found. You may not be logged in.")
element_send_text(driver.find_element(By.CSS_SELECTOR, "footer textarea[class^='GrowingTextArea']"), prompt)
driver.find_element(By.CSS_SELECTOR, "footer button[class*='ChatMessageSendButton']").click()
script = """
if(window._message && window._message != window._last_message) {
try {
return window._message.substring(window._last_message.length);
} finally {
window._last_message = window._message;
}
} else if(window._message_finished) {
return null;
} else {
return '';
}
"""
while True:
chunk = driver.execute_script(script)
if chunk:
yield chunk
elif chunk != "":
break
else:
time.sleep(0.1)

View File

@@ -1,161 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import asyncio
import random
from typing import Optional
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...providers.response import ImageResponse
class Prodia(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://app.prodia.com"
api_endpoint = "https://api.prodia.com/generate"
working = False
default_model = 'absolutereality_v181.safetensors [3d9d4d2b]'
default_image_model = default_model
image_models = [
'3Guofeng3_v34.safetensors [50f420de]',
'absolutereality_V16.safetensors [37db0fc3]',
default_image_model,
'amIReal_V41.safetensors [0a8a2e61]',
'analog-diffusion-1.0.ckpt [9ca13f02]',
'aniverse_v30.safetensors [579e6f85]',
'anythingv3_0-pruned.ckpt [2700c435]',
'anything-v4.5-pruned.ckpt [65745d25]',
'anythingV5_PrtRE.safetensors [893e49b9]',
'AOM3A3_orangemixs.safetensors [9600da17]',
'blazing_drive_v10g.safetensors [ca1c1eab]',
'breakdomain_I2428.safetensors [43cc7d2f]',
'breakdomain_M2150.safetensors [15f7afca]',
'cetusMix_Version35.safetensors [de2f2560]',
'childrensStories_v13D.safetensors [9dfaabcb]',
'childrensStories_v1SemiReal.safetensors [a1c56dbb]',
'childrensStories_v1ToonAnime.safetensors [2ec7b88b]',
'Counterfeit_v30.safetensors [9e2a8f19]',
'cuteyukimixAdorable_midchapter3.safetensors [04bdffe6]',
'cyberrealistic_v33.safetensors [82b0d085]',
'dalcefo_v4.safetensors [425952fe]',
'deliberate_v2.safetensors [10ec4b29]',
'deliberate_v3.safetensors [afd9d2d4]',
'dreamlike-anime-1.0.safetensors [4520e090]',
'dreamlike-diffusion-1.0.safetensors [5c9fd6e0]',
'dreamlike-photoreal-2.0.safetensors [fdcf65e7]',
'dreamshaper_6BakedVae.safetensors [114c8abb]',
'dreamshaper_7.safetensors [5cf5ae06]',
'dreamshaper_8.safetensors [9d40847d]',
'edgeOfRealism_eorV20.safetensors [3ed5de15]',
'EimisAnimeDiffusion_V1.ckpt [4f828a15]',
'elldreths-vivid-mix.safetensors [342d9d26]',
'epicphotogasm_xPlusPlus.safetensors [1a8f6d35]',
'epicrealism_naturalSinRC1VAE.safetensors [90a4c676]',
'epicrealism_pureEvolutionV3.safetensors [42c8440c]',
'ICantBelieveItsNotPhotography_seco.safetensors [4e7a3dfd]',
'indigoFurryMix_v75Hybrid.safetensors [91208cbb]',
'juggernaut_aftermath.safetensors [5e20c455]',
'lofi_v4.safetensors [ccc204d6]',
'lyriel_v16.safetensors [68fceea2]',
'majicmixRealistic_v4.safetensors [29d0de58]',
'mechamix_v10.safetensors [ee685731]',
'meinamix_meinaV9.safetensors [2ec66ab0]',
'meinamix_meinaV11.safetensors [b56ce717]',
'neverendingDream_v122.safetensors [f964ceeb]',
'openjourney_V4.ckpt [ca2f377f]',
'pastelMixStylizedAnime_pruned_fp16.safetensors [793a26e8]',
'portraitplus_V1.0.safetensors [1400e684]',
'protogenx34.safetensors [5896f8d5]',
'Realistic_Vision_V1.4-pruned-fp16.safetensors [8d21810b]',
'Realistic_Vision_V2.0.safetensors [79587710]',
'Realistic_Vision_V4.0.safetensors [29a7afaa]',
'Realistic_Vision_V5.0.safetensors [614d1063]',
'Realistic_Vision_V5.1.safetensors [a0f13c83]',
'redshift_diffusion-V10.safetensors [1400e684]',
'revAnimated_v122.safetensors [3f4fefd9]',
'rundiffusionFX25D_v10.safetensors [cd12b0ee]',
'rundiffusionFX_v10.safetensors [cd4e694d]',
'sdv1_4.ckpt [7460a6fa]',
'v1-5-pruned-emaonly.safetensors [d7049739]',
'v1-5-inpainting.safetensors [21c7ab71]',
'shoninsBeautiful_v10.safetensors [25d8c546]',
'theallys-mix-ii-churned.safetensors [5d9225a4]',
'timeless-1.0.ckpt [7c4971d4]',
'toonyou_beta6.safetensors [980f6b15]'
]
models = [*image_models]
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
negative_prompt: str = "",
steps: int = 20, # 1-25
cfg: int = 7, # 0-20
seed: Optional[int] = None,
sampler: str = "DPM++ 2M Karras", # "Euler", "Euler a", "Heun", "DPM++ 2M Karras", "DPM++ SDE Karras", "DDIM"
aspect_ratio: str = "square", # "square", "portrait", "landscape"
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
if seed is None:
seed = random.randint(0, 10000)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"origin": cls.url,
"referer": f"{cls.url}/",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
prompt = messages[-1]['content'] if messages else ""
params = {
"new": "true",
"prompt": prompt,
"model": model,
"negative_prompt": negative_prompt,
"steps": steps,
"cfg": cfg,
"seed": seed,
"sampler": sampler,
"aspect_ratio": aspect_ratio
}
async with session.get(cls.api_endpoint, params=params, proxy=proxy) as response:
response.raise_for_status()
job_data = await response.json()
job_id = job_data["job"]
image_url = await cls._poll_job(session, job_id, proxy)
yield ImageResponse(image_url, alt=prompt)
@classmethod
async def _poll_job(cls, session: ClientSession, job_id: str, proxy: str, max_attempts: int = 30, delay: int = 2) -> str:
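# Poll the job status endpoint until it reports success or failure, waiting
# `delay` seconds between attempts (at most max_attempts * delay seconds).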
for _ in range(max_attempts):
async with session.get(f"https://api.prodia.com/job/{job_id}", proxy=proxy) as response:
response.raise_for_status()
job_status = await response.json()
if job_status["status"] == "succeeded":
return f"https://images.prodia.xyz/{job_id}.png"
elif job_status["status"] == "failed":
raise Exception("Image generation failed")
await asyncio.sleep(delay)
raise Exception("Timeout waiting for image generation")

View File

@@ -1,68 +0,0 @@
from __future__ import annotations
import json
import requests
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider
class Raycast(AbstractProvider):
url = "https://raycast.com"
supports_stream = True
needs_auth = True
working = False
models = [
"gpt-3.5-turbo",
"gpt-4"
]
@staticmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
**kwargs,
) -> CreateResult:
auth = kwargs.get('auth')
if not auth:
raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")
headers = {
'Accept': 'application/json',
'Accept-Language': 'en-US,en;q=0.9',
'Authorization': f'Bearer {auth}',
'Content-Type': 'application/json',
'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
}
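# Raycast expects each turn as {"author": role, "content": {"text": ...}}.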
parsed_messages = [
{'author': message['role'], 'content': {'text': message['content']}}
for message in messages
]
data = {
"debug": False,
"locale": "en-CN",
"messages": parsed_messages,
"model": model,
"provider": "openai",
"source": "ai_chat",
"system_instruction": "markdown",
"temperature": 0.5
}
response = requests.post(
"https://backend.raycast.com/api/v1/ai/chat_completions",
headers=headers,
json=data,
stream=True,
proxies={"https": proxy}
)
for token in response.iter_lines():
if b'data: ' not in token:
continue
completion_chunk = json.loads(token.decode().replace('data: ', ''))
token = completion_chunk['text']
if token is not None:
yield token

View File

@@ -1,131 +0,0 @@
from __future__ import annotations
import random
import string
import json
from urllib.parse import urlencode
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, Sources
from ...requests.raise_for_status import raise_for_status
class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Rubiks AI"
url = "https://rubiks.ai"
api_endpoint = "https://rubiks.ai/search/api/"
working = False
supports_stream = True
supports_system_message = True
supports_message_history = True
default_model = 'gpt-4o-mini'
models = [default_model, 'gpt-4o', 'o1-mini', 'claude-3.5-sonnet', 'grok-beta', 'gemini-1.5-pro', 'nova-pro', "llama-3.1-70b-versatile"]
model_aliases = {
"llama-3.1-70b": "llama-3.1-70b-versatile",
}
@staticmethod
def generate_mid() -> str:
"""
Generates a 'mid' string following the pattern:
6 characters - 4 characters - 4 characters - 4 characters - 12 characters
Example: 0r7v7b-quw4-kdy3-rvdu-ekief6xbuuq4
"""
parts = [
''.join(random.choices(string.ascii_lowercase + string.digits, k=6)),
''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
''.join(random.choices(string.ascii_lowercase + string.digits, k=4)),
''.join(random.choices(string.ascii_lowercase + string.digits, k=12))
]
return '-'.join(parts)
@staticmethod
def create_referer(q: str, mid: str, model: str = '') -> str:
"""
Creates a Referer URL with dynamic q and mid values, using urlencode for safe parameter encoding.
"""
params = {'q': q, 'model': model, 'mid': mid}
encoded_params = urlencode(params)
return f'https://rubiks.ai/search/?{encoded_params}'
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
web_search: bool = False,
temperature: float = 0.6,
**kwargs
) -> AsyncResult:
"""
Creates an asynchronous generator that sends requests to the Rubiks AI API and yields the response.
Parameters:
- model (str): The model to use in the request.
- messages (Messages): The messages to send as a prompt.
- proxy (str, optional): Proxy URL, if needed.
- web_search (bool, optional): Indicates whether to include search sources in the response. Defaults to False.
"""
model = cls.get_model(model)
mid_value = cls.generate_mid()
referer = cls.create_referer(q=messages[-1]["content"], mid=mid_value, model=model)
data = {
"messages": messages,
"model": model,
"search": web_search,
"stream": True,
"temperature": temperature
}
headers = {
'Accept': 'text/event-stream',
'Accept-Language': 'en-US,en;q=0.9',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
'Pragma': 'no-cache',
'Referer': referer,
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
'sec-ch-ua': '"Chromium";v="129", "Not=A?Brand";v="8"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"Linux"'
}
async with ClientSession() as session:
async with session.post(cls.api_endpoint, headers=headers, json=data, proxy=proxy) as response:
await raise_for_status(response)
sources = []
async for line in response.content:
decoded_line = line.decode('utf-8').strip()
if not decoded_line.startswith('data: '):
continue
data = decoded_line[6:]
if data in ('[DONE]', '{"done": ""}'):
break
try:
json_data = json.loads(data)
except json.JSONDecodeError:
continue
if 'url' in json_data and 'title' in json_data:
if web_search:
sources.append(json_data)
elif 'choices' in json_data:
for choice in json_data['choices']:
delta = choice.get('delta', {})
content = delta.get('content', '')
if content:
yield content
if web_search and sources:
yield Sources(sources)

View File

@@ -1,156 +0,0 @@
from __future__ import annotations
import time
from ...typing import CreateResult, Messages
from ..base_provider import AbstractProvider
from ..helper import format_prompt
# Selenium helpers used below; assumed to come from the package's webdriver module.
from ...webdriver import WebDriver, WebDriverSession, element_send_text
models = {
"theb-ai": "TheB.AI",
"theb-ai-free": "TheB.AI Free",
"gpt-3.5-turbo": "GPT-3.5 Turbo (New)",
"gpt-3.5-turbo-16k": "GPT-3.5-16K",
"gpt-4-turbo": "GPT-4 Turbo",
"gpt-4": "GPT-4",
"gpt-4-32k": "GPT-4 32K",
"claude-2": "Claude 2",
"claude-instant-1": "Claude Instant 1.2",
"palm-2": "PaLM 2",
"palm-2-32k": "PaLM 2 32K",
"palm-2-codey": "Codey",
"palm-2-codey-32k": "Codey 32K",
"vicuna-13b-v1.5": "Vicuna v1.5 13B",
"llama-2-7b-chat": "Llama 2 7B",
"llama-2-13b-chat": "Llama 2 13B",
"llama-2-70b-chat": "Llama 2 70B",
"code-llama-7b": "Code Llama 7B",
"code-llama-13b": "Code Llama 13B",
"code-llama-34b": "Code Llama 34B",
"qwen-7b-chat": "Qwen 7B"
}
class Theb(AbstractProvider):
label = "TheB.AI"
url = "https://beta.theb.ai"
working = False
supports_stream = True
models = models.keys()
@classmethod
def create_completion(
cls,
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
webdriver: WebDriver = None,
virtual_display: bool = True,
**kwargs
) -> CreateResult:
if model in models:
model = models[model]
prompt = format_prompt(messages)
web_session = WebDriverSession(webdriver, virtual_display=virtual_display, proxy=proxy)
with web_session as driver:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
# Register fetch hook
script = """
window._fetch = window.fetch;
window.fetch = async (url, options) => {
// Call parent fetch method
const response = await window._fetch(url, options);
if (!url.startsWith("/api/conversation")) {
return response;
}
// Copy response
copy = response.clone();
window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
return copy;
}
window._last_message = "";
"""
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": script
})
try:
driver.get(f"{cls.url}/home")
wait = WebDriverWait(driver, 5)
wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
except:
driver = web_session.reopen()
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
"source": script
})
driver.get(f"{cls.url}/home")
wait = WebDriverWait(driver, 240)
wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
except:
pass
if model:
# Load model panel
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#SelectModel svg")))
time.sleep(0.1)
driver.find_element(By.CSS_SELECTOR, "#SelectModel svg").click()
try:
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
except:
pass
# Select model
selector = f"div.flex-col div.items-center span[title='{model}']"
wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
span = driver.find_element(By.CSS_SELECTOR, selector)
container = span.find_element(By.XPATH, "//div/../..")
button = container.find_element(By.CSS_SELECTOR, "button.btn-blue.btn-small.border")
button.click()
# Submit prompt
wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
element_send_text(driver.find_element(By.ID, "textareaAutosize"), prompt)
# Read response with reader
script = """
if(window._reader) {
chunk = await window._reader.read();
if (chunk['done']) {
return null;
}
message = '';
chunk['value'].split('\\r\\n').forEach((line, index) => {
if (line.startsWith('data: ')) {
try {
line = JSON.parse(line.substring('data: '.length));
message = line["args"]["content"];
} catch(e) { }
}
});
if (message) {
try {
return message.substring(window._last_message.length);
} finally {
window._last_message = message;
}
}
}
return '';
"""
while True:
chunk = driver.execute_script(script)
if chunk:
yield chunk
elif chunk != "":
break
else:
time.sleep(0.1)

View File

@@ -1,47 +0,0 @@
from __future__ import annotations
import requests
from ..template import OpenaiTemplate
from ...errors import ModelNotFoundError
from ... import debug
class TypeGPT(OpenaiTemplate):
label = "TypeGpt"
url = "https://chat.typegpt.net"
api_base = "https://chat.typegpt.net/api/openai/v1"
working = False
headers = {
"accept": "application/json, text/event-stream",
"accept-language": "de,en-US;q=0.9,en;q=0.8",
"content-type": "application/json",
"priority": "u=1, i",
"sec-ch-ua": "\"Not(A:Brand\";v=\"99\", \"Google Chrome\";v=\"133\", \"Chromium\";v=\"133\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Linux\"",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"referer": "https://chat.typegpt.net/",
}
default_model = 'gpt-4o-mini-2024-07-18'
default_vision_model = default_model
vision_models = ['gpt-3.5-turbo', 'gpt-3.5-turbo-202201', default_vision_model, "o3-mini"]
fallback_models = vision_models + ["deepseek-r1", "deepseek-v3", "evil"]
image_models = ["Image-Generator"]
model_aliases = {
"gpt-4o-mini": "gpt-4o-mini-2024-07-18",
"evil": "uncensored-r1",
}
@classmethod
def get_models(cls, **kwargs):
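# The site's /api/config exposes "customModels" as a comma-separated string;
# entries prefixed with "-" are skipped, a leading "+" is stripped, and
# anything after "@" is dropped.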
if not cls.models:
try:
cls.models = requests.get(f"{cls.url}/api/config").json()["customModels"].split(",")
cls.models = [model.split("@")[0].strip("+") for model in cls.models if not model.startswith("-") and model not in cls.image_models]
except Exception as e:
cls.models = cls.fallback_models
debug.log(f"Error fetching models: {e}")
return cls.models

View File

@@ -1,91 +0,0 @@
from __future__ import annotations
from aiohttp import ClientSession
import json
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import format_prompt
class Upstage(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://console.upstage.ai/playground/chat"
api_endpoint = "https://ap-northeast-2.apistage.ai/v1/web/demo/chat/completions"
working = False
default_model = 'solar-pro'
models = [
'upstage/solar-1-mini-chat',
'upstage/solar-1-mini-chat-ja',
'solar-pro',
]
model_aliases = {
"solar-mini": "upstage/solar-1-mini-chat",
"solar-mini": "upstage/solar-1-mini-chat-ja",
}
@classmethod
def get_model(cls, model: str) -> str:
if model in cls.models:
return model
elif model in cls.model_aliases:
return cls.model_aliases[model]
else:
return cls.default_model
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
proxy: str = None,
**kwargs
) -> AsyncResult:
model = cls.get_model(model)
headers = {
"accept": "*/*",
"accept-language": "en-US,en;q=0.9",
"cache-control": "no-cache",
"content-type": "application/json",
"dnt": "1",
"origin": "https://console.upstage.ai",
"pragma": "no-cache",
"priority": "u=1, i",
"referer": "https://console.upstage.ai/",
"sec-ch-ua": '"Not?A_Brand";v="99", "Chromium";v="130"',
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": '"Linux"',
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "cross-site",
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36"
}
async with ClientSession(headers=headers) as session:
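# The playground endpoint accepts a single user turn, so the whole message
# history is flattened into one prompt via format_prompt.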
data = {
"stream": True,
"messages": [{"role": "user", "content": format_prompt(messages)}],
"model": model
}
async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
response.raise_for_status()
response_text = ""
async for line in response.content:
if line:
line = line.decode('utf-8').strip()
if line.startswith("data: ") and line != "data: [DONE]":
try:
data = json.loads(line[6:])
content = data['choices'][0]['delta'].get('content', '')
if content:
response_text += content
yield content
except json.JSONDecodeError:
continue
if line == "data: [DONE]":
break

View File

@@ -1,392 +0,0 @@
from __future__ import annotations
import json, base64, requests, random, uuid
try:
import execjs
has_requirements = True
except ImportError:
has_requirements = False
from ...typing import Messages, TypedDict, CreateResult, Any
from ..base_provider import AbstractProvider
from ...errors import MissingRequirementsError
class Vercel(AbstractProvider):
url = 'https://sdk.vercel.ai'
working = False
supports_message_history = True
supports_gpt_35_turbo = True
supports_stream = True
@staticmethod
def create_completion(
model: str,
messages: Messages,
stream: bool,
proxy: str = None,
**kwargs
) -> CreateResult:
if not has_requirements:
raise MissingRequirementsError('Install "PyExecJS" package')
if not model:
model = "gpt-3.5-turbo"
elif model not in model_info:
raise ValueError(f"Vercel does not support {model}")
headers = {
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'content-type': 'application/json',
'custom-encoding': get_anti_bot_token(),
'origin': 'https://sdk.vercel.ai',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
json_data = {
'model' : model_info[model]['id'],
'messages' : messages,
'playgroundId': str(uuid.uuid4()),
'chatIndex' : 0,
**model_info[model]['default_params'],
**kwargs
}
max_retries = kwargs.get('max_retries', 20)
for _ in range(max_retries):
response = requests.post('https://chat.vercel.ai/api/chat',
headers=headers, json=json_data, stream=True, proxies={"https": proxy})
try:
response.raise_for_status()
except:
continue
for token in response.iter_content(chunk_size=None):
yield token.decode()
break
def get_anti_bot_token() -> str:
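# Fetch the challenge served at /openai.jpeg (base64-encoded JSON with a JS
# function `c`, argument `a` and token `t`), run it with PyExecJS, and
# re-encode the result as the UTF-16LE/base64 "custom-encoding" header value.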
headers = {
'authority': 'sdk.vercel.ai',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'cache-control': 'no-cache',
'pragma': 'no-cache',
'referer': 'https://sdk.vercel.ai/',
'sec-ch-ua': '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'user-agent': f'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.{random.randint(99, 999)}.{random.randint(99, 999)} Safari/537.36',
}
response = requests.get('https://sdk.vercel.ai/openai.jpeg',
headers=headers).text
raw_data = json.loads(base64.b64decode(response,
validate=True))
js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
return (%s)(%s)''' % (raw_data['c'], raw_data['a'])
raw_token = json.dumps({'r': execjs.compile(js_script).call(''), 't': raw_data['t']},
separators = (",", ":"))
return base64.b64encode(raw_token.encode('utf-16le')).decode()
class ModelInfo(TypedDict):
id: str
default_params: dict[str, Any]
model_info: dict[str, ModelInfo] = {
# 'claude-instant-v1': {
# 'id': 'anthropic:claude-instant-v1',
# 'default_params': {
# 'temperature': 1,
# 'maximumLength': 1024,
# 'topP': 1,
# 'topK': 1,
# 'presencePenalty': 1,
# 'frequencyPenalty': 1,
# 'stopSequences': ['\n\nHuman:'],
# },
# },
# 'claude-v1': {
# 'id': 'anthropic:claude-v1',
# 'default_params': {
# 'temperature': 1,
# 'maximumLength': 1024,
# 'topP': 1,
# 'topK': 1,
# 'presencePenalty': 1,
# 'frequencyPenalty': 1,
# 'stopSequences': ['\n\nHuman:'],
# },
# },
# 'claude-v2': {
# 'id': 'anthropic:claude-v2',
# 'default_params': {
# 'temperature': 1,
# 'maximumLength': 1024,
# 'topP': 1,
# 'topK': 1,
# 'presencePenalty': 1,
# 'frequencyPenalty': 1,
# 'stopSequences': ['\n\nHuman:'],
# },
# },
'replicate/llama70b-v2-chat': {
'id': 'replicate:replicate/llama-2-70b-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'a16z-infra/llama7b-v2-chat': {
'id': 'replicate:a16z-infra/llama7b-v2-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'a16z-infra/llama13b-v2-chat': {
'id': 'replicate:a16z-infra/llama13b-v2-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'replicate/llama-2-70b-chat': {
'id': 'replicate:replicate/llama-2-70b-chat',
'default_params': {
'temperature': 0.75,
'maximumLength': 3000,
'topP': 1,
'repetitionPenalty': 1,
},
},
'bigscience/bloom': {
'id': 'huggingface:bigscience/bloom',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
},
},
'google/flan-t5-xxl': {
'id': 'huggingface:google/flan-t5-xxl',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
},
},
'EleutherAI/gpt-neox-20b': {
'id': 'huggingface:EleutherAI/gpt-neox-20b',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
'stopSequences': [],
},
},
'OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5': {
'id': 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
'default_params': {
'maximumLength': 1024,
'typicalP': 0.2,
'repetitionPenalty': 1,
},
},
'OpenAssistant/oasst-sft-1-pythia-12b': {
'id': 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
'default_params': {
'maximumLength': 1024,
'typicalP': 0.2,
'repetitionPenalty': 1,
},
},
'bigcode/santacoder': {
'id': 'huggingface:bigcode/santacoder',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 0.95,
'topK': 4,
'repetitionPenalty': 1.03,
},
},
'command-light-nightly': {
'id': 'cohere:command-light-nightly',
'default_params': {
'temperature': 0.9,
'maximumLength': 1024,
'topP': 1,
'topK': 0,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'command-nightly': {
'id': 'cohere:command-nightly',
'default_params': {
'temperature': 0.9,
'maximumLength': 1024,
'topP': 1,
'topK': 0,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
# 'gpt-4': {
# 'id': 'openai:gpt-4',
# 'default_params': {
# 'temperature': 0.7,
# 'maximumLength': 8192,
# 'topP': 1,
# 'presencePenalty': 0,
# 'frequencyPenalty': 0,
# 'stopSequences': [],
# },
# },
# 'gpt-4-0613': {
# 'id': 'openai:gpt-4-0613',
# 'default_params': {
# 'temperature': 0.7,
# 'maximumLength': 8192,
# 'topP': 1,
# 'presencePenalty': 0,
# 'frequencyPenalty': 0,
# 'stopSequences': [],
# },
# },
'code-davinci-002': {
'id': 'openai:code-davinci-002',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'gpt-3.5-turbo': {
'id': 'openai:gpt-3.5-turbo',
'default_params': {
'temperature': 0.7,
'maximumLength': 4096,
'topP': 1,
'topK': 1,
'presencePenalty': 1,
'frequencyPenalty': 1,
'stopSequences': [],
},
},
'gpt-3.5-turbo-16k': {
'id': 'openai:gpt-3.5-turbo-16k',
'default_params': {
'temperature': 0.7,
'maximumLength': 16280,
'topP': 1,
'topK': 1,
'presencePenalty': 1,
'frequencyPenalty': 1,
'stopSequences': [],
},
},
'gpt-3.5-turbo-16k-0613': {
'id': 'openai:gpt-3.5-turbo-16k-0613',
'default_params': {
'temperature': 0.7,
'maximumLength': 16280,
'topP': 1,
'topK': 1,
'presencePenalty': 1,
'frequencyPenalty': 1,
'stopSequences': [],
},
},
'text-ada-001': {
'id': 'openai:text-ada-001',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-babbage-001': {
'id': 'openai:text-babbage-001',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-curie-001': {
'id': 'openai:text-curie-001',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-davinci-002': {
'id': 'openai:text-davinci-002',
'default_params': {
'temperature': 0.5,
'maximumLength': 1024,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
'text-davinci-003': {
'id': 'openai:text-davinci-003',
'default_params': {
'temperature': 0.5,
'maximumLength': 4097,
'topP': 1,
'presencePenalty': 0,
'frequencyPenalty': 0,
'stopSequences': [],
},
},
}

View File

@@ -1,184 +0,0 @@
from __future__ import annotations
import json
import random
import string
import asyncio
from aiohttp import ClientSession
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ...requests.raise_for_status import raise_for_status
from ...errors import ResponseStatusError
from ...providers.response import ImageResponse
from ..helper import format_prompt, format_media_prompt
class Websim(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://websim.ai"
login_url = None
chat_api_endpoint = "https://websim.ai/api/v1/inference/run_chat_completion"
image_api_endpoint = "https://websim.ai/api/v1/inference/run_image_generation"
working = False
needs_auth = False
use_nodriver = False
supports_stream = False
supports_system_message = True
supports_message_history = True
default_model = 'gemini-2.5-pro'
default_image_model = 'flux'
image_models = [default_image_model]
models = [default_model, 'gemini-2.5-flash'] + image_models
@staticmethod
def generate_project_id(for_image=False):
"""
Generate a project ID in the appropriate format
For chat: format like 'ke3_xh5gai3gjkmruomu'
For image: format like 'kx0m131_rzz66qb2xoy7'
"""
chars = string.ascii_lowercase + string.digits
if for_image:
first_part = ''.join(random.choices(chars, k=7))
second_part = ''.join(random.choices(chars, k=12))
return f"{first_part}_{second_part}"
else:
prefix = ''.join(random.choices(chars, k=3))
suffix = ''.join(random.choices(chars, k=15))
return f"{prefix}_{suffix}"
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
prompt: str = None,
proxy: str = None,
aspect_ratio: str = "1:1",
project_id: str = None,
**kwargs
) -> AsyncResult:
is_image_request = model in cls.image_models
if project_id is None:
project_id = cls.generate_project_id(for_image=is_image_request)
headers = {
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'content-type': 'text/plain;charset=UTF-8',
'origin': 'https://websim.ai',
'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36',
'websim-flags;': ''
}
if is_image_request:
headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/ai-image-prompt-generator'
async for result in cls._handle_image_request(
project_id=project_id,
messages=messages,
prompt=prompt,
aspect_ratio=aspect_ratio,
headers=headers,
proxy=proxy,
**kwargs
):
yield result
else:
headers['referer'] = 'https://websim.ai/@ISWEARIAMNOTADDICTEDTOPILLOW/zelos-ai-assistant'
async for result in cls._handle_chat_request(
project_id=project_id,
messages=messages,
headers=headers,
proxy=proxy,
**kwargs
):
yield result
@classmethod
async def _handle_image_request(
cls,
project_id: str,
messages: Messages,
prompt: str,
aspect_ratio: str,
headers: dict,
proxy: str = None,
**kwargs
) -> AsyncResult:
used_prompt = format_media_prompt(messages, prompt)
async with ClientSession(headers=headers) as session:
data = {
"project_id": project_id,
"prompt": used_prompt,
"aspect_ratio": aspect_ratio
}
async with session.post(f"{cls.image_api_endpoint}", json=data, proxy=proxy) as response:
await raise_for_status(response)
response_text = await response.text()
response_json = json.loads(response_text)
image_url = response_json.get("url")
if image_url:
yield ImageResponse(urls=[image_url], alt=used_prompt)
@classmethod
async def _handle_chat_request(
cls,
project_id: str,
messages: Messages,
headers: dict,
proxy: str = None,
**kwargs
) -> AsyncResult:
max_retries = 3
retry_count = 0
last_error = None
while retry_count < max_retries:
try:
async with ClientSession(headers=headers) as session:
data = {
"project_id": project_id,
"messages": messages
}
async with session.post(f"{cls.chat_api_endpoint}", json=data, proxy=proxy) as response:
if response.status == 429:
response_text = await response.text()
last_error = ResponseStatusError(f"Response {response.status}: {response_text}")
retry_count += 1
if retry_count < max_retries:
wait_time = 2 ** retry_count
await asyncio.sleep(wait_time)
continue
else:
raise last_error
await raise_for_status(response)
response_text = await response.text()
try:
response_json = json.loads(response_text)
content = response_json.get("content", "")
yield content.strip()
break
except json.JSONDecodeError:
yield response_text
break
except ResponseStatusError as e:
if "Rate limit exceeded" in str(e) and retry_count < max_retries:
retry_count += 1
wait_time = 2 ** retry_count
await asyncio.sleep(wait_time)
else:
if retry_count >= max_retries:
raise e
else:
raise
except Exception as e:
raise
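# Backoff schedule implied by the retry loop above: with max_retries = 3 a
# rate-limited request is attempted at most three times, sleeping
# 2 ** 1 = 2 s after the first 429 and 2 ** 2 = 4 s after the second,
# before the last error is re-raised.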

View File

@@ -1,46 +0,0 @@
from .har import HarProvider
from .AI365VIP import AI365VIP
from .Aichat import Aichat
from .AiChatOnline import AiChatOnline
from .AiChats import AiChats
from .Ails import Ails
from .AIUncensored import AIUncensored
from .AllenAI import AllenAI
from .AmigoChat import AmigoChat
from .Aura import Aura
from .ChatGpt import ChatGpt
from .Chatgpt4o import Chatgpt4o
from .ChatGptEs import ChatGptEs
from .ChatgptFree import ChatgptFree
from .ChatGptt import ChatGptt
from .DDG import DDG
from .Equing import Equing
from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
from .FreeGpt import FreeGpt
from .FreeNetfly import FreeNetfly
from .FreeRouter import FreeRouter
from .Glider import Glider
from .GPROChat import GPROChat
from .GptOss import GptOss
from .ImageLabs import ImageLabs
from .Koala import Koala
from .LegacyLMArena import LegacyLMArena
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .MagickPen import MagickPen
from .PenguinAI import PenguinAI
from .Phind import Phind
from .Pizzagpt import Pizzagpt
from .Poe import Poe
from .Prodia import Prodia
from .Raycast import Raycast
from .RubiksAI import RubiksAI
from .Theb import Theb
from .TypeGPT import TypeGPT
from .Upstage import Upstage
from .Vercel import Vercel
from .Websim import Websim

View File

@@ -1,263 +0,0 @@
from __future__ import annotations
import os
import json
import uuid
import random
import asyncio
from urllib.parse import urlparse
from ....typing import AsyncResult, Messages, MediaListType
from ....requests import DEFAULT_HEADERS, StreamSession, StreamResponse, FormData, raise_for_status
from ....providers.response import JsonConversation, AuthResult
from ....requests import get_args_from_nodriver, has_nodriver
from ....tools.media import merge_media
from ....image import to_bytes, is_accepted_format
from ....errors import ResponseError
from ...base_provider import AsyncAuthedProvider, ProviderModelMixin
from ...helper import get_last_user_message
from ...deprecated.LegacyLMArena import LegacyLMArena
from .... import debug
class HarProvider(AsyncAuthedProvider, ProviderModelMixin):
label = "LMArena (Har)"
url = "https://legacy.lmarena.ai"
api_endpoint = "/queue/join?"
working = False
active_by_default = True
default_model = LegacyLMArena.default_model
@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs):
if has_nodriver:
try:
async def callback(page):
while not await page.evaluate('document.querySelector(\'textarea[data-testid="textbox"]\')'):
await asyncio.sleep(1)
args = await get_args_from_nodriver(cls.url, proxy=proxy, callback=callback)
except (RuntimeError, FileNotFoundError) as e:
debug.log(f"Nodriver is not available:", e)
args = {"headers": DEFAULT_HEADERS.copy(), "cookies": {}, "impersonate": "chrome"}
else:
args = {"headers": DEFAULT_HEADERS.copy(), "cookies": {}, "impersonate": "chrome"}
args["headers"].update({
"content-type": "application/json",
"accept": "application/json",
"referer": f"{cls.url}/",
"origin": cls.url,
})
yield AuthResult(**args)
@classmethod
def get_models(cls) -> list[str]:
LegacyLMArena.get_models()
cls.models = LegacyLMArena.models
cls.model_aliases = LegacyLMArena.model_aliases
cls.vision_models = LegacyLMArena.vision_models
return cls.models
@classmethod
def _build_second_payloads(cls, model_id: str, session_hash: str, text: str, max_tokens: int, temperature: float, top_p: float):
first_payload = {
"data":[None,model_id,text,{
"text_models":[model_id],
"all_text_models":[model_id],
"vision_models":[],
"image_gen_models":[],
"all_image_gen_models":[],
"search_models":[],
"all_search_models":[],
"models":[model_id],
"all_models":[model_id],
"arena_type":"text-arena"}],
"event_data": None,
"fn_index": 122,
"trigger_id": 157,
"session_hash": session_hash
}
second_payload = {
"data": [],
"event_data": None,
"fn_index": 123,
"trigger_id": 157,
"session_hash": session_hash
}
third_payload = {
"data": [None, temperature, top_p, max_tokens],
"event_data": None,
"fn_index": 124,
"trigger_id": 157,
"session_hash": session_hash
}
return first_payload, second_payload, third_payload
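    # The three payloads above mirror consecutive Gradio event handlers
    # (fn_index 122-124), presumably recorded from the legacy arena UI; they are
    # POSTed in order to ``/queue/join?`` and the generated text is then streamed
    # back over SSE from ``/queue/data?session_hash=<session_hash>``
    # (see create_authed below).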
@classmethod
async def create_authed(
cls,
model: str,
messages: Messages,
auth_result: AuthResult,
media: MediaListType = None,
max_tokens: int = 2048,
temperature: float = 0.7,
top_p: float = 1,
conversation: JsonConversation = None,
**kwargs
) -> AsyncResult:
async def read_response(response: StreamResponse):
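        # The stream sends cumulative message text: keep what has already been
        # yielded in ``returned_data`` and emit only the new suffix of each
        # "data:" payload, skipping cursor markers and "update" keep-alives.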
returned_data = ""
async for line in response.iter_lines():
if not line.startswith(b"data: "):
continue
for content in find_str(json.loads(line[6:]), 3):
if "**NETWORK ERROR DUE TO HIGH TRAFFIC." in content:
raise ResponseError(content)
if content == '<span class="cursor"></span> ' or content == 'update':
continue
if content.endswith(""):
content = content[:-2]
new_content = content
if content.startswith(returned_data):
new_content = content[len(returned_data):]
if not new_content:
continue
returned_data += new_content
yield new_content
if model in cls.model_aliases:
model = cls.model_aliases[model]
if isinstance(model, list):
model = random.choice(model)
prompt = get_last_user_message(messages)
async with StreamSession(**auth_result.get_dict()) as session:
if conversation is None:
conversation = JsonConversation(session_hash=str(uuid.uuid4()).replace("-", ""))
media = list(merge_media(media, messages))
if media:
data = FormData()
for i in range(len(media)):
media[i] = (to_bytes(media[i][0]), media[i][1])
for image, image_name in media:
data.add_field(f"files", image, filename=image_name)
async with session.post(f"{cls.url}/upload", params={"upload_id": conversation.session_hash}, data=data) as response:
await raise_for_status(response)
image_files = await response.json()
media = [{
"path": image_file,
"url": f"{cls.url}/file={image_file}",
"orig_name": media[i][1],
"size": len(media[i][0]),
"mime_type": is_accepted_format(media[i][0]),
"meta": {
"_type": "gradio.FileData"
}
} for i, image_file in enumerate(image_files)]
for domain, harFile in read_har_files():
for v in harFile['log']['entries']:
request_url = v['request']['url']
if domain not in request_url or "." in urlparse(request_url).path or "heartbeat" in request_url:
continue
postData = None
if "postData" in v['request']:
postData = v['request']['postData']['text']
postData = postData.replace('"hello"', json.dumps(prompt))
postData = postData.replace('[null,0.7,1,2048]', json.dumps([None, temperature, top_p, max_tokens]))
postData = postData.replace('"files":[]', f'"files":{json.dumps(media)}')
postData = postData.replace("__SESSION__", conversation.session_hash)
if model:
postData = postData.replace("__MODEL__", model)
request_url = request_url.replace("__SESSION__", conversation.session_hash)
method = v['request']['method'].lower()
async with getattr(session, method)(request_url, data=postData) as response:
await raise_for_status(response)
async for chunk in read_response(response):
yield chunk
yield conversation
else:
first_payload, second_payload, third_payload = cls._build_second_payloads(model, conversation.session_hash, prompt, max_tokens, temperature, top_p)
# POST 1
async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload) as response:
await raise_for_status(response)
# POST 2
async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload) as response:
await raise_for_status(response)
# POST 3
async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload) as response:
await raise_for_status(response)
stream_url = f"{cls.url}/queue/data?session_hash={conversation.session_hash}"
async with session.get(stream_url, headers={"Accept": "text/event-stream"}) as response:
await raise_for_status(response)
async for chunk in read_response(response):
yield chunk
def read_har_files():
for root, _, files in os.walk(os.path.dirname(__file__)):
for file in files:
if not file.endswith(".har"):
continue
with open(os.path.join(root, file), 'rb') as f:
try:
yield os.path.splitext(file)[0], json.load(f)
except json.JSONDecodeError:
raise RuntimeError(f"Failed to read HAR file: {file}")
def read_str_recusive(data):
if isinstance(data, dict):
data = data.values()
for item in data:
if isinstance(item, (list, dict)):
yield from read_str_recusive(item)
elif isinstance(item, str):
yield item
def find_str(data, skip: int = 0):
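    """Yield the first string found in ``data`` after skipping ``skip`` strings,
    traversing dict values and list items depth-first; ``read_response`` above
    calls this with ``skip=3`` to pull the streamed text out of the Gradio SSE
    payload."""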
for item in read_str_recusive(data):
if skip > 0:
skip -= 1
continue
yield item
break
def read_list_recusive(data, key):
if isinstance(data, dict):
for k, v in data.items():
if k == key:
yield v
else:
yield from read_list_recusive(v, key)
elif isinstance(data, list):
for item in data:
yield from read_list_recusive(item, key)
def find_list(data, key):
for item in read_list_recusive(data, key):
if isinstance(item, str):
yield item
elif isinstance(item, list):
yield from item
def get_str_list(data):
for item in data:
if isinstance(item, list):
yield from get_str_list(item)
else:
yield item
# with open("g4f/Provider/har/lmarena.ai.har", "r") as f:
# try:
# harFile = json.loads(f.read())
# except json.JSONDecodeError:
# raise RuntimeError(f"Failed to read HAR file")
# new_entries = []
# for v in harFile['log']['entries']:
# request_url = v['request']['url']
# if not request_url.startswith("https://lmarena.ai") or "." in urlparse(request_url).path or "heartbeat" in request_url:
# continue
# v['request']['cookies'] = []
# v['request']['headers'] = [header for header in v['request']['headers'] if header['name'].lower() != "cookie"]
# v['response']['headers'] = []
# new_entries.append(v)
# print(f"Request URL: {request_url}"

File diff suppressed because one or more lines are too long

View File

@@ -14,14 +14,13 @@ from .Provider import (
Grok,
DeepseekAI_JanusPro7b,
GLM,
Kimi,
LambdaChat,
OIVSCodeSer2,
OIVSCodeSer0501,
OperaAria,
Perplexity,
Startnest,
OpenAIFM,
PerplexityLabs,
PollinationsAI,
PollinationsImage,
Qwen,
@@ -48,6 +47,7 @@ from .Provider import (
OpenaiAccount,
OpenaiChat,
OpenRouter,
PuterJS,
)
class ModelRegistry:
@@ -838,7 +838,7 @@ grok_3_r1 = Model(
kimi = Model(
name = 'kimi-k2',
base_provider = 'kimi.com',
best_provider = IterListProvider([Kimi, HuggingFace, DeepInfra, Groq]),
best_provider = IterListProvider([HuggingFace, DeepInfra, Groq]),
long_name = "moonshotai/Kimi-K2-Instruct"
)
@@ -846,31 +846,31 @@ kimi = Model(
sonar = Model(
name = 'sonar',
base_provider = 'Perplexity AI',
best_provider = PerplexityLabs
best_provider = PuterJS
)
sonar_pro = Model(
name = 'sonar-pro',
base_provider = 'Perplexity AI',
best_provider = PerplexityLabs
best_provider = PuterJS
)
sonar_reasoning = Model(
name = 'sonar-reasoning',
base_provider = 'Perplexity AI',
best_provider = PerplexityLabs
best_provider = PuterJS
)
sonar_reasoning_pro = Model(
name = 'sonar-reasoning-pro',
base_provider = 'Perplexity AI',
best_provider = PerplexityLabs
best_provider = PuterJS
)
r1_1776 = Model(
name = 'r1-1776',
base_provider = 'Perplexity AI',
best_provider = IterListProvider([Together, PerplexityLabs])
best_provider = IterListProvider([Together, PuterJS, Perplexity])
)
### Nvidia ###

View File

@@ -9,7 +9,7 @@ from ..image import is_data_an_audio
from ..providers.retry_provider import RotatedProvider
from ..Provider.needs_auth import OpenaiChat, CopilotAccount
from ..Provider.hf_space import HuggingSpace
from ..Provider import Custom, PollinationsImage, OpenaiAccount, Copilot, Cloudflare, Gemini, Grok, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
from ..Provider import Custom, PollinationsImage, OpenaiAccount, Copilot, Cloudflare, Gemini, Grok, Perplexity, LambdaChat, PollinationsAI, PuterJS
from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfra, LMArena, EdgeTTS, gTTS, MarkItDown, OpenAIFM
from ..Provider import HuggingFace, HuggingFaceMedia, Azure, Qwen, EasyChat, GLM, OpenRouterFree, GeminiPro
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -20,7 +20,7 @@ from .any_model_map import audio_models, image_models, vision_models, video_mode
# Add providers to existing models on map
PROVIDERS_LIST_2 = [
OpenaiChat, Copilot, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok, Azure, Qwen, EasyChat, GLM, OpenRouterFree
OpenaiChat, Copilot, CopilotAccount, PollinationsAI, Perplexity, Gemini, Grok, Azure, Qwen, EasyChat, GLM, OpenRouterFree
]
# Add all models to the model map