Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-11-02 20:34:02 +08:00)

Merge pull request #2920 from hlohaus/30Mar
feat: add LM Arena provider, async‑ify Copilot & surface follow‑up su…
@@ -26,6 +26,10 @@ from typing import Optional, Dict, Any, List, Tuple

from g4f.client import Client
from g4f.models import ModelUtils
import g4f.Provider

from g4f import debug
debug.logging = True

# Constants
DEFAULT_MODEL = "claude-3.7-sonnet"
@@ -184,16 +188,21 @@ def generate_commit_message(diff_text: str, model: str = DEFAULT_MODEL) -> Optio

        # Make API call
        response = client.chat.completions.create(
            prompt,
            model=model,
            messages=[{"role": "user", "content": prompt}]
            stream=True,
        )

        # Stop spinner and clear line
        spinner.set()
        sys.stdout.write("\r" + " " * 50 + "\r")
        sys.stdout.flush()

        return response.choices[0].message.content.strip()
        content = []
        for chunk in response:
            # Stop spinner and clear line
            if spinner:
                spinner.set()
                print(" " * 50 + "\n", flush=True)
                spinner = None
            if isinstance(chunk.choices[0].delta.content, str):
                content.append(chunk.choices[0].delta.content)
                print(chunk.choices[0].delta.content, end="", flush=True)
        return "".join(content).strip()
    except Exception as e:
        # Stop spinner if it's running
        if 'spinner' in locals() and spinner:
@@ -306,11 +315,6 @@ def main():
        print("Failed to generate commit message after multiple attempts.")
        sys.exit(1)

    print("\nGenerated commit message:")
    print("-" * 50)
    print(commit_message)
    print("-" * 50)

    if args.edit:
        print("\nOpening editor to modify commit message...")
        commit_message = edit_commit_message(commit_message)
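Note: this hunk switches the helper from a blocking completion to streamed output. A minimal standalone sketch of the consuming pattern, assuming only that g4f is installed (model name as in the script):

    from g4f.client import Client

    client = Client()
    response = client.chat.completions.create(
        model="claude-3.7-sonnet",
        messages=[{"role": "user", "content": "Summarize this diff"}],
        stream=True,  # chunks arrive while the model is still generating
    )
    parts = []
    for chunk in response:
        delta = chunk.choices[0].delta.content
        if isinstance(delta, str):
            parts.append(delta)  # collect tokens; the script also echoes them live
    commit_message = "".join(parts).strip()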
@@ -14,12 +14,13 @@ from datetime import datetime, timedelta
from ..typing import AsyncResult, Messages, MediaListType
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .openai.har_file import get_har_files
from ..image import to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_image_prompt
from .helper import format_image_prompt, render_messages
from ..providers.response import JsonConversation, ImageResponse
from ..tools.media import merge_media
from ..errors import PaymentRequiredError
from ..errors import RateLimitError
from .. import debug

class Conversation(JsonConversation):
@@ -428,53 +429,46 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
            Optional[dict]: Session data if found, None otherwise
        """
        try:
            har_dir = get_cookies_dir()
            if not os.access(har_dir, os.R_OK):
                return None

            for root, _, files in os.walk(har_dir):
                for file in files:
                    if file.endswith(".har"):
                        try:
                            with open(os.path.join(root, file), 'rb') as f:
                                har_data = json.load(f)

                            for entry in har_data['log']['entries']:
                                # Only look at blackbox API responses
                                if 'blackbox.ai/api' in entry['request']['url']:
                                    # Look for a response that has the right structure
                                    if 'response' in entry and 'content' in entry['response']:
                                        content = entry['response']['content']
                                        # Look for both regular and Google auth session formats
                                        if ('text' in content and
                                            isinstance(content['text'], str) and
                                            '"user"' in content['text'] and
                                            '"email"' in content['text'] and
                                            '"expires"' in content['text']):

                                            try:
                                                # Remove any HTML or other non-JSON content
                                                text = content['text'].strip()
                                                if text.startswith('{') and text.endswith('}'):
                                                    # Replace escaped quotes
                                                    text = text.replace('\\"', '"')
                                                    har_session = json.loads(text)

                                                    # Check if this is a valid session object
                                                    if (isinstance(har_session, dict) and
                                                        'user' in har_session and
                                                        'email' in har_session['user'] and
                                                        'expires' in har_session):

                                                        debug.log(f"Blackbox: Found session in HAR file: {file}")
                                                        return har_session
                                            except json.JSONDecodeError as e:
                                                # Only print error for entries that truly look like session data
                                                if ('"user"' in content['text'] and
                                                    '"email"' in content['text']):
                                                    debug.log(f"Blackbox: Error parsing likely session data: {e}")
                        except Exception as e:
                            debug.log(f"Blackbox: Error reading HAR file {file}: {e}")
            for file in get_har_files():
                try:
                    with open(file, 'rb') as f:
                        har_data = json.load(f)

                    for entry in har_data['log']['entries']:
                        # Only look at blackbox API responses
                        if 'blackbox.ai/api' in entry['request']['url']:
                            # Look for a response that has the right structure
                            if 'response' in entry and 'content' in entry['response']:
                                content = entry['response']['content']
                                # Look for both regular and Google auth session formats
                                if ('text' in content and
                                    isinstance(content['text'], str) and
                                    '"user"' in content['text'] and
                                    '"email"' in content['text'] and
                                    '"expires"' in content['text']):
                                    try:
                                        # Remove any HTML or other non-JSON content
                                        text = content['text'].strip()
                                        if text.startswith('{') and text.endswith('}'):
                                            # Replace escaped quotes
                                            text = text.replace('\\"', '"')
                                            har_session = json.loads(text)

                                            # Check if this is a valid session object
                                            if (isinstance(har_session, dict) and
                                                'user' in har_session and
                                                'email' in har_session['user'] and
                                                'expires' in har_session):

                                                debug.log(f"Blackbox: Found session in HAR file: {file}")
                                                return har_session
                                    except json.JSONDecodeError as e:
                                        # Only print error for entries that truly look like session data
                                        if ('"user"' in content['text'] and
                                            '"email"' in content['text']):
                                            debug.log(f"Blackbox: Error parsing likely session data: {e}")
                except Exception as e:
                    debug.log(f"Blackbox: Error reading HAR file {file}: {e}")
            return None
        except Exception as e:
            debug.log(f"Blackbox: Error searching HAR files: {e}")
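Note: the rewrite replaces the ad-hoc os.walk over the cookies directory with the shared get_har_files() helper from .openai.har_file, which already collects and mtime-sorts the .har files. A hedged sketch of the consumption pattern (import path per this diff):

    import json
    from g4f.Provider.openai.har_file import get_har_files

    for path in get_har_files():
        with open(path, "rb") as f:
            har = json.load(f)
        for entry in har["log"]["entries"]:
            if "blackbox.ai/api" in entry["request"]["url"]:
                ...  # inspect entry["response"]["content"] as above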
@@ -573,7 +567,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
            conversation.message_history = []

        current_messages = []
        for i, msg in enumerate(messages):
        for i, msg in enumerate(render_messages(messages)):
            msg_id = conversation.chat_id if i == 0 and msg["role"] == "user" else cls.generate_id()
            current_msg = {
                "id": msg_id,
@@ -690,8 +684,8 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
                async for chunk in response.content.iter_any():
                    if chunk:
                        chunk_text = chunk.decode()
                        if chunk_text == "You have reached your request limit for the hour":
                            raise PaymentRequiredError(chunk_text)
                        if "You have reached your request limit for the hour" in chunk_text:
                            raise RateLimitError(chunk_text)
                        full_response.append(chunk_text)
                        # Only yield chunks for non-image models
                        if model != cls.default_image_model:
@@ -9,7 +9,7 @@ from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for
from ..requests import DEFAULT_HEADERS, has_nodriver, has_curl_cffi
from ..providers.response import FinishReason, Usage
from ..errors import ResponseStatusError, ModelNotFoundError
from .helper import to_string
from .helper import render_messages

class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
    label = "Cloudflare AI"
@@ -82,7 +82,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
            elif has_nodriver:
                cls._args = await get_args_from_nodriver(cls.url, proxy, timeout, cookies)
            else:
                cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}}
                cls._args = {"headers": DEFAULT_HEADERS, "cookies": {}, "impersonate": "chrome"}
        try:
            model = cls.get_model(model)
        except ModelNotFoundError:
@@ -90,8 +90,7 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
        data = {
            "messages": [{
                **message,
                "content": to_string(message["content"]),
                "parts": [{"type":"text", "text": to_string(message["content"])}]} for message in messages],
                "parts": [{"type":"text", "text": message["content"]}]} for message in render_messages(messages)],
            "lora": None,
            "model": model,
            "max_tokens": max_tokens,
@@ -120,5 +119,5 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                        yield Usage(**finish.get("usage"))
                        yield FinishReason(finish.get("finishReason"))

                with cache_file.open("w") as f:
                    json.dump(cls._args, f)
        with cache_file.open("w") as f:
            json.dump(cls._args, f)
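Note: the trailing hunk moves the cache write so the resolved request args are persisted once per call. A minimal sketch of the idea, assuming a hypothetical cache path (the real one comes from AuthFileMixin's cache-file helper):

    import json
    from pathlib import Path

    cache_file = Path("~/.g4f/auth/Cloudflare.json").expanduser()  # hypothetical location
    args = {"headers": {"user-agent": "Mozilla/5.0"}, "cookies": {}, "impersonate": "chrome"}
    cache_file.parent.mkdir(parents=True, exist_ok=True)
    with cache_file.open("w") as f:
        json.dump(args, f)  # reused on the next run instead of re-launching a browser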
@@ -3,11 +3,10 @@ from __future__ import annotations
import os
import json
import asyncio
import base64
from urllib.parse import quote

try:
    from curl_cffi.requests import Session
    from curl_cffi.requests import AsyncSession
    from curl_cffi import CurlWsFlag
    has_curl_cffi = True
except ImportError:
@@ -18,14 +17,12 @@ try:
except ImportError:
    has_nodriver = False

from .base_provider import AbstractProvider, ProviderModelMixin
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt_max_length
from .openai.har_file import get_headers, get_har_files
from ..typing import CreateResult, Messages, MediaListType
from ..typing import AsyncResult, Messages, MediaListType
from ..errors import MissingRequirementsError, NoValidHarFileError, MissingAuthError
from ..requests.raise_for_status import raise_for_status
from ..providers.response import BaseConversation, JsonConversation, RequestLogin, ImageResponse, FinishReason, SuggestedFollowups
from ..providers.asyncio import get_running_loop
from ..providers.response import BaseConversation, JsonConversation, RequestLogin, ImageResponse, FinishReason, SuggestedFollowups, TitleGeneration, Sources, SourceLink
from ..tools.media import merge_media
from ..requests import get_nodriver
from ..image import to_bytes, is_accepted_format
@@ -38,7 +35,7 @@ class Conversation(JsonConversation):
    def __init__(self, conversation_id: str):
        self.conversation_id = conversation_id

class Copilot(AbstractProvider, ProviderModelMixin):
class Copilot(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Microsoft Copilot"
    url = "https://copilot.microsoft.com"
@@ -62,20 +59,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
    _cookies: dict = None

    @classmethod
    def create_completion(
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        timeout: int = 900,
        timeout: int = 30,
        prompt: str = None,
        media: MediaListType = None,
        conversation: BaseConversation = None,
        return_conversation: bool = False,
        api_key: str = None,
        **kwargs
    ) -> CreateResult:
    ) -> AsyncResult:
        if not has_curl_cffi:
            raise MissingRequirementsError('Install or update "curl_cffi" package | pip install -U curl_cffi')
        model = cls.get_model(model)
@@ -91,14 +88,13 @@ class Copilot(AbstractProvider, ProviderModelMixin):
            debug.log(f"Copilot: {h}")
            if has_nodriver:
                yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
                get_running_loop(check_nested=True)
                cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
                cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
            else:
                raise h
            websocket_url = f"{websocket_url}&accessToken={quote(cls._access_token)}"
            headers = {"authorization": f"Bearer {cls._access_token}"}

        with Session(
        async with AsyncSession(
            timeout=timeout,
            proxy=proxy,
            impersonate="chrome",
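Note: the blocking curl_cffi Session is swapped for AsyncSession, so the nested asyncio.run() call disappears. A hedged sketch of the new request style (curl_cffi API, version-dependent):

    from curl_cffi.requests import AsyncSession

    async def fetch_user() -> dict:
        async with AsyncSession(impersonate="chrome", timeout=30) as session:
            response = await session.get("https://copilot.microsoft.com/c/api/user")
            response.raise_for_status()  # replaces the raise_for_status(response) helper
            return response.json()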
@@ -107,35 +103,19 @@ class Copilot(AbstractProvider, ProviderModelMixin):
        ) as session:
            if cls._access_token is not None:
                cls._cookies = session.cookies.jar if hasattr(session.cookies, "jar") else session.cookies
            # if cls._access_token is None:
            #     try:
            #         url = "https://copilot.microsoft.com/cl/eus-sc/collect"
            #         headers = {
            #             "Accept": "application/x-clarity-gzip",
            #             "referrer": "https://copilot.microsoft.com/onboarding"
            #         }
            #         response = session.post(url, headers=headers, data=get_clarity())
            #         clarity_token = json.loads(response.text.split(" ", maxsplit=1)[-1])[0]["value"]
            #         debug.log(f"Copilot: Clarity Token: ...{clarity_token[-12:]}")
            #     except Exception as e:
            #         debug.log(f"Copilot: {e}")
            # else:
            #     clarity_token = None
            response = session.get("https://copilot.microsoft.com/c/api/user")
            response = await session.get("https://copilot.microsoft.com/c/api/user")
            if response.status_code == 401:
                raise MissingAuthError("Status 401: Invalid access token")
            raise_for_status(response)
            response.raise_for_status()
            user = response.json().get('firstName')
            if user is None:
                cls._access_token = None
            debug.log(f"Copilot: User: {user or 'null'}")
            if conversation is None:
                response = session.post(cls.conversation_url)
                raise_for_status(response)
                response = await session.post(cls.conversation_url)
                response.raise_for_status()
                conversation_id = response.json().get("id")
                conversation = Conversation(conversation_id)
                if return_conversation:
                    yield conversation
                if prompt is None:
                    prompt = format_prompt_max_length(messages, 10000)
                debug.log(f"Copilot: Created conversation: {conversation_id}")
@@ -144,30 +124,28 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                if prompt is None:
                    prompt = get_last_user_message(messages)
                debug.log(f"Copilot: Use conversation: {conversation_id}")
                if return_conversation:
                    yield conversation

            uploaded_images = []
            media, _ = [(None, None), *merge_media(media, messages)].pop()
            if media:
            for media, _ in merge_media(media, messages):
                if not isinstance(media, str):
                    data = to_bytes(media)
                    response = session.post(
                    response = await session.post(
                        "https://copilot.microsoft.com/c/api/attachments",
                        headers={"content-type": is_accepted_format(data)},
                        headers={
                            "content-type": is_accepted_format(data),
                            "content-length": str(len(data)),
                        },
                        data=data
                    )
                    raise_for_status(response)
                    response.raise_for_status()
                    media = response.json().get("url")
                uploaded_images.append({"type":"image", "url": media})

            wss = session.ws_connect(cls.websocket_url)
            # if clarity_token is not None:
            #     wss.send(json.dumps({
            #         "event": "challengeResponse",
            #         "token": clarity_token,
            #         "method":"clarity"
            #     }).encode(), CurlWsFlag.TEXT)
            wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
            wss.send(json.dumps({
            wss = await session.ws_connect(cls.websocket_url, timeout=3)
            await wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
            await wss.send(json.dumps({
                "event": "send",
                "conversationId": conversation_id,
                "content": [*uploaded_images, {
@@ -177,20 +155,20 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                "mode": "reasoning" if "Think" in model else "chat",
            }).encode(), CurlWsFlag.TEXT)

            is_started = False
            done = False
            msg = None
            image_prompt: str = None
            last_msg = None
            sources = {}
            try:
                while True:
                while not wss.closed:
                    try:
                        msg = wss.recv()[0]
                        msg = json.loads(msg)
                        msg = await asyncio.wait_for(wss.recv(), 3 if done else timeout)
                        msg = json.loads(msg[0])
                    except:
                        break
                    last_msg = msg
                    if msg.get("event") == "appendText":
                        is_started = True
                        yield msg.get("text")
                    elif msg.get("event") == "generatingImage":
                        image_prompt = msg.get("prompt")
@@ -198,20 +176,28 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                        yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
                    elif msg.get("event") == "done":
                        yield FinishReason("stop")
                        break
                        done = True
                    elif msg.get("event") == "suggestedFollowups":
                        yield SuggestedFollowups(msg.get("suggestions"))
                        break
                    elif msg.get("event") == "replaceText":
                        yield msg.get("text")
                    elif msg.get("event") == "titleUpdate":
                        yield TitleGeneration(msg.get("title"))
                    elif msg.get("event") == "citation":
                        sources[msg.get("url")] = msg
                        yield SourceLink(list(sources.keys()).index(msg.get("url")), msg.get("url"))
                    elif msg.get("event") == "error":
                        raise RuntimeError(f"Error: {msg}")
                    elif msg.get("event") not in ["received", "startMessage", "citation", "partCompleted"]:
                    elif msg.get("event") not in ["received", "startMessage", "partCompleted"]:
                        debug.log(f"Copilot Message: {msg}")
                if not is_started:
                if not done:
                    raise RuntimeError(f"Invalid response: {last_msg}")
                if sources:
                    yield Sources(sources.values())
            finally:
                wss.close()
                if not wss.closed:
                    await wss.close()

async def get_access_token_and_cookies(url: str, proxy: str = None, target: str = "ChatAI",):
    browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="copilot")
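Note: the receive loop is now bounded: while streaming it waits up to `timeout`, but once a "done" event arrives the budget drops to 3 seconds so trailing events (followups, title, citations) cannot hang the generator. A reduced sketch of that shape, assuming a websocket object like the one above:

    import asyncio, json

    async def drain(wss, timeout: int = 30):
        done = False
        while not wss.closed:
            try:
                raw = await asyncio.wait_for(wss.recv(), 3 if done else timeout)
                msg = json.loads(raw[0])
            except Exception:
                break  # timeout or closed socket ends the loop
            if msg.get("event") == "done":
                done = True  # keep reading briefly for suggestedFollowups etc.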
@@ -263,9 +249,4 @@ def readHAR(url: str):
    if api_key is None:
        raise NoValidHarFileError("No access token found in .har files")

    return api_key, cookies

def get_clarity() -> bytes:
    #{"e":["0.7.58",5,7284,4779,"n59ae4ieqq","aln5en","1upufhz",1,0,0],"a":[[7323,12,65,217,324],[7344,12,65,214,329],[7385,12,65,211,334],[7407,12,65,210,337],[7428,12,65,209,338],[7461,12,65,209,339],[7497,12,65,209,339],[7531,12,65,208,340],[7545,12,65,208,342],[11654,13,65,208,342],[11728,14,65,208,342],[11728,9,65,208,342,17535,19455,0,0,0,"Annehmen",null,"52w7wqv1r.8ovjfyrpu",1],[7284,4,1,393,968,393,968,0,0,231,310,939,0],[12063,0,2,147,3,4,4,18,5,1,10,79,25,15],[12063,36,6,[11938,0]]]}
    body = base64.b64decode("H4sIAAAAAAAAA23RwU7DMAwG4HfJ2aqS2E5ibjxH1cMOnQYqYZvUTQPx7vyJRGGAemj01XWcP+9udg+j80MetDhSyrEISc5GrqrtZnmaTydHbrdUnSsWYT2u+8Obo0Ce/IQvaDBmjkwhUlKKIRNHmQgosqEArWPRDQMx90rxeUMPzB1j+UJvwNIxhTvsPcXyX1T+rizE4juK3mEEhpAUg/JvzW1/+U/tB1LATmhqotoiweMea50PLy2vui4LOY3XfD1dwnkor5fn/e18XBFgm6fHjSzZmCyV7d3aRByAEYextaTHEH3i5pgKGVP/s+DScE5PuLKIpW6FnCi1gY3Rbpqmj0/DI/+L7QEAAA==")
    return body
    return api_key, cookies
@@ -3,28 +3,21 @@ from __future__ import annotations

import asyncio

try:
    from duckduckgo_search import DDGS
    from duckduckgo_search.exceptions import DuckDuckGoSearchException, RatelimitException, ConversationLimitException
    from duckai import DuckAI
    has_requirements = True
except ImportError:
    has_requirements = False
try:
    import nodriver
    has_nodriver = True
except ImportError:
    has_nodriver = False

from ..typing import AsyncResult, Messages
from ..requests import get_nodriver
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import CreateResult, Messages
from .base_provider import AbstractProvider, ProviderModelMixin
from .helper import get_last_user_message

class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
class DuckDuckGo(AbstractProvider, ProviderModelMixin):
    label = "Duck.ai (duckduckgo_search)"
    url = "https://duckduckgo.com/aichat"
    api_base = "https://duckduckgo.com/duckchat/v1/"

    working = False
    working = has_requirements
    supports_stream = True
    supports_system_message = True
    supports_message_history = True
@@ -32,7 +25,7 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
    default_model = "gpt-4o-mini"
    models = [default_model, "meta-llama/Llama-3.3-70B-Instruct-Turbo", "claude-3-haiku-20240307", "o3-mini", "mistralai/Mistral-Small-24B-Instruct-2501"]

    ddgs: DDGS = None
    duck_ai: DuckAI = None

    model_aliases = {
        "gpt-4": "gpt-4o-mini",
@@ -42,44 +35,17 @@ class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
    }

    @classmethod
    async def create_async_generator(
    def create_completion(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 60,
        **kwargs
    ) -> AsyncResult:
    ) -> CreateResult:
        if not has_requirements:
            raise ImportError("duckduckgo_search is not installed. Install it with `pip install duckduckgo-search`.")
        if cls.ddgs is None:
            cls.ddgs = DDGS(proxy=proxy, timeout=timeout)
            if has_nodriver:
                await cls.nodriver_auth(proxy=proxy)
            raise ImportError("duckai is not installed. Install it with `pip install -U duckai`.")
        if cls.duck_ai is None:
            cls.duck_ai = DuckAI(proxy=proxy, timeout=timeout)
        model = cls.get_model(model)
        for chunk in cls.ddgs.chat_yield(get_last_user_message(messages), model, timeout):
            yield chunk

    @classmethod
    async def nodriver_auth(cls, proxy: str = None):
        browser, stop_browser = await get_nodriver(proxy=proxy)
        try:
            page = browser.main_tab
            def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
                if cls.api_base in event.request.url:
                    if "X-Vqd-4" in event.request.headers:
                        cls.ddgs._chat_vqd = event.request.headers["X-Vqd-4"]
                    if "X-Vqd-Hash-1" in event.request.headers:
                        cls.ddgs._chat_vqd_hash = event.request.headers["X-Vqd-Hash-1"]
                    if "F-Fe-Version" in event.request.headers:
                        cls.ddgs._chat_xfe = event.request.headers["F-Fe-Version" ]
            await page.send(nodriver.cdp.network.enable())
            page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
            page = await browser.get(cls.url)
            while True:
                if cls.ddgs._chat_vqd:
                    break
                await asyncio.sleep(1)
            await page.close()
        finally:
            stop_browser()
        yield cls.duck_ai.chat(get_last_user_message(messages), model, timeout)
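Note: the provider now delegates to the duckai package instead of duckduckgo_search, and duckai returns the whole reply at once (hence the single yield). A minimal usage sketch, with the call signature taken from this diff:

    from duckai import DuckAI

    duck = DuckAI(timeout=60)
    print(duck.chat("What is the capital of France?", "gpt-4o-mini", 60))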
@@ -6,4 +6,4 @@ class FreeRouter(OpenaiTemplate):
    label = "CablyAI FreeRouter"
    url = "https://freerouter.cablyai.com"
    api_base = "https://freerouter.cablyai.com/v1"
    working = False
    working = True
@@ -52,8 +52,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
    audio_models = [default_audio_model]
    extra_image_models = ["flux-pro", "flux-dev", "flux-schnell", "midjourney", "dall-e-3", "turbo"]
    vision_models = [default_vision_model, "gpt-4o-mini", "openai", "openai-large", "searchgpt"]
    extra_text_models = vision_models
    _models_loaded = False
    # https://github.com/pollinations/pollinations/blob/master/text.pollinations.ai/generateTextPortkey.js#L15
    model_aliases = {
        ### Text Models ###
        "gpt-4o-mini": "openai",
@@ -100,43 +100,32 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):

            cls.image_models = all_image_models

            # Update of text models
            text_response = requests.get("https://text.pollinations.ai/models")
            text_response.raise_for_status()
            models = text_response.json()

            # Purpose of text models
            cls.text_models = [
                model.get("name")
                for model in models
                if "input_modalities" in model and "text" in model["input_modalities"]
            ]

            # Purpose of audio models
            cls.audio_models = {
                model.get("name"): model.get("voices")
                for model in models
                if model.get("audio")
                if "output_modalities" in model and "audio" in model["output_modalities"]
            }

            # Create a set of unique text models starting with default model
            unique_text_models = {cls.default_model}

            unique_text_models = cls.text_models.copy()

            # Add models from vision_models
            unique_text_models.update(cls.vision_models)

            unique_text_models.extend(cls.vision_models)

            # Add models from the API response
            for model in models:
                model_name = model.get("name")
                if model_name and "input_modalities" in model and "text" in model["input_modalities"]:
                    unique_text_models.add(model_name)

                    unique_text_models.append(model_name)

            # Convert to list and update text_models
            cls.text_models = list(unique_text_models)

            # Update extra_text_models with unique vision models
            cls.extra_text_models = [model for model in cls.vision_models if model != cls.default_model]

            cls.text_models = list(dict.fromkeys(unique_text_models))

            cls._models_loaded = True

        except Exception as e:
@@ -148,12 +137,10 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
            debug.error(f"Failed to fetch models: {e}")

        # Return unique models across all categories
        all_models = set(cls.text_models)
        all_models.update(cls.image_models)
        all_models.update(cls.audio_models.keys())
        result = list(all_models)
        return result

        all_models = cls.text_models.copy()
        all_models.extend(cls.image_models)
        all_models.extend(cls.audio_models.keys())
        return list(dict.fromkeys(all_models))

    @classmethod
    async def create_async_generator(
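Note on the dedup change: list(dict.fromkeys(...)) keeps first-seen order, while list(set(...)) shuffles it, so the combined model list is now stable across runs. Plain-Python illustration:

    models = ["openai", "flux", "openai", "mistral", "flux"]
    print(list(set(models)))            # order is arbitrary
    print(list(dict.fromkeys(models)))  # ['openai', 'flux', 'mistral'] - stable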
@@ -265,15 +252,15 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
        query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
        prompt = quote_plus(prompt)[:2048-256-len(query)]
        url = f"{cls.image_api_endpoint}prompt/{prompt}?{query}"
        def get_image_url(i: int = 0, seed: Optional[int] = None):
            if i == 0:
        def get_image_url(i: int, seed: Optional[int] = None):
            if i == 1:
                if not cache and seed is None:
                    seed = random.randint(0, 2**32)
            else:
                seed = random.randint(0, 2**32)
            return f"{url}&seed={seed}" if seed else url
        async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
            async def get_image(i: int = 0, seed: Optional[int] = None):
            async def get_image(i: int, seed: Optional[int] = None):
                async with session.get(get_image_url(i, seed), allow_redirects=False) as response:
                    try:
                        await raise_for_status(response)
@@ -343,6 +330,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                        if line[6:].startswith(b"[DONE]"):
                            break
                        result = json.loads(line[6:])
                        if "error" in result:
                            raise ResponseError(result["error"].get("message", result["error"]))
                        if "usage" in result:
                            yield Usage(**result["usage"])
                        choices = result.get("choices", [{}])
g4f/Provider/hf_space/LMArenaProvider.py (new file, 253 lines)
@@ -0,0 +1,253 @@
from __future__ import annotations

import json
import uuid
import asyncio

from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ...providers.response import FinishReason
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin
from ..helper import format_prompt
from ... import debug

class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
    label = "LM Arena"
    url = "https://lmarena.ai"
    api_endpoint = "/queue/join?"

    working = True

    default_model = "chatgpt-4o-latest-20250326"
    model_aliases = {"gpt-4o": default_model}
    models = [
        default_model,
        "gpt-4.1-2025-04-14",
        "gemini-2.5-pro-exp-03-25",
        "llama-4-maverick-03-26-experimental",
        "grok-3-preview-02-24",
        "claude-3-7-sonnet-20250219",
        "claude-3-7-sonnet-20250219-thinking-32k",
        "deepseek-v3-0324",
        "llama-4-maverick-17b-128e-instruct",
        "gpt-4.1-mini-2025-04-14",
        "gpt-4.1-nano-2025-04-14",
        "gemini-2.0-flash-thinking-exp-01-21",
        "gemini-2.0-flash-001",
        "gemini-2.0-flash-lite-preview-02-05",
        "gemma-3-27b-it",
        "gemma-3-12b-it",
        "gemma-3-4b-it",
        "deepseek-r1",
        "claude-3-5-sonnet-20241022",
        "o3-mini",
        "llama-3.3-70b-instruct",
        "gpt-4o-mini-2024-07-18",
        "gpt-4o-2024-11-20",
        "gpt-4o-2024-08-06",
        "gpt-4o-2024-05-13",
        "command-a-03-2025",
        "qwq-32b",
        "p2l-router-7b",
        "claude-3-5-haiku-20241022",
        "claude-3-5-sonnet-20240620",
        "doubao-1.5-pro-32k-250115",
        "doubao-1.5-vision-pro-32k-250115",
        "mistral-small-24b-instruct-2501",
        "phi-4",
        "amazon-nova-pro-v1.0",
        "amazon-nova-lite-v1.0",
        "amazon-nova-micro-v1.0",
        "cobalt-exp-beta-v3",
        "cobalt-exp-beta-v4",
        "qwen-max-2025-01-25",
        "qwen-plus-0125-exp",
        "qwen2.5-vl-32b-instruct",
        "qwen2.5-vl-72b-instruct",
        "gemini-1.5-pro-002",
        "gemini-1.5-flash-002",
        "gemini-1.5-flash-8b-001",
        "gemini-1.5-pro-001",
        "gemini-1.5-flash-001",
        "llama-3.1-405b-instruct-bf16",
        "llama-3.3-nemotron-49b-super-v1",
        "llama-3.1-nemotron-ultra-253b-v1",
        "llama-3.1-nemotron-70b-instruct",
        "llama-3.1-70b-instruct",
        "llama-3.1-8b-instruct",
        "hunyuan-standard-2025-02-10",
        "hunyuan-large-2025-02-10",
        "hunyuan-standard-vision-2024-12-31",
        "hunyuan-turbo-0110",
        "hunyuan-turbos-20250226",
        "mistral-large-2411",
        "pixtral-large-2411",
        "mistral-large-2407",
        "llama-3.1-nemotron-51b-instruct",
        "granite-3.1-8b-instruct",
        "granite-3.1-2b-instruct",
        "step-2-16k-exp-202412",
        "step-2-16k-202502",
        "step-1o-vision-32k-highres",
        "yi-lightning",
        "glm-4-plus",
        "glm-4-plus-0111",
        "jamba-1.5-large",
        "jamba-1.5-mini",
        "gemma-2-27b-it",
        "gemma-2-9b-it",
        "gemma-2-2b-it",
        "eureka-chatbot",
        "claude-3-haiku-20240307",
        "claude-3-sonnet-20240229",
        "claude-3-opus-20240229",
        "nemotron-4-340b",
        "llama-3-70b-instruct",
        "llama-3-8b-instruct",
        "qwen2.5-plus-1127",
        "qwen2.5-coder-32b-instruct",
        "qwen2.5-72b-instruct",
        "qwen-max-0919",
        "qwen-vl-max-1119",
        "qwen-vl-max-0809",
        "llama-3.1-tulu-3-70b",
        "olmo-2-0325-32b-instruct",
        "gpt-3.5-turbo-0125",
        "reka-core-20240904",
        "reka-flash-20240904",
        "c4ai-aya-expanse-32b",
        "c4ai-aya-expanse-8b",
        "c4ai-aya-vision-32b",
        "command-r-plus-08-2024",
        "command-r-08-2024",
        "codestral-2405",
        "mixtral-8x22b-instruct-v0.1",
        "mixtral-8x7b-instruct-v0.1",
        "pixtral-12b-2409",
        "ministral-8b-2410"]

    _args: dict = None

    @staticmethod
    def _random_session_hash():
        return str(uuid.uuid4())

    @classmethod
    def _build_payloads(cls, model_id: str, session_hash: str, messages: Messages, max_tokens: int, temperature: float, top_p: float):
        first_payload = {
            "data": [
                None,
                model_id,
                {"text": format_prompt(messages), "files": []},
                {
                    "text_models": [model_id],
                    "all_text_models": [model_id],
                    "vision_models": [],
                    "all_vision_models": [],
                    "image_gen_models": [],
                    "all_image_gen_models": [],
                    "search_models": [],
                    "all_search_models": [],
                    "models": [model_id],
                    "all_models": [model_id],
                    "arena_type": "text-arena"
                }
            ],
            "event_data": None,
            "fn_index": 117,
            "trigger_id": 159,
            "session_hash": session_hash
        }

        second_payload = {
            "data": [],
            "event_data": None,
            "fn_index": 118,
            "trigger_id": 159,
            "session_hash": session_hash
        }

        third_payload = {
            "data": [None, temperature, top_p, max_tokens],
            "event_data": None,
            "fn_index": 119,
            "trigger_id": 159,
            "session_hash": session_hash
        }

        return first_payload, second_payload, third_payload

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        max_tokens: int = 2048,
        temperature: float = 0.7,
        top_p: float = 1,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if not model:
            model = cls.default_model
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        session_hash = cls._random_session_hash()
        headers = {
            "Content-Type": "application/json",
            "Accept": "application/json"
        }
        async with StreamSession(impersonate="chrome", headers=headers) as session:
            first_payload, second_payload, third_payload = cls._build_payloads(model, session_hash, messages, max_tokens, temperature, top_p)
            # Long stream GET
            async def long_stream():
                # POST 1
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=first_payload, proxy=proxy) as response:
                    await raise_for_status(response)

                # POST 2
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=second_payload, proxy=proxy) as response:
                    await raise_for_status(response)

                # POST 3
                async with session.post(f"{cls.url}{cls.api_endpoint}", json=third_payload, proxy=proxy) as response:
                    await raise_for_status(response)

                stream_url = f"{cls.url}/queue/data?session_hash={session_hash}"
                async with session.get(stream_url, headers={"Accept": "text/event-stream"}, proxy=proxy) as response:
                    await raise_for_status(response)
                    text_position = 0
                    count = 0
                    async for line in response.iter_lines():
                        if line.startswith(b"data: "):
                            try:
                                msg = json.loads(line[6:])
                            except Exception as e:
                                raise RuntimeError(f"Failed to decode JSON from stream: {line}", e)
                            if msg.get("msg") == "process_generating":
                                data = msg["output"]["data"][1]
                                if data:
                                    data = data[0]
                                    if len(data) > 2:
                                        if isinstance(data[2], list):
                                            data[2] = data[2][-1]
                                        content = data[2][text_position:]
                                        if content.endswith("▌"):
                                            content = content[:-2]
                                        if content:
                                            count += 1
                                            yield count, content
                                            text_position += len(content)
                            elif msg.get("msg") == "close_stream":
                                break
                            elif msg.get("msg") not in ("process_completed", "process_starts", "estimation"):
                                debug.log(f"Unexpected message: {msg}")
            count = 0
            async for count, chunk in long_stream():
                yield chunk
            if count == 0:
                await asyncio.sleep(10)
                async for count, chunk in long_stream():
                    yield chunk
                if count == 0:
                    raise RuntimeError("No response from server.")
            if count == max_tokens:
                yield FinishReason("length")
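Note: the new provider speaks the Gradio queue protocol: three POSTs to /queue/join register the prompt and sampling settings (fn_index 117-119) under one session_hash, then a single GET to /queue/data streams SSE events for that hash. A reduced sketch, assuming any async HTTP client with line iteration works (aiohttp here, not the g4f StreamSession):

    import json
    import uuid

    import aiohttp

    async def stream_arena(payloads: list) -> None:
        session_hash = str(uuid.uuid4())
        base = "https://lmarena.ai"
        async with aiohttp.ClientSession() as session:
            for payload in payloads:  # the three fn_index 117/118/119 joins
                payload["session_hash"] = session_hash
                async with session.post(f"{base}/queue/join?", json=payload) as resp:
                    resp.raise_for_status()
            async with session.get(f"{base}/queue/data?session_hash={session_hash}") as resp:
                async for line in resp.content:
                    if line.startswith(b"data: "):
                        msg = json.loads(line[6:])
                        if msg.get("msg") == "close_stream":
                            break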
@@ -10,7 +10,7 @@ from .BlackForestLabs_Flux1Dev import BlackForestLabs_Flux1Dev
from .BlackForestLabs_Flux1Schnell import BlackForestLabs_Flux1Schnell
from .CohereForAI_C4AI_Command import CohereForAI_C4AI_Command
from .DeepseekAI_JanusPro7b import DeepseekAI_JanusPro7b
from .G4F import G4F
from .LMArenaProvider import LMArenaProvider
from .Microsoft_Phi_4 import Microsoft_Phi_4
from .Qwen_QVQ_72B import Qwen_QVQ_72B
from .Qwen_Qwen_2_5 import Qwen_Qwen_2_5
@@ -33,7 +33,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
        BlackForestLabs_Flux1Schnell,
        CohereForAI_C4AI_Command,
        DeepseekAI_JanusPro7b,
        G4F,
        LMArenaProvider,
        Microsoft_Phi_4,
        Qwen_QVQ_72B,
        Qwen_Qwen_2_5,
@@ -88,7 +88,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
        for provider in cls.providers:
            if model in provider.get_models():
                try:
                    async for chunk in provider.create_async_generator(model, messages, images=images, **kwargs):
                    async for chunk in provider.create_async_generator(model, messages, media=media, **kwargs):
                        is_started = True
                        yield chunk
                    if is_started:
@@ -10,10 +10,7 @@ from ...typing import AsyncResult, Messages
from ...errors import NoValidHarFileError
from ... import debug

def cookies_to_dict():
    return Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}

class CopilotAccount(AsyncAuthedProvider, Copilot):
class CopilotAccount(Copilot, AsyncAuthedProvider):
    needs_auth = True
    use_nodriver = True
    parent = "Copilot"
@@ -23,17 +20,17 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
    @classmethod
    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
        try:
            Copilot._access_token, Copilot._cookies = readHAR(cls.url)
            cls._access_token, cls._cookies = readHAR(cls.url)
        except NoValidHarFileError as h:
            debug.log(f"Copilot: {h}")
            if has_nodriver:
                yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
                Copilot._access_token, Copilot._cookies = await get_access_token_and_cookies(cls.url, proxy)
                cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
            else:
                raise h
        yield AuthResult(
            api_key=Copilot._access_token,
            cookies=cookies_to_dict()
            api_key=cls._access_token,
            cookies=cls.cookies_to_dict()
        )

    @classmethod
@@ -44,9 +41,12 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
        auth_result: AuthResult,
        **kwargs
    ) -> AsyncResult:
        Copilot._access_token = getattr(auth_result, "api_key")
        Copilot._cookies = getattr(auth_result, "cookies")
        Copilot.needs_auth = cls.needs_auth
        for chunk in Copilot.create_completion(model, messages, **kwargs):
        cls._access_token = getattr(auth_result, "api_key")
        cls._cookies = getattr(auth_result, "cookies")
        async for chunk in cls.create_async_generator(model, messages, **kwargs):
            yield chunk
        auth_result.cookies = cookies_to_dict()
        auth_result.cookies = cls.cookies_to_dict()

    @classmethod
    def cookies_to_dict(cls):
        return cls._cookies if isinstance(cls._cookies, dict) else {c.name: c.value for c in cls._cookies}
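Note on the Copilot._access_token -> cls._access_token swap: in a classmethod, cls is whichever class the call went through, so CopilotAccount gets its own token slot instead of mutating the parent. Plain-Python illustration:

    class Parent:
        _token = None
        @classmethod
        def set_token(cls, value):
            cls._token = value  # lands on the class that made the call

    class Child(Parent):
        pass

    Child.set_token("abc")
    print(Parent._token, Child._token)  # None abc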
@@ -146,7 +146,7 @@ async def get_access_token_and_user_agent(url: str, proxy: str = None):
    browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="designer")
    try:
        page = await browser.get(url)
        user_agent = await page.evaluate("navigator.userAgent")
        user_agent = await page.evaluate("navigator.userAgent", return_by_value=True)
        access_token = None
        while access_token is None:
            access_token = await page.evaluate("""
@@ -278,7 +278,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
        messages: Messages,
        auth_result: AuthResult,
        proxy: str = None,
        timeout: int = 180,
        timeout: int = 360,
        auto_continue: bool = False,
        action: str = "next",
        conversation: Conversation = None,
@@ -447,7 +447,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                        link = sources.list[int(match.group(1))]["url"]
                        return f"[[{int(match.group(1))+1}]]({link})"
                        return f" [{int(match.group(1))+1}]"
                    buffer = re.sub(r'(?:cite\nturn0search|cite\nturn0news|turn0news)(\d+)', replacer, buffer)
                    buffer = re.sub(r'(?:cite\nturn[0-9]+|turn[0-9]+)(?:search|news|view)(\d+)', replacer, buffer)
                else:
                    continue
            yield buffer
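Note: the broadened citation pattern now matches any turn index and the "view" kind, not only turn0search/turn0news. Quick standalone check of the new regex:

    import re

    buffer = "see cite\nturn3view2 and turn0search1"
    pattern = r'(?:cite\nturn[0-9]+|turn[0-9]+)(?:search|news|view)(\d+)'
    print(re.sub(pattern, lambda m: f" [{int(m.group(1)) + 1}]", buffer))
    # -> "see  [3] and  [2]"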
@@ -489,25 +489,39 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
        if "type" in line:
            if line["type"] == "title_generation":
                yield TitleGeneration(line["title"])
        fields.p = line.get("p", fields.p)
        if fields.p.startswith("/message/content/thoughts"):
            if fields.p.endswith("/content"):
                if fields.thoughts_summary:
                    yield Reasoning(token="", status=fields.thoughts_summary)
                    fields.thoughts_summary = ""
                yield Reasoning(token=line.get("v"))
            elif fields.p.endswith("/summary"):
                fields.thoughts_summary += line.get("v")
            return
        if "v" in line:
            v = line.get("v")
            if isinstance(v, str) and fields.is_recipient:
            if isinstance(v, str) and fields.recipient == "all":
                if "p" not in line or line.get("p") == "/message/content/parts/0":
                    yield Reasoning(token=v) if fields.is_thinking else v
            elif isinstance(v, list):
                for m in v:
                    if m.get("p") == "/message/content/parts/0" and fields.is_recipient:
                    if m.get("p") == "/message/content/parts/0" and fields.recipient == "all":
                        yield m.get("v")
                    elif m.get("p") == "/message/metadata/search_result_groups":
                        for entry in [p.get("entries") for p in m.get("v")]:
                            for link in entry:
                                sources.add_source(link)
                    elif re.match(r"^/message/metadata/content_references/\d+$", m.get("p")):
                    elif m.get("p") == "/message/metadata/content_references":
                        for entry in m.get("v"):
                            for link in entry.get("sources", []):
                                sources.add_source(link)
                    elif m.get("p") and re.match(r"^/message/metadata/content_references/\d+$", m.get("p")):
                        sources.add_source(m.get("v"))
                    elif m.get("p") == "/message/metadata/finished_text":
                        fields.is_thinking = False
                        yield Reasoning(status=m.get("v"))
                    elif m.get("p") == "/message/metadata":
                    elif m.get("p") == "/message/metadata" and fields.recipient == "all":
                        fields.finish_reason = m.get("v", {}).get("finish_details", {}).get("type")
                        break
            elif isinstance(v, dict):
@@ -515,8 +529,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                    fields.conversation_id = v.get("conversation_id")
                    debug.log(f"OpenaiChat: New conversation: {fields.conversation_id}")
                m = v.get("message", {})
                fields.is_recipient = m.get("recipient", "all") == "all"
                if fields.is_recipient:
                fields.recipient = m.get("recipient", fields.recipient)
                if fields.recipient == "all":
                    c = m.get("content", {})
                    if c.get("content_type") == "text" and m.get("author", {}).get("role") == "tool" and "initial_text" in m.get("metadata", {}):
                        fields.is_thinking = True
@@ -578,14 +592,17 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            cls._set_api_key(api_key)
        else:
            try:
                await get_request_config(cls.request_config, proxy)
                cls.request_config = await get_request_config(cls.request_config, proxy)
                if cls.request_config is None:
                    cls.request_config = RequestConfig()
                cls._create_request_args(cls.request_config.cookies, cls.request_config.headers)
                if cls.request_config.access_token is not None or cls.needs_auth:
                    if not cls._set_api_key(cls.request_config.access_token):
                        raise NoValidHarFileError(f"Access token is not valid: {cls.request_config.access_token}")
                if cls.needs_auth and cls.request_config.access_token is None:
                    raise NoValidHarFileError(f"Missing access token")
                if not cls._set_api_key(cls.request_config.access_token):
                    raise NoValidHarFileError(f"Access token is not valid: {cls.request_config.access_token}")
            except NoValidHarFileError:
                if has_nodriver:
                    if cls._api_key is None:
                    if cls.request_config.access_token is None:
                        yield RequestLogin(cls.label, os.environ.get("G4F_LOGIN_URL", ""))
                        await cls.nodriver_auth(proxy)
                else:
@@ -622,15 +639,18 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            await page.send(nodriver.cdp.network.enable())
            page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
            page = await browser.get(cls.url)
            user_agent = await page.evaluate("window.navigator.userAgent")
            await page.select("#prompt-textarea", 240)
            await page.evaluate("document.getElementById('prompt-textarea').innerText = 'Hello'")
            await page.select("[data-testid=\"send-button\"]", 30)
            user_agent = await page.evaluate("window.navigator.userAgent", return_by_value=True)
            while not await page.evaluate("document.getElementById('prompt-textarea').id"):
                await asyncio.sleep(1)
            while not await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').type"):
                await asyncio.sleep(1)
            await page.evaluate("document.querySelector('[data-testid=\"send-button\"]').click()")
            while True:
                body = await page.evaluate("JSON.stringify(window.__remixContext)")
                body = await page.evaluate("JSON.stringify(window.__remixContext)", return_by_value=True)
                if hasattr(body, "value"):
                    body = body.value
                if body:
                    match = re.search(r'"accessToken":"(.*?)"', body)
                    match = re.search(r'"accessToken":"(.+?)"', body)
                    if match:
                        cls._api_key = match.group(1)
                        break
@@ -674,6 +694,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):

    @classmethod
    def _set_api_key(cls, api_key: str):
        cls._api_key = api_key
        if api_key:
            exp = api_key.split(".")[1]
            exp = (exp + "=" * (4 - len(exp) % 4)).encode()
@@ -681,11 +702,11 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
            debug.log(f"OpenaiChat: API key expires at\n {cls._expires} we have:\n {time.time()}")
            if time.time() > cls._expires:
                debug.log(f"OpenaiChat: API key is expired")
                return False
            else:
                cls._api_key = api_key
                cls._headers["authorization"] = f"Bearer {api_key}"
                return True
        return False
        return True

    @classmethod
    def _update_cookie_header(cls):
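Note: _set_api_key treats the access token as a JWT: the second dot-separated segment is base64url JSON whose "exp" claim is compared to the clock before the key is accepted. Hedged sketch of that check (decode details are an assumption; the diff only shows the padding line):

    import base64, json, time

    def token_expired(api_key: str) -> bool:
        payload = api_key.split(".")[1]
        payload += "=" * (4 - len(payload) % 4)  # restore stripped base64 padding
        claims = json.loads(base64.urlsafe_b64decode(payload))
        return time.time() > claims["exp"]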
@@ -700,10 +721,12 @@ class Conversation(JsonConversation):
        self.conversation_id = conversation_id
        self.message_id = message_id
        self.finish_reason = finish_reason
        self.is_recipient = False
        self.recipient = "all"
        self.parent_message_id = message_id if parent_message_id is None else parent_message_id
        self.user_id = user_id
        self.is_thinking = is_thinking
        self.p = None
        self.thoughts_summary = ""

def get_cookies(
    urls: Optional[Iterator[str]] = None
@@ -49,6 +49,7 @@ def get_har_files():
        for file in files:
            if file.endswith(".har"):
                harPath.append(os.path.join(root, file))
        break
    if not harPath:
        raise NoValidHarFileError("No .har file found")
    harPath.sort(key=lambda x: os.path.getmtime(x))
@@ -86,8 +87,6 @@ def readHAR(request_config: RequestConfig):
                    request_config.cookies = {c['name']: c['value'] for c in v['request']['cookies']}
                except Exception as e:
                    debug.log(f"Error on read headers: {e}")
    if request_config.proof_token is None:
        raise NoValidHarFileError("No proof_token found in .har files")

def get_headers(entry) -> dict:
    return {h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')}
@@ -152,8 +151,9 @@ def getN() -> str:
    return base64.b64encode(timestamp.encode()).decode()

async def get_request_config(request_config: RequestConfig, proxy: str) -> RequestConfig:
    if request_config.proof_token is None:
        readHAR(request_config)
    readHAR(request_config)
    if request_config.arkose_request is not None:
        request_config.arkose_token = await sendRequest(genArkReq(request_config.arkose_request), proxy)
    if request_config.proof_token is None:
        raise NoValidHarFileError("No proof_token found in .har files")
    return request_config
@@ -309,9 +309,9 @@ class Api:
        if credentials is not None and credentials.credentials != "secret":
            config.api_key = credentials.credentials

        conversation = None
        conversation = config.conversation
        return_conversation = config.return_conversation
        if conversation is not None:
        if conversation:
            conversation = JsonConversation(**conversation)
            return_conversation = True
        elif config.conversation_id is not None and config.provider is not None:
@@ -217,7 +217,7 @@ async def async_iter_response(

    if stream:
        chat_completion = ChatCompletionChunk.model_construct(
            None, finish_reason, completion_id, int(time.time()), usage=usage
            None, finish_reason, completion_id, int(time.time()), usage=usage, conversation=conversation
        )
    else:
        if response_format is not None and "type" in response_format:
@@ -228,7 +228,7 @@ async def async_iter_response(
            **filter_none(
                tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]
            ) if tool_calls is not None else {},
            conversation=None if conversation is None else conversation.get_dict()
            conversation=conversation
        )
        if provider is not None:
            chat_completion.provider = provider.name
@@ -10,7 +10,7 @@ from ..client.helper import filter_markdown
from .helper import filter_none

try:
    from pydantic import BaseModel
    from pydantic import BaseModel, field_serializer
except ImportError:
    class BaseModel():
        @classmethod
@@ -19,6 +19,11 @@ except ImportError:
            for key, value in data.items():
                setattr(new, key, value)
            return new
    class field_serializer():
        def __init__(self, field_name):
            self.field_name = field_name
        def __call__(self, *args, **kwargs):
            return args[0]

class BaseModel(BaseModel):
    @classmethod
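Note: the fallback class above keeps `@field_serializer("...")` importable when pydantic is absent by reducing it to a decorator that returns the function unchanged. With real pydantic v2 the decorator customizes how a field is serialized, e.g.:

    from pydantic import BaseModel, field_serializer

    class Chunk(BaseModel):
        conversation: dict | None = None

        @field_serializer("conversation")
        def serialize_conversation(self, conversation):
            if hasattr(conversation, "get_dict"):
                return conversation.get_dict()
            return conversation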
@@ -72,6 +77,7 @@ class ChatCompletionChunk(BaseModel):
    provider: Optional[str]
    choices: List[ChatCompletionDeltaChoice]
    usage: UsageModel
    conversation: dict

    @classmethod
    def model_construct(
@@ -80,7 +86,8 @@ class ChatCompletionChunk(BaseModel):
        finish_reason: str,
        completion_id: str = None,
        created: int = None,
        usage: UsageModel = None
        usage: UsageModel = None,
        conversation: dict = None
    ):
        return super().model_construct(
            id=f"chatcmpl-{completion_id}" if completion_id else None,
@@ -92,9 +99,15 @@ class ChatCompletionChunk(BaseModel):
                ChatCompletionDelta.model_construct(content),
                finish_reason
            )],
            **filter_none(usage=usage)
            **filter_none(usage=usage, conversation=conversation)
        )

    @field_serializer('conversation')
    def serialize_conversation(self, conversation: dict):
        if hasattr(conversation, "get_dict"):
            return conversation.get_dict()
        return conversation

class ChatCompletionMessage(BaseModel):
    role: str
    content: str
@@ -104,6 +117,10 @@ class ChatCompletionMessage(BaseModel):
    def model_construct(cls, content: str, tool_calls: list = None):
        return super().model_construct(role="assistant", content=content, **filter_none(tool_calls=tool_calls))

    @field_serializer('content')
    def serialize_content(self, content: str):
        return str(content)

    def save(self, filepath: str, allowd_types = None):
        if hasattr(self.content, "data"):
            os.rename(self.content.data.replace("/media", images_dir), filepath)
@@ -160,6 +177,12 @@ class ChatCompletion(BaseModel):
            **filter_none(usage=usage, conversation=conversation)
        )

    @field_serializer('conversation')
    def serialize_conversation(self, conversation: dict):
        if hasattr(conversation, "get_dict"):
            return conversation.get_dict()
        return conversation

class ChatCompletionDelta(BaseModel):
    role: str
    content: str
@@ -168,6 +191,10 @@ class ChatCompletionDelta(BaseModel):
    def model_construct(cls, content: Optional[str]):
        return super().model_construct(role="assistant", content=content)

    @field_serializer('content')
    def serialize_content(self, content: str):
        return str(content)

class ChatCompletionDeltaChoice(BaseModel):
    index: int
    delta: ChatCompletionDelta
@@ -56,12 +56,11 @@ DOMAINS = [
    ".google.com",
    "www.whiterabbitneo.com",
    "huggingface.co",
    ".huggingface.co"
    "chat.reka.ai",
    "chatgpt.com",
    ".cerebras.ai",
    "github.com",
    "huggingface.co",
    ".huggingface.co"
]

if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
@@ -152,6 +151,7 @@ def read_cookie_files(dirPath: str = None):
                harFiles.append(os.path.join(root, file))
            elif file.endswith(".json"):
                cookieFiles.append(os.path.join(root, file))
        break

    CookiesConfig.cookies = {}
    for path in harFiles:
@@ -169,15 +169,15 @@
            if (errorVideo < 3 || !refreshOnHide) {
                return;
            }
            if (skipRefresh > 0) {
                skipRefresh -= 1;
                return;
            }
            if (errorImage < 3) {
                imageFeed.src = "/search/image+g4f?skip=" + skipImage;
                skipImage++;
                return;
            }
            if (skipRefresh > 0) {
                skipRefresh -= 1;
                return;
            }
            if (images.length > 0) {
                imageFeed.classList.remove("hidden");
                imageFeed.src = images.shift();
@@ -194,10 +194,13 @@
        imageFeed.onload = () => {
            imageFeed.classList.remove("hidden");
            gradient.classList.add("hidden");
            errorImage = 0;
        };
        imageFeed.onclick = () => {
            imageFeed.src = "/search/image?random=" + Math.random();
            skipRefresh = 2;
            if (skipRefresh < 4) {
                skipRefresh += 1;
            }
        };
    })();
</script>
@@ -1533,6 +1533,7 @@ form textarea {
.chat-top-panel .convo-title {
    margin: 0 10px;
    font-size: 14px;
    font-weight: bold;
    text-align: center;
    flex: 1;
}
@@ -1581,7 +1582,6 @@ form textarea {
}
.chat-body {
    flex: 1;
    padding: 10px;
    overflow-y: auto;
    display: flex;
    flex-direction: column;
@@ -1613,11 +1613,25 @@ form textarea {
    border: 1px dashed var(--conversations);
    box-shadow: 1px 1px 1px 0px rgba(0,0,0,0.75);
}
.white .chat-footer .send-buttons button {
.suggestions {
    display: flex;
    gap: 6px;
    flex-wrap: wrap;
}
.suggestions .suggestion {
    background: var(--blur-bg);
    color: var(--colour-3);
    padding: 10px;
    margin: 0 2px 0 4px;
    border-radius: 5px;
    cursor: pointer;
    border: 1px dashed var(--conversations);
}
.white .chat-footer .send-buttons button, .white .suggestions .suggestion {
    border-style: solid;
    border-color: var(--blur-border);
}
.chat-footer .send-buttons button:hover {
.chat-footer .send-buttons button:hover, .suggestions .suggestion:hover {
    border-style: solid;
    box-shadow: none;
    background-color: var(--button-hover);
@@ -48,6 +48,7 @@ let wakeLock = null;
let countTokensEnabled = true;
let reloadConversation = true;
let privateConversation = null;
let suggestions = null;

userInput.addEventListener("blur", () => {
    document.documentElement.scrollTop = 0;
@@ -119,9 +120,9 @@ function filter_message(text) {
    if (Array.isArray(text)) {
        return text;
    }
    return text.replaceAll(
    return filter_message_content(text.replaceAll(
        /<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm, ""
    ).replace(/ \[aborted\]$/g, "").replace(/ \[error\]$/g, "");
    ))
}

function filter_message_content(text) {
@@ -468,7 +469,7 @@ const register_message_buttons = async () => {
        el.dataset.click = true;
        const message_el = get_message_el(el);
        el.addEventListener("click", async () => {
            iframe.src = `/qrcode/${window.conversation_id}#${message_el.dataset.index}`;
            iframe.src = window.conversation_id ? `/qrcode/${window.conversation_id}#${message_el.dataset.index}` : '/qrcode';
            iframe_container.classList.remove("hidden");
        });
    });
@@ -933,9 +934,7 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
    } else if (message.type == "login") {
        update_message(content_map, message_id, markdown_render(message.login), scroll);
    } else if (message.type == "finish") {
        if (!finish_storage[message_id]) {
            finish_storage[message_id] = message.finish;
        }
        finish_storage[message_id] = message.finish;
    } else if (message.type == "usage") {
        usage_storage[message_id] = message.usage;
    } else if (message.type == "reasoning") {
@@ -950,7 +949,7 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
        } if (message.token) {
            reasoning_storage[message_id].text += message.token;
        }
        update_message(content_map, message_id, render_reasoning(reasoning_storage[message_id]), scroll);
        update_message(content_map, message_id, null, scroll);
    } else if (message.type == "parameters") {
        if (!parameters_storage[provider]) {
            parameters_storage[provider] = {};
@@ -958,6 +957,8 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
        Object.entries(message.parameters).forEach(([key, value]) => {
            parameters_storage[provider][key] = value;
        });
    } else if (message.type == "suggestions") {
        suggestions = message.suggestions;
    }
}
@@ -998,6 +999,9 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
    if (scroll) {
        await lazy_scroll_to_bottom();
    }

    let suggestions_el = chatBody.querySelector('.suggestions');
    suggestions_el ? suggestions_el.remove() : null;
    if (countTokensEnabled) {
        let count_total = chatBody.querySelector('.count_total');
        count_total ? count_total.parentElement.removeChild(count_total) : null;
@@ -1507,7 +1511,7 @@ const load_conversation = async (conversation, scroll=true) => {
    } else if (reason == "stop" && buffer.split("```").length - 1 % 2 === 1) {
        reason = "length";
    }
    if (reason == "length" || reason == "max_tokens" || reason == "error") {
    if (reason != "stop") {
        actions.push("continue")
    }
}
@@ -1578,8 +1582,23 @@ const load_conversation = async (conversation, scroll=true) => {
            </div>
        `);
    });
    chatBody.innerHTML = elements.join("");

    if (countTokensEnabled && window.GPTTokenizer_cl100k_base) {
    if (suggestions) {
        const suggestions_el = document.createElement("div");
        suggestions_el.classList.add("suggestions");
        suggestions.forEach((suggestion)=> {
            const el = document.createElement("button");
            el.classList.add("suggestion");
            el.innerHTML = `<span>${escapeHtml(suggestion)}</span> <i class="fa-solid fa-turn-up"></i>`;
            el.onclick = async () => {
                await handle_ask(true, suggestion);
            }
            suggestions_el.appendChild(el);
        });
        chatBody.appendChild(suggestions_el);
        suggestions = null;
    } else if (countTokensEnabled && window.GPTTokenizer_cl100k_base) {
        const has_media = messages.filter((item)=>Array.isArray(item.content)).length > 0;
        if (!has_media) {
            const filtered = prepare_messages(messages, null, true, false);
@@ -1587,13 +1606,15 @@ const load_conversation = async (conversation, scroll=true) => {
        last_model = last_model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
        let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length
        if (count_total > 0) {
            elements.push(`<div class="count_total">(${count_total} total tokens)</div>`);
            const count_total_el = document.createElement("div");
            count_total_el.classList.add("count_total");
            count_total_el.innerText = `(${count_total} total tokens)`;
            chatBody.appendChild(count_total_el);
        }
            }
        }
    }
}

    chatBody.innerHTML = elements.join("");
    await register_message_buttons();
    highlight(chatBody);
    regenerate_button.classList.remove("regenerate-hidden");
@@ -2079,8 +2100,10 @@ function count_words_and_tokens(text, model, completion_tokens, prompt_tokens) {
function update_message(content_map, message_id, content = null, scroll = true) {
    content_map.update_timeouts.push(setTimeout(() => {
        if (!content) {
            if (reasoning_storage[message_id]) {
            if (reasoning_storage[message_id] && message_storage[message_id]) {
                content = render_reasoning(reasoning_storage[message_id], true) + markdown_render(message_storage[message_id]);
            } else if (reasoning_storage[message_id]) {
                content = render_reasoning(reasoning_storage[message_id]);
            } else {
                content = markdown_render(message_storage[message_id]);
            }
@@ -2097,10 +2120,9 @@ function update_message(content_map, message_id, content = null, scroll = true)
            }
        }
        if (error_storage[message_id]) {
            content_map.inner.innerHTML = message + markdown_render(`**An error occurred:** ${error_storage[message_id]}`);
        } else {
            content_map.inner.innerHTML = content;
            content += markdown_render(`**An error occurred:** ${error_storage[message_id]}`);
        }
        content_map.inner.innerHTML = content;
        if (countTokensEnabled) {
            content_map.count.innerText = count_words_and_tokens(
                (reasoning_storage[message_id] ? reasoning_storage[message_id].text : "")
@@ -2484,8 +2506,14 @@ async function on_api() {

    const hide_systemPrompt = document.getElementById("hide-systemPrompt")
    const slide_systemPrompt_icon = document.querySelector(".slide-header i");
    document.querySelector(".slide-header")?.addEventListener("click", () => {
        const checked = slide_systemPrompt_icon.classList.contains("fa-angles-up");
        chatPrompt.classList[checked ? "add": "remove"]("hidden");
        slide_systemPrompt_icon.classList[checked ? "remove": "add"]("fa-angles-up");
        slide_systemPrompt_icon.classList[checked ? "add": "remove"]("fa-angles-down");
    });
    if (hide_systemPrompt.checked) {
        chatPrompt.classList.add("hidden");
        slide_systemPrompt_icon.click();
    }
    hide_systemPrompt.addEventListener('change', async (event) => {
        if (event.target.checked) {
@@ -2494,12 +2522,6 @@ async function on_api() {
            chatPrompt.classList.remove("hidden");
        }
    });
    document.querySelector(".slide-header")?.addEventListener("click", () => {
        const checked = slide_systemPrompt_icon.classList.contains("fa-angles-up");
        chatPrompt.classList[checked ? "add": "remove"]("hidden");
        slide_systemPrompt_icon.classList[checked ? "remove": "add"]("fa-angles-up");
        slide_systemPrompt_icon.classList[checked ? "add": "remove"]("fa-angles-down");
    });
    const userInputHeight = document.getElementById("message-input-height");
    if (userInputHeight) {
        if (userInputHeight.value) {
@@ -215,6 +215,8 @@ class Api:
                yield self._format_json("content", chunk.to_string())
            elif isinstance(chunk, AudioResponse):
                yield self._format_json("content", str(chunk))
            elif isinstance(chunk, SuggestedFollowups):
                yield self._format_json("suggestions", chunk.suggestions)
            elif isinstance(chunk, DebugResponse):
                yield self._format_json("log", chunk.log)
            elif isinstance(chunk, RawResponse):
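
The new SuggestedFollowups branch above turns a typed chunk into a JSON event that the frontend matches as message.type == "suggestions". A rough sketch of that serialization (the exact shape _format_json emits is an assumption here):

import json

def format_json(response_type: str, content) -> str:
    # Assumed shape: a "type" discriminator plus the payload under that key.
    return json.dumps({"type": response_type, response_type: content})

print(format_json("suggestions", ["Tell me more", "Show an example"]))
# -> {"type": "suggestions", "suggestions": ["Tell me more", "Show an example"]}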
@@ -341,28 +341,35 @@ class Backend_Api(Api):
                    return redirect(source_url)
                raise

        self.match_files = {}

        @app.route('/search/<search>', methods=['GET'])
        def find_media(search: str):
            search = [secure_filename(chunk.lower()) for chunk in search.split("+")]
            safe_search = [secure_filename(chunk.lower()) for chunk in search.split("+")]
            if not os.access(images_dir, os.R_OK):
                return jsonify({"error": {"message": "Not found"}}), 404
            match_files = {}
            for root, _, files in os.walk(images_dir):
                for file in files:
                    mime_type = is_allowed_extension(file)
                    if mime_type is not None:
                        mime_type = secure_filename(mime_type)
                        for tag in search:
                            if tag in mime_type:
                                match_files[file] = match_files.get(file, 0) + 1
                                break
                    for tag in search:
                        if tag in file.lower():
                            match_files[file] = match_files.get(file, 0) + 1
            match_files = [file for file, count in match_files.items() if count >= request.args.get("min", len(search))]
            if search not in self.match_files:
                self.match_files[search] = {}
                for root, _, files in os.walk(images_dir):
                    for file in files:
                        mime_type = is_allowed_extension(file)
                        if mime_type is not None:
                            mime_type = secure_filename(mime_type)
                            for tag in safe_search:
                                if tag in mime_type:
                                    self.match_files[search][file] = self.match_files[search].get(file, 0) + 1
                                    break
                        for tag in safe_search:
                            if tag in file.lower():
                                self.match_files[search][file] = self.match_files[search].get(file, 0) + 1
                                break
            match_files = [file for file, count in self.match_files[search].items() if count >= request.args.get("min", len(safe_search))]
            if int(request.args.get("skip", 0)) >= len(match_files):
                return jsonify({"error": {"message": "Not found"}}), 404
            if (request.args.get("random", False)):
                seed = request.args.get("random")
                if seed not in ["true", "True", "1"]:
                    random.seed(seed)
                return redirect(f"/media/{random.choice(match_files)}"), 302
            return redirect(f"/media/{match_files[int(request.args.get('skip', 0))]}", 302)

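The rewritten find_media above does two things: it caches match counts per raw search string in self.match_files, so paging with ?skip= does not rescan the media directory, and it scores each file by how many sanitized tags appear in its MIME type or filename. A stripped-down sketch of the scoring rule (names are illustrative):

def score_files(filenames, tags, min_count=None):
    # Default threshold: every tag must match, as with len(safe_search) above.
    min_count = len(tags) if min_count is None else min_count
    counts = {}
    for name in filenames:
        for tag in tags:
            if tag in name.lower():
                counts[name] = counts.get(name, 0) + 1
    return [name for name, count in counts.items() if count >= min_count]

print(score_files(["cat_g4f.png", "dog.jpg"], ["cat", "g4f"]))
# -> ['cat_g4f.png']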
@@ -28,7 +28,7 @@ def get_media_extension(media: str) -> str:
    extension = os.path.splitext(path)[1]
    if not extension:
        extension = os.path.splitext(media)[1]
    if not extension:
    if not extension or len(extension) > 4:
        return ""
    if extension[1:] not in EXTENSIONS_MAP:
        raise ValueError(f"Unsupported media extension: {extension} in: {media}")
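
The added length guard treats anything longer than four characters, dot included, as "no extension", so URL tails that merely contain a dot fall through to the empty-string return instead of raising. A trimmed-down sketch of the check (this EXTENSIONS_MAP subset and the return value are illustrative):

import os

EXTENSIONS_MAP = {"png": "image/png", "jpg": "image/jpeg", "mp3": "audio/mpeg"}

def get_media_extension(media: str) -> str:
    extension = os.path.splitext(media)[1]
    if not extension or len(extension) > 4:
        return ""
    if extension[1:] not in EXTENSIONS_MAP:
        raise ValueError(f"Unsupported media extension: {extension} in: {media}")
    return extension

print(get_media_extension("audio.mp3"))                       # -> .mp3
print(get_media_extension("https://host/page.html/section"))  # -> empty string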
@@ -18,7 +18,6 @@ from .Provider import (
    Free2GPT,
    FreeGpt,
    HuggingSpace,
    G4F,
    Grok,
    DeepseekAI_JanusPro7b,
    Glider,
@@ -535,7 +534,7 @@ deepseek_r1 = Model(
janus_pro_7b = VisionModel(
    name = DeepseekAI_JanusPro7b.default_model,
    base_provider = 'DeepSeek',
    best_provider = IterListProvider([DeepseekAI_JanusPro7b, G4F])
    best_provider = IterListProvider([DeepseekAI_JanusPro7b])
)

### x.ai ###
@@ -985,7 +984,7 @@ demo_models = {
    llama_3_2_11b.name: [llama_3_2_11b, [HuggingChat]],
    qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
    deepseek_r1.name: [deepseek_r1, [HuggingFace, PollinationsAI]],
    janus_pro_7b.name: [janus_pro_7b, [HuggingSpace, G4F]],
    janus_pro_7b.name: [janus_pro_7b, [HuggingSpace]],
    command_r.name: [command_r, [HuggingSpace]],
    command_r_plus.name: [command_r_plus, [HuggingSpace]],
    command_r7b.name: [command_r7b, [HuggingSpace]],
@@ -425,9 +425,14 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
            if auth_result is not None:
                cache_file.parent.mkdir(parents=True, exist_ok=True)
                try:
                    cache_file.write_text(json.dumps(auth_result.get_dict()))
                except TypeError:
                    raise RuntimeError(f"Failed to save: {auth_result.get_dict()}")
                    def toJSON(obj):
                        if hasattr(obj, "get_dict"):
                            return obj.get_dict()
                        return str(obj)
                    with cache_file.open("w") as cache_file:
                        json.dump(auth_result, cache_file, default=toJSON)
                except TypeError as e:
                    raise RuntimeError(f"Failed to save: {auth_result.get_dict()}\n{type(e).__name__}: {e}")
            elif cache_file.exists():
                cache_file.unlink()
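
The fallback above swaps json.dumps of a plain dict for json.dump with a default= hook, so auth members that are not JSON-native still serialize instead of raising TypeError. The mechanism in isolation (Token is an invented stand-in for an auth member):

import json

class Token:
    def get_dict(self):
        return {"token": "abc"}

def to_json(obj):
    # Same priority as toJSON above: get_dict() if available, else str().
    if hasattr(obj, "get_dict"):
        return obj.get_dict()
    return str(obj)

print(json.dumps({"auth": Token(), "expires": object()}, default=to_json))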
@@ -443,7 +448,9 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
        try:
            if cache_file.exists():
                with cache_file.open("r") as f:
                    auth_result = AuthResult(**json.load(f))
                    data = f.read()
                if data:
                    auth_result = AuthResult(**json.loads(data))
            else:
                raise MissingAuthError
            yield from to_sync_generator(cls.create_authed(model, messages, auth_result, **kwargs))
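
Reading the whole file first lets an empty cache be treated as missing auth rather than crashing json.load with a decode error. The guarded read in isolation (MissingAuthError stands in for the g4f error of the same name):

import json

class MissingAuthError(Exception):
    pass

def load_auth(path: str) -> dict:
    with open(path, "r") as f:
        data = f.read()
    if not data:
        # An empty cache file now surfaces as an auth problem, not a parse error.
        raise MissingAuthError("auth cache is empty")
    return json.loads(data)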
@@ -24,6 +24,16 @@ def to_string(value) -> str:
        return "".join([to_string(v) for v in value if v.get("type", "text") == "text"])
    return str(value)

def render_messages(messages: Messages) -> Iterator:
    for idx, message in enumerate(messages):
        if isinstance(message, dict) and isinstance(message.get("content"), list):
            yield {
                **message,
                "content": to_string(message["content"]),
            }
        else:
            yield message

def format_prompt(messages: Messages, add_special_tokens: bool = False, do_continue: bool = False, include_system: bool = True) -> str:
    """
    Format a series of messages into a single string, optionally adding special tokens.
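
render_messages gives providers that only understand plain strings a flat view of multimodal content: list-valued content is reduced to its text parts. A self-contained illustration of the flattening (a simplified stand-in for the to_string above):

def flatten(content):
    if isinstance(content, list):
        return "".join(part.get("text", "") for part in content
                       if part.get("type", "text") == "text")
    return str(content)

msg = {"role": "user", "content": [
    {"type": "text", "text": "Describe "},
    {"type": "image_url", "image_url": {"url": "https://example.com/cat.png"}},
    {"type": "text", "text": "this image."},
]}
print({**msg, "content": flatten(msg["content"])})
# -> {'role': 'user', 'content': 'Describe this image.'}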
@@ -240,6 +240,15 @@ class Sources(ResponseType):
            for idx, link in enumerate(self.list)
        ]))

class SourceLink(ResponseType):
    def __init__(self, title: str, url: str) -> None:
        self.title = title
        self.url = url

    def __str__(self) -> str:
        title = f"[{self.title}]"
        return f" {format_link(self.url, title)}"

class YouTube(HiddenResponse):
    def __init__(self, ids: List[str]) -> None:
        """Initialize with a list of YouTube IDs."""
|
||||
else:
|
||||
await browser.cookies.set_all(get_cookie_params_from_dict(cookies, url=url, domain=domain))
|
||||
page = await browser.get(url)
|
||||
user_agent = str(await page.evaluate("window.navigator.userAgent"))
|
||||
await page.wait_for("body:not(.no-js)", timeout=timeout)
|
||||
user_agent = await page.evaluate("window.navigator.userAgent", return_by_value=True)
|
||||
while not await page.evaluate("document.querySelector('body:not(.no-js)')"):
|
||||
await asyncio.sleep(1)
|
||||
if wait_for is not None:
|
||||
await page.wait_for(wait_for, timeout=timeout)
|
||||
if callback is not None:
|
||||
|
||||
@@ -44,7 +44,7 @@ async def raise_for_status_async(response: Union[StreamResponse, ClientResponse]
|
||||
if response.status == 403 and is_cloudflare(message):
|
||||
raise CloudflareError(f"Response {response.status}: Cloudflare detected")
|
||||
elif response.status == 403 and is_openai(message):
|
||||
raise ResponseStatusError(f"Response {response.status}: OpenAI Bot detected")
|
||||
raise MissingAuthError(f"Response {response.status}: OpenAI Bot detected")
|
||||
elif response.status == 502:
|
||||
raise ResponseStatusError(f"Response {response.status}: Bad Gateway")
|
||||
elif response.status == 504:
|
||||
@@ -71,7 +71,7 @@ def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, R
|
||||
if response.status_code == 403 and is_cloudflare(response.text):
|
||||
raise CloudflareError(f"Response {response.status_code}: Cloudflare detected")
|
||||
elif response.status_code == 403 and is_openai(response.text):
|
||||
raise ResponseStatusError(f"Response {response.status_code}: OpenAI Bot detected")
|
||||
raise MissingAuthError(f"Response {response.status_code}: OpenAI Bot detected")
|
||||
elif response.status_code == 502:
|
||||
raise ResponseStatusError(f"Response {response.status_code}: Bad Gateway")
|
||||
elif response.status_code == 504:
|
||||
|
||||
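
Both status checkers now classify an OpenAI bot-detection 403 as MissingAuthError instead of a generic ResponseStatusError, so callers can react by supplying cookies or credentials rather than treating it as a dead end. The decision in isolation (the exception classes stand in for g4f's):

class MissingAuthError(Exception): ...
class ResponseStatusError(Exception): ...

def classify(status: int, bot_detected: bool):
    if status == 403 and bot_detected:
        # Auth-shaped failure: fixable with cookies, not a plain HTTP error.
        raise MissingAuthError(f"Response {status}: OpenAI Bot detected")
    if status in (502, 504):
        raise ResponseStatusError(f"Response {status}")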