Add AsyncAuthedProvider in Copilot

Add original url to downloaded image
Support ssl argument in StreamSession
Report Provider and Errors in RetryProvider
Support ssl argument in OpenaiTemplate
Remove model duplication in OpenaiChat
Disable ChatGpt provider and remove it from models.py
Update slim requirements
Support provider names as model name in Image generation
Add model qwen-2.5-1m-demo to models.py
hlohaus
2025-01-28 20:33:50 +01:00
parent aef3d8dc66
commit 9524c3f327
20 changed files with 169 additions and 128 deletions

View File

@@ -76,7 +76,7 @@ def init_session(user_agent):
 class ChatGpt(AbstractProvider, ProviderModelMixin):
     label = "ChatGpt"
     url = "https://chatgpt.com"
-    working = True
+    working = False
     supports_message_history = True
     supports_system_message = True
     supports_stream = True

View File

@@ -92,7 +92,6 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                 cls._access_token, cls._cookies = asyncio.run(get_access_token_and_cookies(cls.url, proxy))
             else:
                 raise h
-        yield Parameters(**{"api_key": cls._access_token, "cookies": cls._cookies if isinstance(cls._cookies, dict) else {c.name: c.value for c in cls._cookies}})
         websocket_url = f"{websocket_url}&accessToken={quote(cls._access_token)}"
         headers = {"authorization": f"Bearer {cls._access_token}"}

View File

@@ -1,8 +1,16 @@
 from __future__ import annotations
 
-from ..Copilot import Copilot
+import os
+from typing import AsyncIterator
+
+from ..base_provider import AsyncAuthedProvider
+from ..Copilot import Copilot, readHAR, has_nodriver, get_access_token_and_cookies
+from ...providers.response import AuthResult, RequestLogin
+from ...typing import AsyncResult, Messages
+from ...errors import NoValidHarFileError
+from ... import debug
 
-class CopilotAccount(Copilot):
+class CopilotAccount(AsyncAuthedProvider, Copilot):
     needs_auth = True
     use_nodriver = True
     parent = "Copilot"
@@ -12,4 +20,38 @@ class CopilotAccount(Copilot):
     image_models = models
     model_aliases = {
         "dall-e-3": default_model
     }
+
+    @classmethod
+    async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
+        if cls._access_token is None:
+            try:
+                cls._access_token, cls._cookies = readHAR(cls.url)
+            except NoValidHarFileError as h:
+                debug.log(f"Copilot: {h}")
+                if has_nodriver:
+                    login_url = os.environ.get("G4F_LOGIN_URL")
+                    if login_url:
+                        yield RequestLogin(cls.label, login_url)
+                    cls._access_token, cls._cookies = await get_access_token_and_cookies(cls.url, proxy)
+                else:
+                    raise h
+        yield AuthResult(
+            api_key=cls._access_token,
+            cookies=cls._cookies,
+        )
+
+    @classmethod
+    async def create_authed(
+        cls,
+        model: str,
+        messages: Messages,
+        auth_result: AuthResult,
+        **kwargs
+    ) -> AsyncResult:
+        Copilot._access_token = getattr(auth_result, "api_key")
+        Copilot._cookies = getattr(auth_result, "cookies")
+        Copilot.needs_auth = cls.needs_auth
+        for chunk in Copilot.create_completion(model, messages, **kwargs):
+            yield chunk
+        auth_result.cookies = Copilot._cookies if isinstance(Copilot._cookies, dict) else {c.name: c.value for c in Copilot._cookies}
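A minimal sketch of driving the new authed flow end to end. The import path and the use of default_model are assumptions for illustration; on_auth_async may yield a RequestLogin prompt before the final AuthResult:

import asyncio
from g4f.Provider import CopilotAccount
from g4f.providers.response import AuthResult, RequestLogin

async def main():
    auth_result = None
    # on_auth_async is an async generator: it can yield a login prompt first.
    async for item in CopilotAccount.on_auth_async():
        if isinstance(item, RequestLogin):
            print("Login required:", item)
        elif isinstance(item, AuthResult):
            auth_result = item  # carries api_key and cookies
    async for chunk in CopilotAccount.create_authed(
        CopilotAccount.default_model,
        [{"role": "user", "content": "Hello"}],
        auth_result,
    ):
        print(chunk, end="")

asyncio.run(main())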

View File

@@ -4,9 +4,4 @@ from .OpenaiChat import OpenaiChat
 
 class OpenaiAccount(OpenaiChat):
     needs_auth = True
     parent = "OpenaiChat"
-    default_model = "gpt-4o"
-    default_vision_model = default_model
-    default_image_model = OpenaiChat.default_image_model
-    image_models = [default_model, default_image_model, "gpt-4"]
-    fallback_models = [*OpenaiChat.fallback_models, default_image_model]

View File

@@ -98,8 +98,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     default_model = "auto"
     default_image_model = "dall-e-3"
     image_models = [default_image_model]
-    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"] + image_models
-    vision_models = fallback_models
+    text_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"]
+    vision_models = text_models
+    models = text_models + image_models
 
     synthesize_content_type = "audio/mpeg"
     _api_key: str = None
@@ -120,33 +121,6 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             turnstile_token=RequestConfig.turnstile_token
         )
 
-    @classmethod
-    def get_models(cls, proxy: str = None, timeout: int = 180) -> List[str]:
-        if not cls.models:
-            # try:
-            #     headers = {
-            #         **(cls.get_default_headers() if cls._headers is None else cls._headers),
-            #         "accept": "application/json",
-            #     }
-            #     with Session(
-            #         proxy=proxy,
-            #         impersonate="chrome",
-            #         timeout=timeout,
-            #         headers=headers
-            #     ) as session:
-            #         response = session.get(
-            #             f"{cls.url}/backend-anon/models"
-            #             if cls._api_key is None else
-            #             f"{cls.url}/backend-api/models"
-            #         )
-            #         raise_for_status(response)
-            #         data = response.json()
-            #         cls.models = [model.get("slug") for model in data.get("models")]
-            # except Exception as e:
-            #     debug.log(f"OpenaiChat: Failed to get models: {type(e).__name__}: {e}")
-            cls.models = cls.fallback_models
-        return cls.models
-
     @classmethod
     async def upload_images(
         cls,

View File

@@ -20,6 +20,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
     default_model = ""
     fallback_models = []
     sort_models = True
+    ssl = None
 
     @classmethod
     def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
@@ -30,7 +31,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
             api_base = cls.api_base
         if api_key is not None:
             headers["authorization"] = f"Bearer {api_key}"
-        response = requests.get(f"{api_base}/models", headers=headers)
+        response = requests.get(f"{api_base}/models", headers=headers, verify=cls.ssl)
         raise_for_status(response)
         data = response.json()
         data = data.get("data") if isinstance(data, dict) else data
@@ -79,12 +80,12 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
             api_base = cls.api_base
 
         # Proxy for image generation feature
-        if model in cls.image_models:
+        if model and model in cls.image_models:
             data = {
                 "prompt": messages[-1]["content"] if prompt is None else prompt,
                 "model": model,
             }
-            async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data) as response:
+            async with session.post(f"{api_base.rstrip('/')}/images/generations", json=data, ssl=cls.ssl) as response:
                 data = await response.json()
                 cls.raise_error(data)
                 await raise_for_status(response)
@@ -119,7 +120,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
                 )
                 if api_endpoint is None:
                     api_endpoint = f"{api_base.rstrip('/')}/chat/completions"
-                async with session.post(api_endpoint, json=data) as response:
+                async with session.post(api_endpoint, json=data, ssl=cls.ssl) as response:
                     content_type = response.headers.get("content-type", "text/event-stream" if stream else "application/json")
                     if content_type.startswith("application/json"):
                         data = await response.json()
@@ -180,7 +181,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
             "Content-Type": "application/json",
             **(
                 {"Authorization": f"Bearer {api_key}"}
-                if api_key is not None else {}
+                if api_key else {}
             ),
             **({} if headers is None else headers)
         }
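Because ssl is now a class attribute, a subclass can disable certificate verification for a self-signed endpoint, and the value flows into both requests.get(..., verify=cls.ssl) and the aiohttp calls above. A sketch; the import path and api_base are illustrative assumptions:

from g4f.Provider.template import OpenaiTemplate

class LocalOpenai(OpenaiTemplate):
    label = "LocalOpenai"
    url = "https://localhost:8080"
    api_base = "https://localhost:8080/v1"  # hypothetical self-hosted endpoint
    working = True
    ssl = False  # skip certificate verification for the self-signed host

print(LocalOpenai.get_models())  # GET {api_base}/models with verify=False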

View File

@@ -70,6 +70,8 @@ def iter_response(
             continue
         elif isinstance(chunk, SynthesizeData) or not chunk:
             continue
+        elif isinstance(chunk, Exception):
+            continue
 
         chunk = str(chunk)
         content += chunk
@@ -149,6 +151,8 @@ async def async_iter_response(
             continue
         elif isinstance(chunk, SynthesizeData) or not chunk:
             continue
+        elif isinstance(chunk, Exception):
+            continue
 
         chunk = str(chunk)
         content += chunk

View File

@@ -1,14 +1,15 @@
 from __future__ import annotations
 
 from ..models import ModelUtils
+from ..Provider import ProviderUtils
 
 class ImageModels():
     def __init__(self, client):
         self.client = client
-        self.models = ModelUtils.convert
 
     def get(self, name, default=None):
-        model = self.models.get(name)
-        if model and model.best_provider:
-            return model.best_provider
-        return default
+        if name in ModelUtils.convert:
+            return ModelUtils.convert[name].best_provider
+        if name in ProviderUtils.convert:
+            return ProviderUtils.convert[name]
+        return default
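With this change, ImageModels.get first resolves a real model name through ModelUtils and then falls back to a provider name, which is what enables provider names as the model argument in image generation. A hedged usage sketch (the provider name and prompt are illustrative):

from g4f.client import Client

client = Client()
# "PollinationsAI" is a provider name, not a model name; the lookup above
# resolves it via ProviderUtils.convert.
response = client.images.generate(model="PollinationsAI", prompt="a lighthouse at dawn")
print(response.data[0].url)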

View File

@@ -722,12 +722,13 @@ async function add_message_chunk(message, message_id, provider, scroll, finish_m
                 `;
             } else if (message.type == "message") {
                 console.error(message.message)
+                await api("log", {...message, provider: provider_storage[message_id]});
             } else if (message.type == "error") {
                 content_map.update_timeouts.forEach((timeoutId)=>clearTimeout(timeoutId));
                 content_map.update_timeouts = [];
-                error_storage[message_id] = message.error
-                console.error(message.error);
-                content_map.inner.innerHTML += markdown_render(`**An error occured:** ${message.error}`);
+                error_storage[message_id] = message.message
+                console.error(message.message);
+                content_map.inner.innerHTML += markdown_render(`**An error occured:** ${message.message}`);
                 let p = document.createElement("p");
                 p.innerText = message.error;
                 log_storage.appendChild(p);
@@ -865,19 +866,23 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
         }
         if (message_storage[message_id]) {
             const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
-            let usage;
+            let usage = {};
             if (usage_storage[message_id]) {
                 usage = usage_storage[message_id];
                 delete usage_storage[message_id];
             }
-            // Calculate usage if we have no usage result jet
-            if (document.getElementById("track_usage").checked && !usage && window.GPTTokenizer_cl100k_base) {
+            usage = {
+                model: message_provider?.model,
+                provider: message_provider?.name,
+                ...usage
+            }
+            // Calculate usage if we don't have it jet
+            if (document.getElementById("track_usage").checked && !usage.prompt_tokens && window.GPTTokenizer_cl100k_base) {
                 const prompt_token_model = model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
                 const prompt_tokens = GPTTokenizer_cl100k_base?.encodeChat(messages, prompt_token_model).length;
                 const completion_tokens = count_tokens(message_provider?.model, message_storage[message_id]);
                 usage = {
-                    model: message_provider?.model,
-                    provider: message_provider?.name,
+                    ...usage,
                     prompt_tokens: prompt_tokens,
                     completion_tokens: completion_tokens,
                     total_tokens: prompt_tokens + completion_tokens
@@ -1748,9 +1753,10 @@ function update_message(content_map, message_id, content = null, scroll = true)
             content = content.substring(0, lastIndex) + '<span class="cursor"></span>' + lastElement;
         }
     }
-    content_map.inner.innerHTML = content;
     if (error_storage[message_id]) {
-        content_map.inner.innerHTML += markdown_render(`**An error occured:** ${error_storage[message_id]}`);
+        content_map.inner.innerHTML = message + markdown_render(`**An error occured:** ${error_storage[message_id]}`);
+    } else {
+        content_map.inner.innerHTML = content;
     }
     content_map.count.innerText = count_words_and_tokens(message_storage[message_id], provider_storage[message_id]?.model);
     highlight(content_map.inner);

View File

@@ -12,9 +12,9 @@ from ...image import ImagePreview, ImageResponse, copy_images, ensure_images_dir
 from ...tools.run_tools import iter_run_tools
 from ...Provider import ProviderUtils, __providers__
 from ...providers.base_provider import ProviderModelMixin
-from ...providers.retry_provider import IterListProvider
+from ...providers.retry_provider import BaseRetryProvider
 from ...providers.response import BaseConversation, JsonConversation, FinishReason, Usage, Reasoning
-from ...providers.response import SynthesizeData, TitleGeneration, RequestLogin, Parameters
+from ...providers.response import SynthesizeData, TitleGeneration, RequestLogin, Parameters, ProviderInfo
 from ... import version, models
 from ... import ChatCompletion, get_model_and_provider
 from ... import debug
@@ -154,41 +154,32 @@ class Api:
             )
         except Exception as e:
             logger.exception(e)
-            yield self._format_json('error', get_error_message(e))
+            yield self._format_json('error', type(e).__name__, message=get_error_message(e))
             return
-        params = {
-            **(provider_handler.get_parameters(as_json=True) if hasattr(provider_handler, "get_parameters") else {}),
-            "model": model,
-            "messages": kwargs.get("messages"),
-        }
-        if isinstance(kwargs.get("conversation"), JsonConversation):
-            params["conversation"] = kwargs.get("conversation").get_dict()
-        else:
-            params["conversation_id"] = conversation_id
-        if kwargs.get("api_key") is not None:
-            params["api_key"] = kwargs["api_key"]
-        yield self._format_json("parameters", params)
-        first = True
+        if not isinstance(provider_handler, BaseRetryProvider):
+            yield self.handle_provider(provider_handler, model)
+            if hasattr(provider_handler, "get_parameters"):
+                yield self._format_json("parameters", provider_handler.get_parameters(as_json=True))
         try:
             result = iter_run_tools(ChatCompletion.create, **{**kwargs, "model": model, "provider": provider_handler})
             for chunk in result:
-                if first:
-                    first = False
-                    yield self.handle_provider(provider_handler, model)
-                if isinstance(chunk, BaseConversation):
+                if isinstance(chunk, ProviderInfo):
+                    yield self.handle_provider(chunk, model)
+                    provider = chunk.name
+                elif isinstance(chunk, BaseConversation):
                     if provider is not None:
                         if provider not in conversations:
                             conversations[provider] = {}
                         conversations[provider][conversation_id] = chunk
                         if isinstance(chunk, JsonConversation):
                             yield self._format_json("conversation", {
-                                provider.__name__ if isinstance(provider, type) else provider: chunk.get_dict()
+                                provider: chunk.get_dict()
                             })
                         else:
                             yield self._format_json("conversation_id", conversation_id)
                 elif isinstance(chunk, Exception):
                     logger.exception(chunk)
-                    yield self._format_json("message", get_error_message(chunk))
+                    yield self._format_json('message', get_error_message(chunk), error=type(chunk).__name__)
                 elif isinstance(chunk, ImagePreview):
                     yield self._format_json("preview", chunk.to_string())
                 elif isinstance(chunk, ImageResponse):
@@ -219,9 +210,11 @@
                 debug.logs = []
         except Exception as e:
             logger.exception(e)
-            yield self._format_json('error', get_error_message(e))
-            if first:
-                yield self.handle_provider(provider_handler, model)
+            if debug.logs:
+                for log in debug.logs:
+                    yield self._format_json("log", str(log))
+                debug.logs = []
+            yield self._format_json('error', type(e).__name__, message=get_error_message(e))
 
     def _format_json(self, response_type: str, content = None, **kwargs):
         if content is not None:
@@ -235,11 +228,11 @@
         }
 
     def handle_provider(self, provider_handler, model):
-        if isinstance(provider_handler, IterListProvider) and provider_handler.last_provider is not None:
+        if isinstance(provider_handler, BaseRetryProvider) and provider_handler.last_provider is not None:
             provider_handler = provider_handler.last_provider
-        if not model and hasattr(provider_handler, "last_model") and provider_handler.last_model is not None:
-            model = provider_handler.last_model
-        return self._format_json("provider", {**provider_handler.get_dict(), "model": model})
+        if model:
+            return self._format_json("provider", {**provider_handler.get_dict(), "model": model})
+        return self._format_json("provider", provider_handler.get_dict())
 
 def get_error_message(exception: Exception) -> str:
     return f"{type(exception).__name__}: {exception}"

View File

@@ -277,7 +277,8 @@ class Backend_Api(Api):
                 return Response(filter_markdown(response, do_filter_markdown), mimetype='text/plain')
             def cast_str():
                 for chunk in response:
-                    yield str(chunk)
+                    if not isinstance(chunk, Exception):
+                        yield str(chunk)
             return Response(cast_str(), mimetype='text/plain')
         except Exception as e:
             logger.exception(e)

View File

@@ -242,36 +242,48 @@ def ensure_images_dir():
 async def copy_images(
     images: list[str],
     cookies: Optional[Cookies] = None,
-    proxy: Optional[str] = None
+    proxy: Optional[str] = None,
+    add_url: bool = True,
+    target: str = None,
+    ssl: bool = None
 ) -> list[str]:
+    if add_url:
+        add_url = not cookies
     ensure_images_dir()
     async with ClientSession(
         connector=get_connector(proxy=proxy),
         cookies=cookies
     ) as session:
-        async def copy_image(image: str) -> str:
-            target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
-            if image.startswith("data:"):
-                with open(target, "wb") as f:
-                    f.write(extract_data_uri(image))
-            else:
-                try:
-                    async with session.get(image) as response:
-                        response.raise_for_status()
-                        with open(target, "wb") as f:
-                            async for chunk in response.content.iter_chunked(4096):
-                                f.write(chunk)
-                except ClientError as e:
-                    debug.log(f"copy_images failed: {e.__class__.__name__}: {e}")
-                    return image
-            with open(target, "rb") as f:
-                extension = is_accepted_format(f.read(12)).split("/")[-1]
-                extension = "jpg" if extension == "jpeg" else extension
-            new_target = f"{target}.{extension}"
-            os.rename(target, new_target)
-            return f"/images/{os.path.basename(new_target)}"
+        async def copy_image(image: str, target: str = None) -> str:
+            if target is None or len(images) > 1:
+                target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+            try:
+                if image.startswith("data:"):
+                    with open(target, "wb") as f:
+                        f.write(extract_data_uri(image))
+                else:
+                    try:
+                        async with session.get(image, ssl=ssl) as response:
+                            response.raise_for_status()
+                            with open(target, "wb") as f:
+                                async for chunk in response.content.iter_chunked(4096):
+                                    f.write(chunk)
+                    except ClientError as e:
+                        debug.log(f"copy_images failed: {e.__class__.__name__}: {e}")
+                        return image
+                if "." not in target:
+                    with open(target, "rb") as f:
+                        extension = is_accepted_format(f.read(12)).split("/")[-1]
+                        extension = "jpg" if extension == "jpeg" else extension
+                    new_target = f"{target}.{extension}"
+                    os.rename(target, new_target)
+                    target = new_target
+            finally:
+                if "." not in target and os.path.exists(target):
+                    os.unlink(target)
+            return f"/images/{os.path.basename(target)}{'?url=' + image if add_url and not image.startswith('data:') else ''}"
 
-        return await asyncio.gather(*[copy_image(image) for image in images])
+        return await asyncio.gather(*[copy_image(image, target) for image in images])
 
 class ImageDataResponse():
     def __init__(
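A sketch of calling the extended copy_images with the new keywords (the image URL is made up; the signature comes from the diff above):

import asyncio
from g4f.image import copy_images

local_urls = asyncio.run(copy_images(
    ["https://example.com/cat.png"],
    add_url=True,  # append ?url=<original> to the returned local path
    ssl=False,     # forwarded to session.get for self-signed hosts
))
print(local_urls)  # e.g. ['/images/1738094030_<uuid>.png?url=https://example.com/cat.png']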

View File

@@ -11,7 +11,6 @@ from .Provider import (
     Blackbox,
     CablyAI,
     ChatGLM,
-    ChatGpt,
     ChatGptEs,
     ChatGptt,
     Cloudflare,
@@ -120,7 +119,7 @@ default_vision = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DarkAI, ChatGpt])
+    best_provider = IterListProvider([DarkAI])
 )
 
 # gpt-4
@@ -134,13 +133,13 @@ gpt_4 = Model(
 gpt_4o = VisionModel(
     name = 'gpt-4o',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, ChatGpt, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([Blackbox, ChatGptt, Jmuz, ChatGptEs, PollinationsAI, DarkAI, Copilot, Liaobots, OpenaiChat])
 )
 
 gpt_4o_mini = Model(
     name = 'gpt-4o-mini',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, ChatGpt, Liaobots, OpenaiChat])
+    best_provider = IterListProvider([DDG, ChatGptEs, ChatGptt, Jmuz, PollinationsAI, OIVSCode, Liaobots, OpenaiChat])
 )
 
 # o1
@@ -440,14 +439,18 @@ qwen_2_5_coder_32b = Model(
     base_provider = 'Qwen',
     best_provider = IterListProvider([DeepInfraChat, PollinationsAI, AutonomousAI, Jmuz, HuggingChat])
 )
 
-# qwq/qvq
+qwen_2_5_1m = Model(
+    name = 'qwen-2.5-1m-demo',
+    base_provider = 'Qwen',
+    best_provider = HuggingSpace
+)
+
+### qwq/qvq ###
 qwq_32b = Model(
     name = 'qwq-32b',
     base_provider = 'Qwen',
     best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, HuggingChat])
 )
 
 qvq_72b = VisionModel(
     name = 'qvq-72b',
     base_provider = 'Qwen',
@@ -467,7 +470,6 @@ deepseek_chat = Model(
     base_provider = 'DeepSeek',
     best_provider = IterListProvider([Blackbox, DeepInfraChat, Jmuz, PollinationsAI])
 )
-
 deepseek_r1 = Model(
     name = 'deepseek-r1',
     base_provider = 'DeepSeek',
@@ -721,6 +723,7 @@ class ModelUtils:
         qwen_2_vl_7b.name: qwen_2_vl_7b,
         qwen_2_5_72b.name: qwen_2_5_72b,
         qwen_2_5_coder_32b.name: qwen_2_5_coder_32b,
+        qwen_2_5_1m.name: qwen_2_5_1m,
 
         # qwq/qvq
         qwq_32b.name: qwq_32b,

View File

@@ -379,7 +379,7 @@ class RaiseErrorMixin():
                 raise ResponseError(data["error"]["message"])
             else:
                 raise ResponseError(data["error"])
-        elif "choices" not in data or not data["choices"]:
+        elif ("choices" not in data or not data["choices"]) and "data" not in data:
             raise ResponseError(f"Invalid response: {json.dumps(data)}")
 
 class AsyncAuthedProvider(AsyncGeneratorProvider):

View File

@@ -198,5 +198,9 @@ class ImagePreview(ImageResponse):
         return super().__str__()
 
 class Parameters(ResponseType, JsonMixin):
     def __str__(self):
         return ""
+
+class ProviderInfo(ResponseType, JsonMixin):
+    def __str__(self):
+        return ""

View File

@@ -4,7 +4,7 @@ import random
 
 from ..typing import Type, List, CreateResult, Messages, AsyncResult
 from .types import BaseProvider, BaseRetryProvider, ProviderType
-from .response import ImageResponse
+from .response import ImageResponse, ProviderInfo
 from .. import debug
 from ..errors import RetryProviderError, RetryNoProviderError
@@ -53,6 +53,7 @@ class IterListProvider(BaseRetryProvider):
         for provider in self.get_providers(stream and not ignore_stream, ignored):
             self.last_provider = provider
             debug.log(f"Using {provider.__name__} provider")
+            yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
             try:
                 response = provider.get_create_function()(model, messages, stream=stream, **kwargs)
                 for chunk in response:
@@ -67,6 +68,7 @@ class IterListProvider(BaseRetryProvider):
                 debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
                 if started:
                     raise e
+                yield e
 
         raise_exceptions(exceptions)
@@ -85,6 +87,7 @@ class IterListProvider(BaseRetryProvider):
         for provider in self.get_providers(stream and not ignore_stream, ignored):
             self.last_provider = provider
             debug.log(f"Using {provider.__name__} provider")
+            yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
             try:
                 response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
                 if hasattr(response, "__aiter__"):
@@ -105,6 +108,7 @@ class IterListProvider(BaseRetryProvider):
                 debug.log(f"{provider.__name__}: {e.__class__.__name__}: {e}")
                 if started:
                     raise e
+                yield e
 
         raise_exceptions(exceptions)
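Downstream consumers can now tell provider announcements and swallowed errors apart from normal text chunks. A sketch; the provider list and model name are illustrative:

from g4f.Provider import Blackbox, DDG
from g4f.providers.retry_provider import IterListProvider
from g4f.providers.response import ProviderInfo

provider = IterListProvider([Blackbox, DDG])
for chunk in provider.create_completion("gpt-4o-mini", [{"role": "user", "content": "Hi"}]):
    if isinstance(chunk, ProviderInfo):
        print("Trying provider:", chunk.name)
    elif isinstance(chunk, Exception):
        print("Provider failed:", chunk)  # now reported instead of silently retried
    else:
        print(chunk, end="")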

View File

@@ -71,12 +71,12 @@ class StreamSession(AsyncSession):
     """
 
     def request(
-        self, method: str, url: str, **kwargs
+        self, method: str, url: str, ssl = None, **kwargs
    ) -> StreamResponse:
         if isinstance(kwargs.get("data"), CurlMime):
             kwargs["multipart"] = kwargs.pop("data")
         """Create and return a StreamResponse object for the given HTTP request."""
-        return StreamResponse(super().request(method, url, stream=True, **kwargs))
+        return StreamResponse(super().request(method, url, stream=True, verify=ssl, **kwargs))
 
     def ws_connect(self, url, *args, **kwargs):
         return WebSocket(self, url, **kwargs)
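With ssl accepted per request, a caller can disable certificate verification for a single call. A sketch, assuming the curl_cffi-backed session is exported as g4f.requests.StreamSession and the endpoint URL is made up:

import asyncio
from g4f.requests import StreamSession

async def main():
    async with StreamSession(impersonate="chrome") as session:
        # ssl=False is mapped to curl_cffi's verify=False by the patch above.
        async with session.get("https://localhost:8080/v1/models", ssl=False) as response:
            print(await response.text())

asyncio.run(main())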

View File

@@ -25,7 +25,8 @@ async def raise_for_status_async(response: Union[StreamResponse, ClientResponse]
         return
     text = await response.text()
     if message is None:
-        message = "HTML content" if response.headers.get("content-type", "").startswith("text/html") else text
+        is_html = response.headers.get("content-type", "").startswith("text/html") or text.startswith("<!DOCTYPE")
+        message = "HTML content" if is_html else text
     if message == "HTML content":
         if response.status == 520:
             message = "Unknown error (Cloudflare)"
@@ -46,7 +47,8 @@ def raise_for_status(response: Union[Response, StreamResponse, ClientResponse, R
     if response.ok:
         return
     if message is None:
-        message = "HTML content" if response.headers.get("content-type", "").startswith("text/html") else response.text
+        is_html = response.headers.get("content-type", "").startswith("text/html") or response.text.startswith("<!DOCTYPE")
+        message = "HTML content" if is_html else response.text
     if message == "HTML content":
         if response.status_code == 520:
             message = "Unknown error (Cloudflare)"

View File

@@ -14,4 +14,5 @@ brotli
 beautifulsoup4
 aiohttp_socks
 cryptography
 python-multipart
+pypdf2

View File

@@ -58,7 +58,6 @@ EXTRA_REQUIRE = {
         "uvicorn", # api
         "python-multipart",
         "pypdf2", # files
-        "docx",
     ],
     "image": [
         "pillow",