Qwen Catch error (#3186)

This commit is contained in:
Ammar
2025-09-23 01:00:10 +03:00
committed by GitHub
parent 74fcb27cbc
commit 90627d595b
2 changed files with 152 additions and 133 deletions

View File

@@ -61,6 +61,7 @@ FOLLOWUPS_DEVELOPER_MESSAGE = [{
"content": "Provide conversation options.",
}]
class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
label = "Pollinations AI 🌸"
url = "https://pollinations.ai"
@@ -110,6 +111,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
elif alias in cls.swap_model_aliases:
alias = cls.swap_model_aliases[alias]
return alias.replace("-instruct", "").replace("qwen-", "qwen").replace("qwen", "qwen-")
if not cls._models_loaded:
try:
# Update of image models
@@ -220,7 +222,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
top_p: float = None,
frequency_penalty: float = None,
response_format: Optional[dict] = None,
extra_parameters: list[str] = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort", "logit_bias", "voice", "modalities", "audio"],
extra_parameters: list[str] = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort",
"logit_bias", "voice", "modalities", "audio"],
**kwargs
) -> AsyncResult:
if cache is None:
@@ -339,17 +342,19 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
encoded_prompt = prompt.strip(". \n")
if model == "gptimage" and aspect_ratio is not None:
encoded_prompt = f"{encoded_prompt} aspect-ratio: {aspect_ratio}"
encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8].rstrip("%")
encoded_prompt = quote_plus(encoded_prompt)[:4096 - len(cls.image_api_endpoint) - len(query) - 8].rstrip("%")
url = f"{cls.image_api_endpoint}prompt/{encoded_prompt}?{query}"
def get_url_with_seed(i: int, seed: Optional[int] = None):
    """Return the image-request URL for attempt *i*, appending a seed query param.

    - "gptimage" ignores seeds entirely and always gets the bare URL.
    - For the first attempt (i == 0) the caller-supplied seed is honoured;
      a random one is drawn only when caching is off and no seed was given.
    - Retries / additional images (i > 0) always get a fresh random seed so
      each request produces a distinct image.
    """
    if model == "gptimage":
        return url
    if i == 0:
        # Only randomize when the caller did not pin a seed and caching is disabled.
        if not cache and seed is None:
            seed = random.randint(0, 2 ** 32)
    else:
        seed = random.randint(0, 2 ** 32)
    # seed may still be None/0 (falsy) -> return the unseeded URL.
    return f"{url}&seed={seed}" if seed else url
headers = {"referer": referrer}
if api_key:
headers["authorization"] = f"Bearer {api_key}"
@@ -362,14 +367,22 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
yield Reasoning(label=f"Generating {n} {'image' if n == 1 else 'images'}")
finished = 0
start = time.time()
async def get_image(responses: set, i: int, seed: Optional[int] = None):
    """Fetch one generated image and record the outcome in *responses*.

    On success adds an ImageResponse; on a non-image payload adds the raw
    body text; on any request/HTTP failure adds the exception itself.
    The caller inspects the set to distinguish successes from failures.
    """
    try:
        async with session.get(get_url_with_seed(i, seed), allow_redirects=False,
                               headers=headers) as response:
            await raise_for_status(response)
            # The endpoint may answer 200 with an error body; only treat a
            # real image content-type as success.
            if response.headers['content-type'].startswith("image/"):
                responses.add(ImageResponse(str(response.url), prompt, {"headers": headers}))
            else:
                body = await response.text()
                debug.error(f"Unhandled error fetching image: {body}")
                responses.add(body)
    except Exception as e:
        # Collect the exception instead of raising so sibling tasks keep running.
        responses.add(e)
        debug.error(f"Error fetching image: {e}")
tasks: list[asyncio.Task] = []
for i in range(int(n)):
tasks.append(asyncio.create_task(get_image(responses, i, seed)))
@@ -386,7 +399,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
raise item
else:
finished += 1
yield Reasoning(label=f"Image {finished}/{n} failed after {time.time() - start:.2f}s: {item}")
yield Reasoning(
label=f"Image {finished}/{n} failed after {time.time() - start:.2f}s: {item}")
else:
finished += 1
yield Reasoning(label=f"Image {finished}/{n} generated in {time.time() - start:.2f}s")
@@ -417,7 +431,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
**kwargs
) -> AsyncResult:
if not cache and seed is None:
seed = random.randint(0, 2**32)
seed = random.randint(0, 2 ** 32)
async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
extra_body.update({param: kwargs[param] for param in extra_parameters if param in kwargs})
@@ -440,7 +454,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
frequency_penalty=frequency_penalty,
response_format=response_format,
stream=stream,
seed=None if model =="grok" else seed,
seed=None if model == "grok" else seed,
referrer=referrer,
**extra_body
)
@@ -451,7 +465,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
if response.status in (400, 500):
debug.error(f"Error: {response.status} - Bad Request: {data}")
full_resposne = []
async for chunk in read_response(response, stream, format_media_prompt(messages), cls.get_dict(), kwargs.get("download_media", True)):
async for chunk in read_response(response, stream, format_media_prompt(messages), cls.get_dict(),
kwargs.get("download_media", True)):
if isinstance(chunk, str):
full_resposne.append(chunk)
yield chunk
@@ -479,7 +494,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post(cls.openai_endpoint, json=data, headers=headers) as response:
try:
await raise_for_status(response)
tool_calls = (await response.json()).get("choices", [{}])[0].get("message", {}).get("tool_calls", [])
tool_calls = (await response.json()).get("choices", [{}])[0].get("message", {}).get(
"tool_calls", [])
if tool_calls:
arguments = json.loads(tool_calls.pop().get("function", {}).get("arguments"))
if arguments.get("title"):

View File

@@ -8,7 +8,7 @@ from time import time
from typing import Literal, Optional
import aiohttp
from ..errors import RateLimitError
from ..errors import RateLimitError, ResponseError
from ..typing import AsyncResult, Messages, MediaListType
from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
from ..requests import sse_stream
@@ -265,6 +265,9 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
usage = None
async for chunk in sse_stream(resp):
try:
error = chunk.get("error", {})
if error:
raise ResponseError(f'{error["code"]}: {error["details"]}')
usage = chunk.get("usage", usage)
choices = chunk.get("choices", [])
if not choices: continue