mirror of https://github.com/xtekky/gpt4free.git
synced 2025-09-26 20:31:14 +08:00
Qwen Catch error (#3186)

PollinationsAI.py:
@@ -61,6 +61,7 @@ FOLLOWUPS_DEVELOPER_MESSAGE = [{
     "content": "Provide conversation options.",
 }]
 
+
 class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Pollinations AI 🌸"
     url = "https://pollinations.ai"
@@ -110,6 +111,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             elif alias in cls.swap_model_aliases:
                 alias = cls.swap_model_aliases[alias]
             return alias.replace("-instruct", "").replace("qwen-", "qwen").replace("qwen", "qwen-")
+
         if not cls._models_loaded:
             try:
                 # Update of image models
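
Note: the chained replace in the hunk above canonicalizes Qwen aliases: strip an
"-instruct" suffix, drop any existing hyphen after "qwen", then re-insert exactly
one. A quick illustration (hypothetical normalize wrapper, example aliases):

    def normalize(alias: str) -> str:
        return alias.replace("-instruct", "").replace("qwen-", "qwen").replace("qwen", "qwen-")

    assert normalize("qwen-2.5-coder-instruct") == "qwen-2.5-coder"
    assert normalize("qwen3") == "qwen-3"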
@@ -121,12 +123,12 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
 
                 # Combine image models without duplicates
-                image_models = cls.image_models.copy() # Start with default model
+                image_models = cls.image_models.copy()  # Start with default model
 
                 # Add extra image models if not already in the list
                 for model in new_image_models:
                     if model not in image_models:
                         image_models.append(model)
 
                 cls.image_models = image_models
 
                 text_response = requests.get("https://g4f.dev/api/pollinations.ai/models")
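
Note: the loop above merges API-reported image models into the defaults while
preserving order and skipping duplicates (a set would lose the ordering). A
minimal sketch with made-up model names:

    image_models = ["flux"]                  # defaults
    new_image_models = ["flux", "gptimage"]  # as fetched from the API
    for model in new_image_models:
        if model not in image_models:
            image_models.append(model)
    assert image_models == ["flux", "gptimage"]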
@@ -192,36 +194,37 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         stream: bool = True,
         proxy: str = None,
         cache: bool = None,
         referrer: str = STATIC_URL,
         api_key: str = None,
         extra_body: dict = None,
         # Image generation parameters
         prompt: str = None,
         aspect_ratio: str = None,
         width: int = None,
         height: int = None,
         seed: Optional[int] = None,
         nologo: bool = True,
         private: bool = False,
         enhance: bool = None,
         safe: bool = False,
         transparent: bool = False,
         n: int = 1,
         # Text generation parameters
         media: MediaListType = None,
         temperature: float = None,
         presence_penalty: float = None,
         top_p: float = None,
         frequency_penalty: float = None,
         response_format: Optional[dict] = None,
-        extra_parameters: list[str] = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort", "logit_bias", "voice", "modalities", "audio"],
+        extra_parameters: list[str] = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort",
+                                       "logit_bias", "voice", "modalities", "audio"],
         **kwargs
     ) -> AsyncResult:
         if cache is None:
             cache = kwargs.get("action") == "next"
@@ -241,23 +244,23 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         debug.log(f"Using model: {model}")
         if model in cls.image_models:
             async for chunk in cls._generate_image(
                 model="gptimage" if model == "transparent" else model,
                 prompt=format_media_prompt(messages, prompt),
                 media=media,
                 proxy=proxy,
                 aspect_ratio=aspect_ratio,
                 width=width,
                 height=height,
                 seed=seed,
                 cache=cache,
                 nologo=nologo,
                 private=private,
                 enhance=enhance,
                 safe=safe,
                 transparent=transparent or model == "transparent",
                 n=n,
                 referrer=referrer,
                 api_key=api_key
             ):
                 yield chunk
         else:
@@ -272,47 +275,47 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 }
                 model = cls.default_audio_model
             async for result in cls._generate_text(
                 model=model,
                 messages=messages,
                 media=media,
                 proxy=proxy,
                 temperature=temperature,
                 presence_penalty=presence_penalty,
                 top_p=top_p,
                 frequency_penalty=frequency_penalty,
                 response_format=response_format,
                 seed=seed,
                 cache=cache,
                 stream=stream,
                 extra_parameters=extra_parameters,
                 referrer=referrer,
                 api_key=api_key,
                 extra_body=extra_body,
                 **kwargs
             ):
                 yield result
 
     @classmethod
     async def _generate_image(
         cls,
         model: str,
         prompt: str,
         media: MediaListType,
         proxy: str,
         aspect_ratio: str,
         width: int,
         height: int,
         seed: Optional[int],
         cache: bool,
         nologo: bool,
         private: bool,
         enhance: bool,
         safe: bool,
         transparent: bool,
         n: int,
         referrer: str,
         api_key: str,
         timeout: int = 120
     ) -> AsyncResult:
         if enhance is None:
             enhance = True if model == "flux" else False
@@ -339,37 +342,47 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         encoded_prompt = prompt.strip(". \n")
         if model == "gptimage" and aspect_ratio is not None:
             encoded_prompt = f"{encoded_prompt} aspect-ratio: {aspect_ratio}"
-        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8].rstrip("%")
+        encoded_prompt = quote_plus(encoded_prompt)[:4096 - len(cls.image_api_endpoint) - len(query) - 8].rstrip("%")
         url = f"{cls.image_api_endpoint}prompt/{encoded_prompt}?{query}"
+
         def get_url_with_seed(i: int, seed: Optional[int] = None):
             if model == "gptimage":
                 return url
             if i == 0:
                 if not cache and seed is None:
-                    seed = random.randint(0, 2**32)
+                    seed = random.randint(0, 2 ** 32)
             else:
-                seed = random.randint(0, 2**32)
+                seed = random.randint(0, 2 ** 32)
             return f"{url}&seed={seed}" if seed else url
+
         headers = {"referer": referrer}
         if api_key:
             headers["authorization"] = f"Bearer {api_key}"
         async with ClientSession(
             headers=DEFAULT_HEADERS,
             connector=get_connector(proxy=proxy),
             timeout=ClientTimeout(timeout)
         ) as session:
             responses = set()
             yield Reasoning(label=f"Generating {n} {'image' if n == 1 else 'images'}")
             finished = 0
             start = time.time()
+
             async def get_image(responses: set, i: int, seed: Optional[int] = None):
                 try:
-                    async with session.get(get_url_with_seed(i, seed), allow_redirects=False, headers=headers) as response:
+                    async with session.get(get_url_with_seed(i, seed), allow_redirects=False,
+                                           headers=headers) as response:
                         await raise_for_status(response)
                 except Exception as e:
                     responses.add(e)
                     debug.error(f"Error fetching image: {e}")
-                responses.add(ImageResponse(str(response.url), prompt, {"headers": headers}))
+                if response.headers['content-type'].startswith("image/"):
+                    responses.add(ImageResponse(str(response.url), prompt, {"headers": headers}))
+                else:
+                    t_ = await response.text()
+                    debug.error(f"UnHandel Error fetching image: {t_}")
+                    responses.add(t_)
+
             tasks: list[asyncio.Task] = []
             for i in range(int(n)):
                 tasks.append(asyncio.create_task(get_image(responses, i, seed)))
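
Note: the content-type guard added above is the PollinationsAI half of this
commit. The image endpoint can answer 200 with a JSON or HTML error body, so the
final URL is now only wrapped in an ImageResponse when the server really
returned an image. A standalone sketch of the same guard, assuming only aiohttp
and a hypothetical fetch_image helper:

    import aiohttp

    async def fetch_image(session: aiohttp.ClientSession, url: str) -> str:
        async with session.get(url, allow_redirects=False) as response:
            response.raise_for_status()
            content_type = response.headers.get("content-type", "")
            if content_type.startswith("image/"):
                return str(response.url)  # safe to treat as an image
            # Surface the body as an error instead of yielding a broken image.
            raise ValueError(f"Expected an image, got {content_type}: {await response.text()}")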
@@ -386,8 +399,9 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                         raise item
                     else:
                         finished += 1
-                        yield Reasoning(label=f"Image {finished}/{n} failed after {time.time() - start:.2f}s: {item}")
+                        yield Reasoning(
+                            label=f"Image {finished}/{n} failed after {time.time() - start:.2f}s: {item}")
                 else:
                     finished += 1
                     yield Reasoning(label=f"Image {finished}/{n} generated in {time.time() - start:.2f}s")
                 yield item
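
Note: the Reasoning labels in the hunk above report per-image progress as the
concurrent downloads finish. A self-contained sketch of that pattern, with
asyncio.sleep standing in for one image request:

    import asyncio
    import time

    async def run_all(n: int) -> None:
        start = time.time()

        async def job(i: int) -> str:
            await asyncio.sleep(0.1 * i)  # placeholder for one image request
            return f"image-{i}"

        finished = 0
        for task in asyncio.as_completed([job(i) for i in range(n)]):
            await task
            finished += 1
            print(f"Image {finished}/{n} generated in {time.time() - start:.2f}s")

    asyncio.run(run_all(3))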
@@ -397,27 +411,27 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
 
     @classmethod
     async def _generate_text(
         cls,
         model: str,
         messages: Messages,
         media: MediaListType,
         proxy: str,
         temperature: float,
         presence_penalty: float,
         top_p: float,
         frequency_penalty: float,
         response_format: Optional[dict],
         seed: Optional[int],
         cache: bool,
         stream: bool,
         extra_parameters: list[str],
         referrer: str,
         api_key: str,
         extra_body: dict,
         **kwargs
     ) -> AsyncResult:
         if not cache and seed is None:
-            seed = random.randint(0, 2**32)
+            seed = random.randint(0, 2 ** 32)
 
         async with ClientSession(headers=DEFAULT_HEADERS, connector=get_connector(proxy=proxy)) as session:
             extra_body.update({param: kwargs[param] for param in extra_parameters if param in kwargs})
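
Note: the extra_body.update(...) at the end of this hunk whitelists passthrough
options: only names listed in extra_parameters are copied from **kwargs into the
request body. A tiny worked example with assumed values:

    extra_parameters = ["tools", "tool_choice", "reasoning_effort"]
    kwargs = {"tools": [], "temperature": 1.0}  # example caller input
    extra_body = {}
    extra_body.update({param: kwargs[param] for param in extra_parameters if param in kwargs})
    assert extra_body == {"tools": []}  # "temperature" is not whitelisted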
@@ -440,7 +454,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 frequency_penalty=frequency_penalty,
                 response_format=response_format,
                 stream=stream,
-                seed=None if model =="grok" else seed,
+                seed=None if model == "grok" else seed,
                 referrer=referrer,
                 **extra_body
             )
@@ -451,7 +465,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 if response.status in (400, 500):
                     debug.error(f"Error: {response.status} - Bad Request: {data}")
                 full_resposne = []
-                async for chunk in read_response(response, stream, format_media_prompt(messages), cls.get_dict(), kwargs.get("download_media", True)):
+                async for chunk in read_response(response, stream, format_media_prompt(messages), cls.get_dict(),
+                                                 kwargs.get("download_media", True)):
                     if isinstance(chunk, str):
                         full_resposne.append(chunk)
                     yield chunk
@@ -479,7 +494,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
             async with session.post(cls.openai_endpoint, json=data, headers=headers) as response:
                 try:
                     await raise_for_status(response)
-                    tool_calls = (await response.json()).get("choices", [{}])[0].get("message", {}).get("tool_calls", [])
+                    tool_calls = (await response.json()).get("choices", [{}])[0].get("message", {}).get(
+                        "tool_calls", [])
                     if tool_calls:
                         arguments = json.loads(tool_calls.pop().get("function", {}).get("arguments"))
                         if arguments.get("title"):
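
Note: the wrapped call above extracts tool calls from an OpenAI-style chat
completion before the title/followups handling below. A sketch of the same
extraction against a made-up payload:

    import json

    payload = {"choices": [{"message": {"tool_calls": [
        {"function": {"arguments": '{"title": "Chat", "followups": ["More?"]}'}}
    ]}}]}
    tool_calls = payload.get("choices", [{}])[0].get("message", {}).get("tool_calls", [])
    if tool_calls:
        arguments = json.loads(tool_calls.pop().get("function", {}).get("arguments"))
        assert arguments["title"] == "Chat"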
@@ -487,4 +503,4 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                         if arguments.get("followups"):
                             yield SuggestedFollowups(arguments.get("followups"))
                 except Exception as e:
                     debug.error("Error generating title and followups:", e)

Qwen.py:
@@ -8,7 +8,7 @@ from time import time
 from typing import Literal, Optional
 
 import aiohttp
-from ..errors import RateLimitError
+from ..errors import RateLimitError, ResponseError
 from ..typing import AsyncResult, Messages, MediaListType
 from ..providers.response import JsonConversation, Reasoning, Usage, ImageResponse, FinishReason
 from ..requests import sse_stream
@@ -97,20 +97,20 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
 
     @classmethod
     async def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         media: MediaListType = None,
         conversation: JsonConversation = None,
         proxy: str = None,
         timeout: int = 120,
         stream: bool = True,
         enable_thinking: bool = True,
         chat_type: Literal[
             "t2t", "search", "artifacts", "web_dev", "deep_research", "t2i", "image_edit", "t2v"
         ] = "t2t",
         aspect_ratio: Optional[Literal["1:1", "4:3", "3:4", "16:9", "9:16"]] = None,
         **kwargs
     ) -> AsyncResult:
         """
         chat_type:
@@ -265,6 +265,9 @@ class Qwen(AsyncGeneratorProvider, ProviderModelMixin):
             usage = None
             async for chunk in sse_stream(resp):
                 try:
+                    error = chunk.get("error", {})
+                    if error:
+                        raise ResponseError(f'{error["code"]}: {error["details"]}')
                     usage = chunk.get("usage", usage)
                     choices = chunk.get("choices", [])
                     if not choices: continue
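
Note: this last hunk is the change the commit is named for. Qwen's SSE stream
can deliver an error object inside a chunk, which previously fell through the
parser silently; it is now raised as a ResponseError. A minimal sketch of the
pattern, with RuntimeError standing in for g4f's ResponseError and the chunk
shape assumed from the diff:

    def check_chunk(chunk: dict) -> dict:
        error = chunk.get("error", {})
        if error:
            raise RuntimeError(f'{error["code"]}: {error["details"]}')
        return chunk

    assert check_chunk({"usage": None})["usage"] is None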