feat: Refactor PollinationsAI and ARTA provider structure

- Updated `PollinationsAI.py` to strip surrounding periods, spaces, and newlines from the prompt before encoding.
- Modified the prompt encoding to strip trailing percent signs after URL encoding and truncation, so a percent escape cut in half by the length limit cannot produce an invalid URL.
- Simplified the audio response handling in `PollinationsAI.py` by removing unnecessary checks and yielding chunks directly.
- Renamed `ARTA.py` to `deprecated/ARTA.py` and updated import paths accordingly in `__init__.py`.
- Set the `working` flag of the `ARTA` class to `False`, disabling the provider following a take-down request.
- Enhanced the `Video` class in `Video.py` to include aspect ratio handling and improved URL response caching.
- Updated the `RequestConfig` class to use a dictionary for storing URLs associated with prompts.
- Removed references to the `ARTA` provider in various files, including `models.py` and `any_provider.py`.
- Adjusted the `best_provider` assignments in `models.py` to exclude `ARTA` and include `HuggingFaceMedia` where applicable.
- Updated the response handling in `Video.py` to yield cached responses when available.
Author: hlohaus
Date: 2025-06-19 00:42:41 +02:00
Parent: faf94ccfbb
Commit: d824d77d65
11 changed files with 106 additions and 87 deletions

File: PollinationsAI.py

@@ -392,10 +392,10 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 **params
             }, "1:1" if aspect_ratio is None else aspect_ratio)
         query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
-        encoded_prompt = prompt
+        encoded_prompt = prompt.strip(". \n")
         if model == "gptimage" and aspect_ratio is not None:
             encoded_prompt = f"{encoded_prompt} aspect-ratio: {aspect_ratio}"
-        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8]
+        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8].rstrip("%")
         url = f"{cls.image_api_endpoint}prompt/{encoded_prompt}?{query}"
         def get_url_with_seed(i: int, seed: Optional[int] = None):
             if model == "gptimage":
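Note: the new `.rstrip("%")` guards against the fixed-length cut landing inside a percent escape. A minimal standalone sketch of the failure mode it avoids (illustrative strings, not values from the provider):

```python
from urllib.parse import quote_plus

encoded = quote_plus("café ")   # 'caf%C3%A9+'
truncated = encoded[:4]         # 'caf%' -- the cut split the '%C3' escape in half
safe = truncated.rstrip("%")    # 'caf'  -- trailing '%' dropped, URL stays parseable
```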
@@ -583,15 +583,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 audio = message.get("audio", {})
                 if "data" in audio:
                     async for chunk in save_response_media(audio["data"], prompt, [model, extra_body.get("audio", {}).get("voice")]):
-                        if isinstance(chunk, AudioResponse) and not download_media and voice and len(messages) == 1:
-                            prompt = messages[0].get("content")
-                            if isinstance(prompt, str):
-                                url = f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}&voice={quote(voice)}&seed={quote(str(seed))}"
-                                yield AudioResponse(url)
-                            else:
-                                yield chunk
-                        else:
-                            yield chunk
+                        yield chunk
                 if "transcript" in audio:
                     yield "\n\n"
                     yield audio["transcript"]

File: __init__.py

@@ -32,7 +32,7 @@ try:
 except ImportError as e:
     debug.error("Audio providers not loaded:", e)

-from .ARTA import ARTA
+from .deprecated.ARTA import ARTA
 from .Blackbox import Blackbox
 from .Chatai import Chatai
 from .Cloudflare import Cloudflare

File: deprecated/ARTA.py

@@ -8,13 +8,13 @@ from pathlib import Path
 from aiohttp import ClientSession, ClientResponse
 import asyncio

-from ..typing import AsyncResult, Messages
-from ..providers.response import ImageResponse, Reasoning
-from ..errors import ResponseError, ModelNotFoundError
-from ..cookies import get_cookies_dir
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_media_prompt
-from .. import debug
+from ...typing import AsyncResult, Messages
+from ...providers.response import ImageResponse, Reasoning
+from ...errors import ResponseError, ModelNotFoundError
+from ...cookies import get_cookies_dir
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_media_prompt
+from ... import debug

 class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://ai-arta.com"
@@ -23,7 +23,7 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
     image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
     status_check_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"
-    working = True
+    working = False # Take down request
     default_model = "flux"
     default_image_model = default_model

File: Video.py

@@ -10,22 +10,33 @@ from aiohttp import ClientSession
 try:
     import nodriver
+    from nodriver.core.connection import ProtocolException
 except:
     pass
 from ...typing import Messages, AsyncResult
-from ...providers.response import VideoResponse, Reasoning, ContinueResponse
+from ...providers.response import VideoResponse, Reasoning, ContinueResponse, ProviderInfo
 from ...requests import get_nodriver
 from ...errors import MissingRequirementsError
-from ..base_provider import AsyncGeneratorProvider
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_media_prompt
 from ... import debug

 class RequestConfig:
-    urls: list[str] = []
+    urls: dict[str, list[str]] = {}
     headers: dict = {}

-class Video(AsyncGeneratorProvider):
+    @classmethod
+    def get_response(cls, prompt: str) -> VideoResponse | None:
+        if prompt in cls.urls and cls.urls[prompt]:
+            cls.urls[prompt] = list(set(cls.urls[prompt]))
+            debug.log(f"Video URL: {len(cls.urls[prompt])}")
+            return VideoResponse(cls.urls[prompt], prompt, {
+                "headers": {"authorization": cls.headers.get("authorization")} if cls.headers.get("authorization") else {},
+                "preview": [url.replace("md.mp4", "thumb.webp") for url in cls.urls[prompt]]
+            })
+
+class Video(AsyncGeneratorProvider, ProviderModelMixin):
     urls = [
         "https://sora.chatgpt.com/explore",
         #"https://aistudio.google.com/generate-video"
@@ -35,6 +46,10 @@ class Video(AsyncGeneratorProvider):
     search_url = f"{pub_url}/search/video+"
     drive_url = "https://www.googleapis.com/drive/v3/"

+    active_by_default = True
+    default_model = "sora"
+    video_models = [default_model]
+
     needs_auth = True
     working = True
@@ -48,26 +63,33 @@ class Video(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         prompt: str = None,
+        aspect_ratio: str = None,
         **kwargs
     ) -> AsyncResult:
+        yield ProviderInfo(**cls.get_dict(), model="sora")
         started = time.time()
         prompt = format_media_prompt(messages, prompt)
         if not prompt:
             raise ValueError("Prompt cannot be empty.")
         async with ClientSession() as session:
             yield Reasoning(label="Lookup")
-            has_video = False
+            found_urls = []
             for skip in range(0, 9):
                 async with session.get(cls.search_url + quote_plus(prompt) + f"?skip={skip}", timeout=ClientTimeout(total=10)) as response:
                     if response.ok:
                         yield Reasoning(label=f"Found {skip+1}", status="")
-                        yield VideoResponse(str(response.url), prompt)
-                        has_video = True
+                        found_urls.append(str(response.url))
                     else:
                         break
-        if has_video:
+        if found_urls:
             yield Reasoning(label=f"Finished", status="")
+            yield VideoResponse(found_urls, prompt)
             return
+        response = RequestConfig.get_response(prompt)
+        if response:
+            yield Reasoning(label="Found cached Video", status="")
+            yield response
+            return
         try:
             yield Reasoning(label="Open browser")
             browser, stop_browser = await get_nodriver(proxy=proxy, user_data_dir="gemini")
@@ -87,17 +109,14 @@ class Video(AsyncGeneratorProvider):
                     yield VideoResponse(str(response.url), prompt)
                     return
             raise MissingRequirementsError("Video provider requires a browser to be installed.")
-        RequestConfig.urls = []
         try:
             cls.page = await browser.get(random.choice(cls.urls))
         except Exception as e:
             debug.error(f"Error opening page:", e)
-            if RequestConfig.urls:
-                RequestConfig.urls = list(set(RequestConfig.urls))
-                debug.log(f"Video URL: {len(RequestConfig.urls)}")
-                yield VideoResponse(RequestConfig.urls, prompt, {
-                    "headers": {"authorization": RequestConfig.headers.get("authorization")} if RequestConfig.headers.get("authorization") else {}
-                })
+            response = RequestConfig.get_response(prompt)
+            if response:
+                yield Reasoning(label="Found", status="")
+                yield response
             return
         try:
             page = cls.page
@@ -117,6 +136,21 @@ class Video(AsyncGeneratorProvider):
                     debug.error("No 'Video' button found.")
             except Exception as e:
                 debug.error(f"Error clicking button:", e)
+            try:
+                if aspect_ratio:
+                    button = await page.find("2:3")
+                    if button:
+                        await button.click()
+                    else:
+                        debug.error("No '2:3' button found.")
+                    button = await page.find(aspect_ratio)
+                    if button:
+                        await button.click()
+                        yield Reasoning(label=f"Clicked '{aspect_ratio}' button")
+                    else:
+                        debug.error(f"No '{aspect_ratio}' button found.")
+            except Exception as e:
+                debug.error(f"Error clicking button:", e)
             debug.log(f"Using prompt: {prompt}")
             textarea = await page.select("textarea", 180)
             await textarea.send_keys(prompt)
@@ -148,35 +182,35 @@ class Video(AsyncGeneratorProvider):
                         await button.click()
                         yield Reasoning(label=f"Clicked 'Queued' button")
                         break
-                    except Exception as e:
-                        debug.error(f"Error clicking 'Queued' button:", e)
-            yield Reasoning(label=f"Waiting for Video URL...")
+                    except ProtocolException as e:
+                        pass
+            if prompt not in RequestConfig.urls:
+                RequestConfig.urls[prompt] = []
             def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
-                if "mp4" in event.request.url:
+                if ".mp4" in event.request.url:
                     RequestConfig.headers = {}
                     for key, value in event.request.headers.items():
                         RequestConfig.headers[key.lower()] = value
-                    RequestConfig.urls.append(event.request.url)
+                    RequestConfig.urls[prompt].append(event.request.url)
                 elif event.request.url.startswith(cls.drive_url):
                     RequestConfig.headers = {}
                     for key, value in event.request.headers.items():
                         RequestConfig.headers[key.lower()] = value
-                    RequestConfig.urls.append(event.request.url)
+                    RequestConfig.urls[prompt].append(event.request.url)
             await page.send(nodriver.cdp.network.enable())
             page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
             for idx in range(600):
+                yield Reasoning(label=f"Waiting for Video... {idx+1}/600")
                 if time.time() - started > 30:
                     yield ContinueResponse("Timeout waiting for Video URL")
                 await asyncio.sleep(1)
-                if RequestConfig.urls:
+                if RequestConfig.urls[prompt]:
                     await asyncio.sleep(2)
-                    RequestConfig.urls = list(set(RequestConfig.urls))
-                    debug.log(f"Video URL: {len(RequestConfig.urls)}")
-                    yield VideoResponse(RequestConfig.urls, prompt, {
-                        "headers": {"authorization": RequestConfig.headers.get("authorization")} if RequestConfig.headers.get("authorization") else {}
-                    })
-                    yield Reasoning(label=f"Finished", status="")
-                    break
+                    response = RequestConfig.get_response(prompt)
+                    if response:
+                        yield Reasoning(label="Finished", status="")
+                        yield response
+                        return
                 if idx == 599:
                     raise RuntimeError("Failed to get Video URL")
         finally:

File: Hugging Face provider model aliases (filename not shown)

@@ -34,12 +34,15 @@ model_aliases = {
     "flux": "black-forest-labs/FLUX.1-dev",
     "flux-dev": "black-forest-labs/FLUX.1-dev",
     "flux-schnell": "black-forest-labs/FLUX.1-schnell",
+    "stable-diffusion-3.5-large": "stabilityai/stable-diffusion-3.5-large",
+    "sdxl-1.0": "stabilityai/stable-diffusion-xl-base-1.0",
+    "sdxl-turbo": "stabilityai/sdxl-turbo",
+    "sd-3.5-large": "stabilityai/stable-diffusion-3.5-large",
     ### Used in other providers ###
     "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct",
     "gemma-2-27b": "google/gemma-2-27b-it",
     "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
     "qvq-72b": "Qwen/QVQ-72B-Preview",
-    "stable-diffusion-3.5-large": "stabilityai/stable-diffusion-3.5-large",
 }

 extra_models = [
     "meta-llama/Llama-3.2-11B-Vision-Instruct",

File: API server (filename not shown)

@@ -82,6 +82,7 @@ from g4f import debug
 logger = logging.getLogger(__name__)

 DEFAULT_PORT = 1337
+DEFAULT_TIMEOUT = 600

 @asynccontextmanager
 async def lifespan(app: FastAPI):
@@ -141,6 +142,7 @@ def create_app_with_demo_and_debug():
     g4f.debug.logging = True
     AppConfig.gui = True
     AppConfig.demo = True
+    AppConfig.timeout = 60
     return create_app()

 class ErrorResponse(Response):
@@ -169,6 +171,7 @@ class AppConfig:
     proxy: str = None
     gui: bool = False
     demo: bool = False
+    timeout: int = DEFAULT_TIMEOUT

     @classmethod
     def set_config(cls, **data):
@@ -347,6 +350,8 @@ class Api:
         config.provider = AppConfig.provider if provider is None else provider
         if config.conversation_id is None:
             config.conversation_id = conversation_id
+        if config.timeout is None:
+            config.timeout = AppConfig.timeout
         if credentials is not None and credentials.credentials != "secret":
             config.api_key = credentials.credentials
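Note: a sketch of the timeout fallback this wiring produces; the helper below is illustrative, not part of the commit. A request without an explicit timeout inherits `AppConfig.timeout`, which defaults to `DEFAULT_TIMEOUT` (600) and is lowered to 60 for the demo app:

```python
def resolve_timeout(request_timeout: int | None, demo: bool = False) -> int:
    app_timeout = 60 if demo else 600   # mirrors AppConfig.timeout / DEFAULT_TIMEOUT
    return app_timeout if request_timeout is None else request_timeout

assert resolve_timeout(None) == 600            # API-level default
assert resolve_timeout(None, demo=True) == 60  # demo app override
assert resolve_timeout(30) == 30               # explicit request value wins
```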

File: Backend_Api module (filename not shown)

@@ -386,12 +386,14 @@ class Backend_Api(Api):
                         process_image(image, save=os.path.join(thumbnail_dir, filename))
                     except Exception as e:
                         logger.exception(e)
-                elif is_supported:
+                elif is_supported and not result:
                     newfile = os.path.join(bucket_dir, filename)
                     filenames.append(filename)
                 else:
                     os.remove(copyfile)
-                    raise ValueError(f"Unsupported file type: {filename}")
+                    if not result:
+                        raise ValueError(f"Unsupported file type: {filename}")
+                    continue
                 try:
                     os.rename(copyfile, newfile)
                 except OSError:

File: models.py

@@ -6,7 +6,6 @@ from typing import Dict, List, Optional
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     ### No Auth Required ###
-    ARTA,
     Blackbox,
     Chatai,
     Cloudflare,
@@ -41,6 +40,7 @@ from .Provider import (
     HailuoAI,
     HuggingChat,
     HuggingFace,
+    HuggingFaceMedia,
     HuggingFaceAPI,
     MetaAI,
     MicrosoftDesigner,
@@ -287,7 +287,7 @@ dall_e_3 = ImageModel(
 gpt_image = ImageModel(
     name = 'gpt-image',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([PollinationsImage, ARTA])
+    best_provider = IterListProvider([PollinationsImage])
 )

 ### Meta ###
@@ -880,48 +880,35 @@ evil = Model(
     best_provider = PollinationsAI
 )

-### Stability AI ###
-sdxl_1_0 = ImageModel(
-    name = 'sdxl-1.0',
-    base_provider = 'Stability AI',
-    best_provider = ARTA
-)
-
-sdxl_l = ImageModel(
-    name = 'sdxl-l',
-    base_provider = 'Stability AI',
-    best_provider = ARTA
-)
-
 sdxl_turbo = ImageModel(
     name = 'sdxl-turbo',
     base_provider = 'Stability AI',
-    best_provider = IterListProvider([PollinationsImage, ImageLabs])
+    best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage, ImageLabs])
 )

 sd_3_5_large = ImageModel(
     name = 'sd-3.5-large',
     base_provider = 'Stability AI',
-    best_provider = HuggingSpace
+    best_provider = IterListProvider([HuggingFaceMedia, HuggingSpace])
 )

 ### Black Forest Labs ###
 flux = ImageModel(
     name = 'flux',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsImage, Websim, Together, HuggingSpace, ARTA])
+    best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage, Websim, Together, HuggingSpace])
 )

 flux_pro = ImageModel(
     name = 'flux-pro',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsImage, Together, ARTA])
+    best_provider = IterListProvider([PollinationsImage, Together])
 )

 flux_dev = ImageModel(
     name = 'flux-dev',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsImage, HuggingSpace, Together, ARTA, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([PollinationsImage, HuggingSpace, Together, HuggingChat, HuggingFace])
 )

 flux_schnell = ImageModel(
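Note: these `best_provider` lists rely on `IterListProvider` falling back across providers. A simplified model of that behavior (an assumption for illustration, not the library's actual implementation):

```python
def try_providers(providers: list, create_completion):
    # Attempt each provider in the list until one succeeds;
    # re-raise the last error if all of them fail.
    last_error = None
    for provider in providers:
        try:
            return create_completion(provider)
        except Exception as e:
            last_error = e
    raise last_error
```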

File: any_provider.py

@@ -11,7 +11,7 @@ from ..Provider.hf_space import HuggingSpace
 from ..Provider import __map__
 from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
 from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer2, OIVSCodeSer0501, TeachAnything
-from ..Provider import Together, WeWordle, Yqcloud, Chatai, Free2GPT, ARTA, ImageLabs, LegacyLMArena, LMArenaBeta
+from ..Provider import Together, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs, LegacyLMArena, LMArenaBeta
 from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM, Video
 from ..Provider import HarProvider, HuggingFace, HuggingFaceMedia
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -21,13 +21,13 @@ from .. import debug
 PROVIERS_LIST_1 = [
     OpenaiChat, PollinationsAI, Cloudflare, PerplexityLabs, Gemini, Grok, DeepSeekAPI, Blackbox, OpenAIFM,
-    OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, Together, WeWordle, Yqcloud, Chatai, Free2GPT, ARTA, ImageLabs,
+    OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, Together, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs,
     HarProvider, LegacyLMArena, LMArenaBeta, LambdaChat, CopilotAccount, DeepInfraChat,
     HuggingSpace, HuggingFace, HuggingFaceMedia, Together
 ]

 PROVIERS_LIST_2 = [
-    OpenaiChat, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok, ARTA
+    OpenaiChat, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok
 ]

 PROVIERS_LIST_3 = [
@@ -48,7 +48,6 @@ LABELS = {
     "phi": "Microsoft: Phi / WizardLM",
     "mistral": "Mistral",
     "PollinationsAI": "Pollinations AI",
-    "ARTA": "ARTA",
     "voices": "Voices",
     "perplexity": "Perplexity Labs",
     "openrouter": "OpenRouter",
@@ -81,7 +80,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
         added = False
         # Check for models with prefix
         start = model.split(":")[0]
-        if start in ("PollinationsAI", "ARTA", "openrouter"):
+        if start in ("PollinationsAI", "openrouter"):
             submodel = model.split(":", maxsplit=1)[1]
             if submodel in OpenAIFM.voices or submodel in PollinationsAI.audio_models[PollinationsAI.default_audio_model]:
                 groups["voices"].append(submodel)
@@ -184,13 +183,12 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
             try:
                 if provider == CopilotAccount:
                     all_models.extend(list(provider.model_aliases.keys()))
-                elif provider in [PollinationsAI, ARTA]:
+                elif provider == PollinationsAI:
                     all_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model not in all_models])
                     cls.audio_models.update({f"{provider.__name__}:{model}": [] for model in provider.get_models() if model in provider.audio_models})
                     cls.image_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model in provider.image_models])
                     cls.vision_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model in provider.vision_models])
-                    if provider == PollinationsAI:
-                        all_models.extend(list(provider.model_aliases.keys()))
+                    all_models.extend(list(provider.model_aliases.keys()))
                 else:
                     all_models.extend(provider.get_models())
             except Exception as e:

File: provider base classes (filename not shown)

@@ -22,8 +22,6 @@ from .helper import concat_chunks
 from ..cookies import get_cookies_dir
 from ..errors import ModelNotFoundError, ResponseError, MissingAuthError, NoValidHarFileError, PaymentRequiredError

-DEFAULT_TIMEOUT = 600
-
 SAFE_PARAMETERS = [
     "model", "messages", "stream", "timeout",
     "proxy", "media", "response_format",
@@ -97,7 +95,7 @@ class AbstractProvider(BaseProvider):
         model: str,
         messages: Messages,
         *,
-        timeout: int = DEFAULT_TIMEOUT,
+        timeout: int = None,
         loop: AbstractEventLoop = None,
         executor: ThreadPoolExecutor = None,
         **kwargs
@@ -295,7 +293,7 @@ class AsyncGeneratorProvider(AbstractProvider):
         model: str,
         messages: Messages,
         stream: bool = True,
-        timeout: int = DEFAULT_TIMEOUT,
+        timeout: int = None,
         **kwargs
     ) -> CreateResult:
         """

File: file reading tools (filename not shown)

@@ -199,7 +199,7 @@ def stream_read_files(bucket_dir: Path, filenames: list[str], delete_files: bool
             else:
                 os.unlink(filepath)
             continue
-        yield f"```{filename}\n"
+        yield f"<!-- File: {filename} -->\n"
         if has_pypdf2 and filename.endswith(".pdf"):
             try:
                 reader = PyPDF2.PdfReader(file_path)
@@ -237,8 +237,8 @@ def stream_read_files(bucket_dir: Path, filenames: list[str], delete_files: bool
         elif has_beautifulsoup4 and filename.endswith(".html"):
             yield from scrape_text(file_path.read_text(errors="ignore"))
         elif extension in PLAIN_FILE_EXTENSIONS:
-            yield file_path.read_text(errors="ignore")
-        yield f"\n```\n\n"
+            yield file_path.read_text(errors="ignore").strip()
+        yield f"\n<-- End -->\n\n"

 def cache_stream(stream: Iterator[str], bucket_dir: Path) -> Iterator[str]:
     cache_file = bucket_dir / PLAIN_CACHE
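Note: with this change, streamed files are framed by comment-style markers instead of a Markdown fence. Illustrative output for a plain-text file named `notes.txt` (filename is a placeholder):

```
<!-- File: notes.txt -->
file contents
<-- End -->
```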