feat: Refactor PollinationsAI and ARTA provider structure

- Updated `PollinationsAI.py` to strip surrounding periods, spaces, and newlines from the prompt before encoding it.
- Trimmed trailing percent signs from the URL-encoded prompt after it is truncated to the URL length budget, so the URL can no longer end in a partial escape sequence (see the sketch after this list).
- Simplified the audio response handling in `PollinationsAI.py` by removing unnecessary checks and yielding chunks directly.
- Moved `ARTA.py` to `deprecated/ARTA.py` and updated the import path in `__init__.py` accordingly.
- Set the `working` flag of the `ARTA` class to `False`; the provider is deprecated following a take-down request.
- Enhanced the `Video` class in `Video.py` to include aspect ratio handling and improved URL response caching.
- Updated the `RequestConfig` class to use a dictionary for storing URLs associated with prompts.
- Removed references to the `ARTA` provider in various files, including `models.py` and `any_provider.py`.
- Adjusted the `best_provider` assignments in `models.py` to exclude `ARTA` and include `HuggingFaceMedia` where applicable.
- Updated the response handling in `Video.py` to yield cached responses when available.
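
Why the two encoding changes matter: `quote_plus` expands many characters into three-character escapes (`/` becomes `%2F`), and slicing the encoded string to fit a URL length budget can cut such an escape in half, leaving a dangling `%`. A minimal, self-contained sketch of the logic in the first diff below (the endpoint constant and helper name are illustrative):

```python
from urllib.parse import quote_plus

IMAGE_API_ENDPOINT = "https://image.pollinations.ai/"  # illustrative value

def build_image_url(prompt: str, query: str) -> str:
    # Strip surrounding periods, spaces, and newlines before encoding.
    prompt = prompt.strip(". \n")
    # Reserve room for the endpoint, the query string, and separators
    # inside a 4096-character URL budget.
    budget = 4096 - len(IMAGE_API_ENDPOINT) - len(query) - 8
    encoded = quote_plus(prompt)[:budget]
    # Slicing can leave a dangling "%" when it cuts an escape sequence
    # right after the percent sign; rstrip("%") removes it.
    return f"{IMAGE_API_ENDPOINT}prompt/{encoded.rstrip('%')}?{query}"

print(build_image_url("A cat on a roof.\n", "model=flux&width=1024"))
```
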
hlohaus committed 2025-06-19 00:42:41 +02:00
parent faf94ccfbb
commit d824d77d65
11 changed files with 106 additions and 87 deletions

View File

@@ -392,10 +392,10 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                 **params
             }, "1:1" if aspect_ratio is None else aspect_ratio)
         query = "&".join(f"{k}={quote_plus(str(v))}" for k, v in params.items() if v is not None)
-        encoded_prompt = prompt
+        encoded_prompt = prompt.strip(". \n")
         if model == "gptimage" and aspect_ratio is not None:
             encoded_prompt = f"{encoded_prompt} aspect-ratio: {aspect_ratio}"
-        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8]
+        encoded_prompt = quote_plus(encoded_prompt)[:4096-len(cls.image_api_endpoint)-len(query)-8].rstrip("%")
         url = f"{cls.image_api_endpoint}prompt/{encoded_prompt}?{query}"
         def get_url_with_seed(i: int, seed: Optional[int] = None):
             if model == "gptimage":
@@ -583,14 +583,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
                     audio = message.get("audio", {})
                     if "data" in audio:
                         async for chunk in save_response_media(audio["data"], prompt, [model, extra_body.get("audio", {}).get("voice")]):
-                            if isinstance(chunk, AudioResponse) and not download_media and voice and len(messages) == 1:
-                                prompt = messages[0].get("content")
-                                if isinstance(prompt, str):
-                                    url = f"https://text.pollinations.ai/{quote(prompt)}?model={quote(model)}&voice={quote(voice)}&seed={quote(str(seed))}"
-                                    yield AudioResponse(url)
-                                else:
-                                    yield chunk
-                            else:
-                                yield chunk
+                            yield chunk
                     if "transcript" in audio:
                         yield "\n\n"

View File

@@ -32,7 +32,7 @@ try:
 except ImportError as e:
     debug.error("Audio providers not loaded:", e)

-from .ARTA import ARTA
+from .deprecated.ARTA import ARTA
 from .Blackbox import Blackbox
 from .Chatai import Chatai
 from .Cloudflare import Cloudflare

View File

@@ -8,13 +8,13 @@ from pathlib import Path
 from aiohttp import ClientSession, ClientResponse
 import asyncio

-from ..typing import AsyncResult, Messages
-from ..providers.response import ImageResponse, Reasoning
-from ..errors import ResponseError, ModelNotFoundError
-from ..cookies import get_cookies_dir
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from .helper import format_media_prompt
-from .. import debug
+from ...typing import AsyncResult, Messages
+from ...providers.response import ImageResponse, Reasoning
+from ...errors import ResponseError, ModelNotFoundError
+from ...cookies import get_cookies_dir
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_media_prompt
+from ... import debug

 class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://ai-arta.com"

@@ -23,7 +23,7 @@ class ARTA(AsyncGeneratorProvider, ProviderModelMixin):
     image_generation_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image"
     status_check_url = "https://img-gen-prod.ai-arta.com/api/v1/text2image/{record_id}/status"

-    working = True
+    working = False # Take down request

     default_model = "flux"
     default_image_model = default_model

View File

@@ -10,22 +10,33 @@ from aiohttp import ClientSession
 try:
     import nodriver
     from nodriver.core.connection import ProtocolException
 except:
     pass

 from ...typing import Messages, AsyncResult
-from ...providers.response import VideoResponse, Reasoning, ContinueResponse
+from ...providers.response import VideoResponse, Reasoning, ContinueResponse, ProviderInfo
 from ...requests import get_nodriver
 from ...errors import MissingRequirementsError
-from ..base_provider import AsyncGeneratorProvider
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_media_prompt
 from ... import debug

 class RequestConfig:
-    urls: list[str] = []
+    urls: dict[str, list[str]] = {}
     headers: dict = {}

-class Video(AsyncGeneratorProvider):
+    @classmethod
+    def get_response(cls, prompt: str) -> VideoResponse | None:
+        if prompt in cls.urls and cls.urls[prompt]:
+            cls.urls[prompt] = list(set(cls.urls[prompt]))
+            debug.log(f"Video URL: {len(cls.urls[prompt])}")
+            return VideoResponse(cls.urls[prompt], prompt, {
+                "headers": {"authorization": cls.headers.get("authorization")} if cls.headers.get("authorization") else {},
+                "preview": [url.replace("md.mp4", "thumb.webp") for url in cls.urls[prompt]]
+            })
+
+class Video(AsyncGeneratorProvider, ProviderModelMixin):
     urls = [
         "https://sora.chatgpt.com/explore",
         #"https://aistudio.google.com/generate-video"
@@ -35,6 +46,10 @@ class Video(AsyncGeneratorProvider):
     search_url = f"{pub_url}/search/video+"
     drive_url = "https://www.googleapis.com/drive/v3/"
     active_by_default = True
+    default_model = "sora"
+    video_models = [default_model]
+    needs_auth = True
+    working = True
@@ -48,25 +63,32 @@ class Video(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         prompt: str = None,
+        aspect_ratio: str = None,
         **kwargs
     ) -> AsyncResult:
+        yield ProviderInfo(**cls.get_dict(), model="sora")
         started = time.time()
         prompt = format_media_prompt(messages, prompt)
         if not prompt:
             raise ValueError("Prompt cannot be empty.")
         async with ClientSession() as session:
             yield Reasoning(label="Lookup")
-            has_video = False
+            found_urls = []
             for skip in range(0, 9):
                 async with session.get(cls.search_url + quote_plus(prompt) + f"?skip={skip}", timeout=ClientTimeout(total=10)) as response:
                     if response.ok:
                         yield Reasoning(label=f"Found {skip+1}", status="")
-                        yield VideoResponse(str(response.url), prompt)
-                        has_video = True
+                        found_urls.append(str(response.url))
                     else:
                         break
-            if has_video:
+            if found_urls:
+                yield Reasoning(label=f"Finished", status="")
+                yield VideoResponse(found_urls, prompt)
                 return
+            response = RequestConfig.get_response(prompt)
+            if response:
+                yield Reasoning(label="Found cached Video", status="")
+                yield response
+                return
         try:
             yield Reasoning(label="Open browser")
@@ -87,17 +109,14 @@ class Video(AsyncGeneratorProvider):
                         yield VideoResponse(str(response.url), prompt)
                         return
             raise MissingRequirementsError("Video provider requires a browser to be installed.")
-        RequestConfig.urls = []
         try:
             cls.page = await browser.get(random.choice(cls.urls))
         except Exception as e:
             debug.error(f"Error opening page:", e)
-            if RequestConfig.urls:
-                RequestConfig.urls = list(set(RequestConfig.urls))
-                debug.log(f"Video URL: {len(RequestConfig.urls)}")
-                yield VideoResponse(RequestConfig.urls, prompt, {
-                    "headers": {"authorization": RequestConfig.headers.get("authorization")} if RequestConfig.headers.get("authorization") else {}
-                })
+            response = RequestConfig.get_response(prompt)
+            if response:
+                yield Reasoning(label="Found", status="")
+                yield response
                 return
         try:
             page = cls.page
@@ -117,6 +136,21 @@ class Video(AsyncGeneratorProvider):
                 debug.error("No 'Video' button found.")
         except Exception as e:
             debug.error(f"Error clicking button:", e)
+        try:
+            if aspect_ratio:
+                button = await page.find("2:3")
+                if button:
+                    await button.click()
+                else:
+                    debug.error("No '2:3' button found.")
+                button = await page.find(aspect_ratio)
+                if button:
+                    await button.click()
+                    yield Reasoning(label=f"Clicked '{aspect_ratio}' button")
+                else:
+                    debug.error(f"No '{aspect_ratio}' button found.")
+        except Exception as e:
+            debug.error(f"Error clicking button:", e)
         debug.log(f"Using prompt: {prompt}")
         textarea = await page.select("textarea", 180)
         await textarea.send_keys(prompt)
@@ -148,35 +182,35 @@ class Video(AsyncGeneratorProvider):
                        await button.click()
                        yield Reasoning(label=f"Clicked 'Queued' button")
                    break
                except Exception as e:
                    debug.error(f"Error clicking 'Queued' button:", e)
                yield Reasoning(label=f"Waiting for Video URL...")
        except ProtocolException as e:
            pass
+        if prompt not in RequestConfig.urls:
+            RequestConfig.urls[prompt] = []
        def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
-            if "mp4" in event.request.url:
+            if ".mp4" in event.request.url:
                RequestConfig.headers = {}
                for key, value in event.request.headers.items():
                    RequestConfig.headers[key.lower()] = value
-                RequestConfig.urls.append(event.request.url)
+                RequestConfig.urls[prompt].append(event.request.url)
            elif event.request.url.startswith(cls.drive_url):
                RequestConfig.headers = {}
                for key, value in event.request.headers.items():
                    RequestConfig.headers[key.lower()] = value
-                RequestConfig.urls.append(event.request.url)
+                RequestConfig.urls[prompt].append(event.request.url)
        await page.send(nodriver.cdp.network.enable())
        page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
        for idx in range(600):
            yield Reasoning(label=f"Waiting for Video... {idx+1}/600")
            if time.time() - started > 30:
                yield ContinueResponse("Timeout waiting for Video URL")
            await asyncio.sleep(1)
-            if RequestConfig.urls:
+            if RequestConfig.urls[prompt]:
                await asyncio.sleep(2)
-                RequestConfig.urls = list(set(RequestConfig.urls))
-                debug.log(f"Video URL: {len(RequestConfig.urls)}")
-                yield VideoResponse(RequestConfig.urls, prompt, {
-                    "headers": {"authorization": RequestConfig.headers.get("authorization")} if RequestConfig.headers.get("authorization") else {}
-                })
-                yield Reasoning(label=f"Finished", status="")
-                break
+                response = RequestConfig.get_response(prompt)
+                if response:
+                    yield Reasoning(label="Finished", status="")
+                    yield response
+                    return
            if idx == 599:
                raise RuntimeError("Failed to get Video URL")
    finally:
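
The `RequestConfig` rework above keys captured media URLs by prompt, so repeated or parallel generations no longer share one global list, and a cached result can be replayed without reopening the browser. A reduced sketch of the caching pattern (class and method names are illustrative; the real code also attaches authorization headers and preview thumbnails):

```python
class RequestCache:
    # Maps each prompt to the media URLs sniffed from browser traffic.
    urls: dict[str, list[str]] = {}

    @classmethod
    def add(cls, prompt: str, url: str) -> None:
        cls.urls.setdefault(prompt, []).append(url)

    @classmethod
    def get(cls, prompt: str) -> list[str] | None:
        if cls.urls.get(prompt):
            # Deduplicate before replaying, as get_response() does.
            cls.urls[prompt] = list(set(cls.urls[prompt]))
            return cls.urls[prompt]
        return None

RequestCache.add("a red fox", "https://example.com/clip/md.mp4")
RequestCache.add("a red fox", "https://example.com/clip/md.mp4")  # duplicate sniff
assert RequestCache.get("a red fox") == ["https://example.com/clip/md.mp4"]
```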

View File

@@ -34,12 +34,15 @@ model_aliases = {
     "flux": "black-forest-labs/FLUX.1-dev",
     "flux-dev": "black-forest-labs/FLUX.1-dev",
     "flux-schnell": "black-forest-labs/FLUX.1-schnell",
-    "stable-diffusion-3.5-large": "stabilityai/stable-diffusion-3.5-large",
+    "sdxl-1.0": "stabilityai/stable-diffusion-xl-base-1.0",
+    "sdxl-turbo": "stabilityai/sdxl-turbo",
+    "sd-3.5-large": "stabilityai/stable-diffusion-3.5-large",
     ### Used in other providers ###
     "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct",
     "gemma-2-27b": "google/gemma-2-27b-it",
     "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
     "qvq-72b": "Qwen/QVQ-72B-Preview",
+    "stable-diffusion-3.5-large": "stabilityai/stable-diffusion-3.5-large",
 }

 extra_models = [
     "meta-llama/Llama-3.2-11B-Vision-Instruct",

View File

@@ -82,6 +82,7 @@ from g4f import debug
 logger = logging.getLogger(__name__)

 DEFAULT_PORT = 1337
+DEFAULT_TIMEOUT = 600

 @asynccontextmanager
 async def lifespan(app: FastAPI):

@@ -141,6 +142,7 @@ def create_app_with_demo_and_debug():
     g4f.debug.logging = True
     AppConfig.gui = True
     AppConfig.demo = True
+    AppConfig.timeout = 60
     return create_app()

 class ErrorResponse(Response):

@@ -169,6 +171,7 @@ class AppConfig:
     proxy: str = None
     gui: bool = False
     demo: bool = False
+    timeout: int = DEFAULT_TIMEOUT

     @classmethod
     def set_config(cls, **data):

@@ -347,6 +350,8 @@ class Api:
         config.provider = AppConfig.provider if provider is None else provider
         if config.conversation_id is None:
             config.conversation_id = conversation_id
+        if config.timeout is None:
+            config.timeout = AppConfig.timeout
         if credentials is not None and credentials.credentials != "secret":
             config.api_key = credentials.credentials
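
Taken together, these hunks move the timeout default out of the provider layer: providers now accept `timeout=None`, and the API backfills `AppConfig.timeout` (600 seconds by default, 60 in demo mode) only when the request did not set one. A sketch of the resolution order (the `ChatRequest` stand-in is illustrative):

```python
from dataclasses import dataclass
from typing import Optional

DEFAULT_TIMEOUT = 600  # server-wide fallback, as in the diff

@dataclass
class ChatRequest:  # stand-in for the API's request config model
    timeout: Optional[int] = None

class AppConfig:
    timeout: int = DEFAULT_TIMEOUT  # the demo app lowers this to 60

def resolve_timeout(config: ChatRequest) -> int:
    # A per-request value wins; otherwise fall back to the app-level setting.
    if config.timeout is None:
        config.timeout = AppConfig.timeout
    return config.timeout

assert resolve_timeout(ChatRequest()) == 600
assert resolve_timeout(ChatRequest(timeout=30)) == 30
```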

View File

@@ -386,12 +386,14 @@ class Backend_Api(Api):
                         process_image(image, save=os.path.join(thumbnail_dir, filename))
                     except Exception as e:
                         logger.exception(e)
-                elif is_supported:
+                elif is_supported and not result:
                     newfile = os.path.join(bucket_dir, filename)
                     filenames.append(filename)
                 else:
                     os.remove(copyfile)
-                    raise ValueError(f"Unsupported file type: {filename}")
+                    if not result:
+                        raise ValueError(f"Unsupported file type: {filename}")
+                    continue
                 try:
                     os.rename(copyfile, newfile)
                 except OSError:

View File

@@ -6,7 +6,6 @@ from typing import Dict, List, Optional
 from .Provider import IterListProvider, ProviderType
 from .Provider import (
     ### No Auth Required ###
-    ARTA,
     Blackbox,
     Chatai,
     Cloudflare,

@@ -41,6 +40,7 @@ from .Provider import (
     HailuoAI,
     HuggingChat,
     HuggingFace,
+    HuggingFaceMedia,
     HuggingFaceAPI,
     MetaAI,
     MicrosoftDesigner,

@@ -287,7 +287,7 @@ dall_e_3 = ImageModel(
 gpt_image = ImageModel(
     name = 'gpt-image',
     base_provider = 'OpenAI',
-    best_provider = IterListProvider([PollinationsImage, ARTA])
+    best_provider = IterListProvider([PollinationsImage])
 )
)
### Meta ###
@@ -880,48 +880,35 @@ evil = Model(
     best_provider = PollinationsAI
 )

 ### Stability AI ###
-sdxl_1_0 = ImageModel(
-    name = 'sdxl-1.0',
-    base_provider = 'Stability AI',
-    best_provider = ARTA
-)
-
-sdxl_l = ImageModel(
-    name = 'sdxl-l',
-    base_provider = 'Stability AI',
-    best_provider = ARTA
-)
-
 sdxl_turbo = ImageModel(
     name = 'sdxl-turbo',
     base_provider = 'Stability AI',
-    best_provider = IterListProvider([PollinationsImage, ImageLabs])
+    best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage, ImageLabs])
 )

 sd_3_5_large = ImageModel(
     name = 'sd-3.5-large',
     base_provider = 'Stability AI',
-    best_provider = HuggingSpace
+    best_provider = IterListProvider([HuggingFaceMedia, HuggingSpace])
 )

 ### Black Forest Labs ###
 flux = ImageModel(
     name = 'flux',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsImage, Websim, Together, HuggingSpace, ARTA])
+    best_provider = IterListProvider([HuggingFaceMedia, PollinationsImage, Websim, Together, HuggingSpace])
 )

 flux_pro = ImageModel(
     name = 'flux-pro',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsImage, Together, ARTA])
+    best_provider = IterListProvider([PollinationsImage, Together])
 )

 flux_dev = ImageModel(
     name = 'flux-dev',
     base_provider = 'Black Forest Labs',
-    best_provider = IterListProvider([PollinationsImage, HuggingSpace, Together, ARTA, HuggingChat, HuggingFace])
+    best_provider = IterListProvider([PollinationsImage, HuggingSpace, Together, HuggingChat, HuggingFace])
 )

 flux_schnell = ImageModel(

View File

@@ -11,7 +11,7 @@ from ..Provider.hf_space import HuggingSpace
 from ..Provider import __map__
 from ..Provider import Cloudflare, Gemini, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
 from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer2, OIVSCodeSer0501, TeachAnything
-from ..Provider import Together, WeWordle, Yqcloud, Chatai, Free2GPT, ARTA, ImageLabs, LegacyLMArena, LMArenaBeta
+from ..Provider import Together, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs, LegacyLMArena, LMArenaBeta
 from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM, Video
 from ..Provider import HarProvider, HuggingFace, HuggingFaceMedia
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

@@ -21,13 +21,13 @@ from .. import debug
 PROVIERS_LIST_1 = [
     OpenaiChat, PollinationsAI, Cloudflare, PerplexityLabs, Gemini, Grok, DeepSeekAPI, Blackbox, OpenAIFM,
-    OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, Together, WeWordle, Yqcloud, Chatai, Free2GPT, ARTA, ImageLabs,
+    OIVSCodeSer2, OIVSCodeSer0501, TeachAnything, Together, WeWordle, Yqcloud, Chatai, Free2GPT, ImageLabs,
     HarProvider, LegacyLMArena, LMArenaBeta, LambdaChat, CopilotAccount, DeepInfraChat,
     HuggingSpace, HuggingFace, HuggingFaceMedia, Together
 ]

 PROVIERS_LIST_2 = [
-    OpenaiChat, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok, ARTA
+    OpenaiChat, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok
 ]

 PROVIERS_LIST_3 = [

@@ -48,7 +48,6 @@ LABELS = {
     "phi": "Microsoft: Phi / WizardLM",
     "mistral": "Mistral",
     "PollinationsAI": "Pollinations AI",
-    "ARTA": "ARTA",
     "voices": "Voices",
     "perplexity": "Perplexity Labs",
     "openrouter": "OpenRouter",

@@ -81,7 +80,7 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
         added = False
         # Check for models with prefix
         start = model.split(":")[0]
-        if start in ("PollinationsAI", "ARTA", "openrouter"):
+        if start in ("PollinationsAI", "openrouter"):
             submodel = model.split(":", maxsplit=1)[1]
             if submodel in OpenAIFM.voices or submodel in PollinationsAI.audio_models[PollinationsAI.default_audio_model]:
                 groups["voices"].append(submodel)

@@ -184,12 +183,11 @@ class AnyProvider(AsyncGeneratorProvider, ProviderModelMixin):
             try:
                 if provider == CopilotAccount:
                     all_models.extend(list(provider.model_aliases.keys()))
-                elif provider in [PollinationsAI, ARTA]:
+                elif provider == PollinationsAI:
                     all_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model not in all_models])
                     cls.audio_models.update({f"{provider.__name__}:{model}": [] for model in provider.get_models() if model in provider.audio_models})
                     cls.image_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model in provider.image_models])
                     cls.vision_models.extend([f"{provider.__name__}:{model}" for model in provider.get_models() if model in provider.vision_models])
-                    if provider == PollinationsAI:
-                        all_models.extend(list(provider.model_aliases.keys()))
+                    all_models.extend(list(provider.model_aliases.keys()))
                 else:
                     all_models.extend(provider.get_models())
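
With `ARTA` removed, `PollinationsAI` and `openrouter` are the only prefixes still expanded into submodels. The prefix convention itself is a plain split on the first colon, roughly (illustrative helper):

```python
def split_prefixed_model(model: str) -> tuple[str | None, str]:
    # "PollinationsAI:flux" -> ("PollinationsAI", "flux"); plain names pass through.
    if ":" in model:
        provider, submodel = model.split(":", maxsplit=1)
        if provider in ("PollinationsAI", "openrouter"):
            return provider, submodel
    return None, model

assert split_prefixed_model("PollinationsAI:flux") == ("PollinationsAI", "flux")
assert split_prefixed_model("gpt-4o") == (None, "gpt-4o")
```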

View File

@@ -22,8 +22,6 @@ from .helper import concat_chunks
 from ..cookies import get_cookies_dir
 from ..errors import ModelNotFoundError, ResponseError, MissingAuthError, NoValidHarFileError, PaymentRequiredError

-DEFAULT_TIMEOUT = 600
-
 SAFE_PARAMETERS = [
     "model", "messages", "stream", "timeout",
     "proxy", "media", "response_format",

@@ -97,7 +95,7 @@ class AbstractProvider(BaseProvider):
         model: str,
         messages: Messages,
         *,
-        timeout: int = DEFAULT_TIMEOUT,
+        timeout: int = None,
         loop: AbstractEventLoop = None,
         executor: ThreadPoolExecutor = None,
         **kwargs

@@ -295,7 +293,7 @@ class AsyncGeneratorProvider(AbstractProvider):
         model: str,
         messages: Messages,
         stream: bool = True,
-        timeout: int = DEFAULT_TIMEOUT,
+        timeout: int = None,
         **kwargs
     ) -> CreateResult:
         """

View File

@@ -199,7 +199,7 @@ def stream_read_files(bucket_dir: Path, filenames: list[str], delete_files: bool
             else:
                 os.unlink(filepath)
             continue
-        yield f"```{filename}\n"
+        yield f"<!-- File: {filename} -->\n"
         if has_pypdf2 and filename.endswith(".pdf"):
             try:
                 reader = PyPDF2.PdfReader(file_path)

@@ -237,8 +237,8 @@ def stream_read_files(bucket_dir: Path, filenames: list[str], delete_files: bool
         elif has_beautifulsoup4 and filename.endswith(".html"):
             yield from scrape_text(file_path.read_text(errors="ignore"))
         elif extension in PLAIN_FILE_EXTENSIONS:
-            yield file_path.read_text(errors="ignore")
-        yield f"\n```\n\n"
+            yield file_path.read_text(errors="ignore").strip()
+        yield f"\n<-- End -->\n\n"

 def cache_stream(stream: Iterator[str], bucket_dir: Path) -> Iterator[str]:
     cache_file = bucket_dir / PLAIN_CACHE
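
The delimiter change is defensive: the old format wrapped every file in triple-backtick fences, which break as soon as an attached file itself contains a fence, while comment-style markers nest safely. A sketch of the new concatenated format (an in-memory stand-in for `stream_read_files`; note the closing marker is `<-- End -->` exactly as committed):

```python
def concat_files(files: dict[str, str]) -> str:
    # Mirrors the new delimiter style: one comment header per file.
    parts = []
    for filename, text in files.items():
        parts.append(f"<!-- File: {filename} -->\n")
        parts.append(text.strip())
        parts.append("\n<-- End -->\n\n")
    return "".join(parts)

print(concat_files({"notes.md": "Some ```fenced``` content\n"}))
# <!-- File: notes.md -->
# Some ```fenced``` content
# <-- End -->
```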