feat: add model alias handling and update Cloudflare provider

- Introduced a `clean_name` function in `Cloudflare.py` that formats model names by stripping specific suffixes (sketched after this list).
- Updated `model_aliases` in the `Cloudflare` class with new model mappings and cleaned up redundant entries.
- Set `models` in the `Cloudflare` class to use keys from `model_aliases`.
- Adjusted the caching logic in `Cloudflare` to include new headers for requests.
- Added `parent` and `login_url` attributes to the `DeepInfraChat` class.
- Updated `PollinationsAI` to clean up the model retrieval logic and fix the handling of existing checks.
- Refactored `HarProvider` to inherit models and aliases from `LegacyLMArena`.
- Implemented loading of environment variables from a `.env` file in `cookies.py` (sketched after this list).
- Updated default headers in `defaults.py` for user agent and `sec-ch-ua`.
- Cleaned up various model references in `any_model_map.py` to reflect differences in audio, vision, and other model types.
- Added more centralized API key management in `run_tools.py` to accommodate the new naming scheme (sketched after this list).
- Enhanced the existing logic to allow more granular loading and use of API keys from environment variables.
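
For illustration, the `clean_name` helper could be a small function along these lines; both the suffix list and the example model IDs are assumptions, not taken from the commit:

```python
# Hypothetical sketch of the clean_name helper; the suffixes and examples
# below are illustrative, not the actual values used in Cloudflare.py.
def clean_name(name: str) -> str:
    """Return a short model name without vendor prefix or common suffixes."""
    name = name.split("/")[-1].lower()
    for suffix in ("-awq", "-int8", "-fp8", "-instruct", "-chat"):
        name = name.removesuffix(suffix)
    return name

# Example: build an alias map keyed by the cleaned names.
raw_models = ["@cf/meta/llama-3.1-8b-instruct", "@cf/qwen/qwen1.5-7b-chat-awq"]
model_aliases = {clean_name(m): m for m in raw_models}
```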
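The `.env` loading added to `cookies.py` could, in spirit, be a small dependency-free loader like the sketch below (the actual implementation may differ, e.g. by using python-dotenv):

```python
import os
from pathlib import Path

def load_dotenv_file(path: str = ".env") -> None:
    """Sketch: read KEY=VALUE lines into os.environ without overwriting existing values."""
    env_file = Path(path)
    if not env_file.is_file():
        return
    for raw_line in env_file.read_text().splitlines():
        line = raw_line.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue  # skip blanks, comments, and malformed lines
        key, _, value = line.partition("=")
        os.environ.setdefault(key.strip(), value.strip().strip('"').strip("'"))
```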
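Likewise, the centralized API-key handling in `run_tools.py` presumably resolves keys per provider from the environment; one plausible shape, where the variable naming is an assumption rather than the commit's actual nomenclature:

```python
import os
from typing import Optional

def get_api_key(provider_name: str, default: Optional[str] = None) -> Optional[str]:
    """Sketch: try a provider-specific variable before falling back to a generic one."""
    specific = f"{provider_name.upper()}_API_KEY"  # assumed naming scheme
    return os.environ.get(specific) or os.environ.get("G4F_API_KEY", default)

# Example: get_api_key("DeepInfraChat") checks DEEPINFRACHAT_API_KEY first.
```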
author hlohaus
date 2025-07-10 03:28:02 +02:00
parent bdc356c4c2
commit 51b4b8bcb3
19 changed files with 1281 additions and 1041 deletions


@@ -45,6 +45,7 @@ from ...errors import ProviderNotFoundError
from ...image import is_allowed_extension, process_image, MEDIA_TYPE_MAP
from ...cookies import get_cookies_dir
from ...image.copy_images import secure_filename, get_source_url, get_media_dir, copy_media
+from ...client.service import get_model_and_provider
from ... import ChatCompletion
from ... import models
from .api import Api
@@ -302,12 +303,15 @@ class Backend_Api(Api):
})
do_filter = request.args.get("filter_markdown", request.args.get("json"))
cache_id = request.args.get('cache')
+model, provider_handler = get_model_and_provider(
+request.args.get("model"), request.args.get("provider", request.args.get("audio_provider")),
+stream=request.args.get("stream") and not do_filter and not cache_id,
+ignore_stream=not request.args.get("stream"),
+)
parameters = {
"model": request.args.get("model"),
"model": model,
"messages": [{"role": "user", "content": request.args.get("prompt")}],
"provider": request.args.get("provider", request.args.get("audio_provider", "AnyProvider")),
"stream": not do_filter and not cache_id,
"ignore_stream": not request.args.get("stream"),
"tool_calls": tool_calls,
}
if request.args.get("audio_provider") or request.args.get("audio"):
@@ -348,7 +352,7 @@ class Backend_Api(Api):
with cache_file.open("r") as f:
response = f.read()
if not response:
-response = iter_run_tools(ChatCompletion.create, **parameters)
+response = iter_run_tools(provider_handler, **parameters)
cache_dir.mkdir(parents=True, exist_ok=True)
response = cast_str(response)
response = response if isinstance(response, str) else "".join(response)
@@ -356,7 +360,7 @@ class Backend_Api(Api):
with cache_file.open("w") as f:
f.write(response)
else:
-response = cast_str(iter_run_tools(ChatCompletion.create, **parameters))
+response = cast_str(iter_run_tools(provider_handler, **parameters))
if isinstance(response, str) and "\n" not in response:
if response.startswith("/media/"):
media_dir = get_media_dir()
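
Taken together, these hunks replace the generic `ChatCompletion.create` entry point with a provider handler resolved once via `get_model_and_provider`. A simplified sketch of the new calling pattern, mirroring the diff (import paths assumed from the repository layout, argument values hypothetical):

```python
from g4f.client.service import get_model_and_provider
from g4f.tools.run_tools import iter_run_tools

# Resolve the model name and the concrete provider handler once, up front.
model, provider_handler = get_model_and_provider(
    "gpt-4o-mini",      # hypothetical model taken from the query string
    "PollinationsAI",   # hypothetical provider taken from the query string
    stream=False,
    ignore_stream=True,
)

# Drive the resolved handler directly instead of going through ChatCompletion.create.
chunks = iter_run_tools(
    provider_handler,
    model=model,
    messages=[{"role": "user", "content": "Hello"}],
    stream=False,
)
print("".join(str(chunk) for chunk in chunks))
```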