fix: improve message formatting and set default activation for providers

- In PerplexityLabs.py, build a formatted message list that skips consecutive assistant messages and keeps only entries with string content, mapping each to a {"role", "text"} pair
- In PerplexityLabs.py, send the formatted list in the "messages" field of the request payload instead of the raw message array
- In PerplexityLabs.py, append a newline to the message raised in ResponseError for failed API responses
- In BlackForestLabs_Flux1KontextDev.py, import os and fall back to the file path's basename when a media entry has no filename
- In Groq.py, set "active_by_default" to True for the provider
- In OpenRouter.py, add "active_by_default" set to True
- In Together.py, change "active_by_default" from False to True
- In HuggingFaceInference.py, set "working" to False
- In models.py, change default_model from "Qwen/Qwen2.5-72B-Instruct" to "openai/gpt-oss-120b"
- In backend_api.py, make jsonify_provider_models return a JSON 404 when get_provider_models yields None, and simplify get_provider_models to return the superclass result directly
Author: hlohaus
Date: 2025-08-06 04:46:28 +02:00
Parent: bf285b5665
Commit: e7a1bcdf54
8 changed files with 28 additions and 10 deletions

PerplexityLabs.py

@@ -66,11 +66,25 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         assert(await ws.receive_str() == "3probe")
         await ws.send_str("5")
         assert(await ws.receive_str() == "6")
+        format_messages = []
+        last_is_assistant = False
+        for message in messages:
+            if message["role"] == "assistant":
+                if last_is_assistant:
+                    continue
+                last_is_assistant = True
+            else:
+                last_is_assistant = False
+            if isinstance(message["content"], str):
+                format_messages.append({
+                    "role": message["role"],
+                    "text": message["content"]
+                })
         message_data = {
             "version": "2.18",
             "source": "default",
             "model": model,
-            "messages": [message for message in messages if isinstance(message["content"], str)],
+            "messages": format_messages
         }
         await ws.send_str("42" + json.dumps(["perplexity_labs", message_data]))
         last_message = 0
@@ -92,7 +106,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         # Handle error responses
         if message_type.endswith("_query_progress") and data.get("status") == "failed":
             error_message = data.get("text", "Unknown API error")
-            raise ResponseError(f"API Error: {error_message}")
+            raise ResponseError(f"API Error: {error_message}\n")
         # Handle normal responses
         if "output" in data:

BlackForestLabs_Flux1KontextDev.py

@@ -1,5 +1,6 @@
 from __future__ import annotations

+import os
 import uuid

 from ...typing import AsyncResult, Messages, MediaListType
@@ -83,6 +84,8 @@ class BlackForestLabs_Flux1KontextDev(AsyncGeneratorProvider, ProviderModelMixin):
         if media:
             data = FormData()
             for i in range(len(media)):
+                if media[i][1] is None and isinstance(media[i][0], str):
+                    media[i] = media[i][0], os.path.basename(media[i][0])
                 media[i] = (to_bytes(media[i][0]), media[i][1])
             for image, image_name in media:
                 data.add_field(f"files", image, filename=image_name)
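
The basename fallback can be exercised in isolation; the tuple shape (source, filename) matches the loop above, and the sample data is illustrative:

```python
import os

# Sketch of the filename fallback: when a media entry's filename is
# missing and its source is a path string, the path's basename is used.
# The sample entries below are illustrative, not from the commit.
media = [("/tmp/example.png", None), (b"\x89PNG...", "raw.png")]
for i in range(len(media)):
    if media[i][1] is None and isinstance(media[i][0], str):
        media[i] = media[i][0], os.path.basename(media[i][0])
print(media[0])  # ('/tmp/example.png', 'example.png')
```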

Groq.py

@@ -8,6 +8,7 @@ class Groq(OpenaiTemplate):
     api_base = "https://api.groq.com/openai/v1"
     working = True
     needs_auth = True
+    active_by_default = True
     default_model = "mixtral-8x7b-32768"
     fallback_models = [
         "distil-whisper-large-v3-en",

OpenRouter.py

@@ -8,4 +8,5 @@ class OpenRouter(OpenaiTemplate):
     login_url = "https://openrouter.ai/settings/keys"
     api_base = "https://openrouter.ai/api/v1"
     working = True
     needs_auth = True
+    active_by_default = True

Together.py

@@ -16,7 +16,7 @@ class Together(OpenaiTemplate):
     api_base = "https://api.together.xyz/v1"
     models_endpoint = "https://api.together.xyz/v1/models"
-    active_by_default = False
+    active_by_default = True
     working = True
     needs_auth = True
     supports_stream = True

HuggingFaceInference.py

@@ -24,7 +24,7 @@ provider_together_urls = {
 class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co"
     parent = "HuggingFace"
-    working = True
+    working = False
     default_model = default_model
     default_image_model = default_image_model

models.py

@@ -1,4 +1,4 @@
-default_model = "Qwen/Qwen2.5-72B-Instruct"
+default_model = "openai/gpt-oss-120b"
 default_image_model = "black-forest-labs/FLUX.1-dev"
 image_models = [
     default_image_model,

backend_api.py

@@ -122,6 +122,8 @@ class Backend_Api(Api):
         def jsonify_provider_models(**kwargs):
             try:
                 response = self.get_provider_models(**kwargs)
+                if response is None:
+                    return jsonify({"error": {"message": "Provider not found"}}), 404
             except MissingAuthError as e:
                 return jsonify({"error": {"message": f"{type(e).__name__}: {e}"}}), 401
             except Exception as e:
@@ -596,10 +598,7 @@ class Backend_Api(Api):
         api_key = request.headers.get("x_api_key")
         api_base = request.headers.get("x_api_base")
         ignored = request.headers.get("x_ignored", "").split()
-        models = super().get_provider_models(provider, api_key, api_base, ignored)
-        if models is None:
-            return "Provider not found", 404
-        return models
+        return super().get_provider_models(provider, api_key, api_base, ignored)

     def _format_json(self, response_type: str, content = None, **kwargs) -> str:
         """