fix: update provider status, models, error handling, and imports

- Set `working = False` in Free2GPT, Startnest, and Reka providers
- Changed `default_model` in LambdaChat from `deepseek-v3-0324` to `deepseek-r1`
- Removed the `"deepseek-v3-0324": "deepseek-v3"` entry from LambdaChat's `model_aliases` (the `deepseek-v3` upstream name now appears directly in `models`)
- In Kimi provider:
  - Imported `raise_for_status` and replaced the three manual `response.status != 200` checks (device registration, chat creation, completion stream) with `await raise_for_status(response)`
  - Set `model` field to `"k2"` in chat completion request
  - Removed unused `pass` statement
- In WeWordle provider:
  - Removed `**kwargs` from `data_payload` construction
- In Reka provider:
  - Set default value for `stream` to `True`
  - Modified `get_cookies` call to use `cache_result=False`
- In `cli/client.py`:
  - Added conditional import for `MarkItDown` with `has_markitdown` flag
  - Raised `MissingRequirementsError` if `MarkItDown` is not installed
- In `gui/server/backend_api.py`:
  - Imported `MissingAuthError`
  - Wrapped `get_provider_models` call in try-except block to return 401 if `MissingAuthError` is raised
This commit is contained in:
hlohaus
2025-07-27 18:03:54 +02:00
parent 8892b00ac1
commit f83c92446e
8 changed files with 27 additions and 24 deletions

View File

@@ -15,7 +15,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://chat10.free2gpt.xyz" url = "https://chat10.free2gpt.xyz"
working = True working = False
supports_message_history = True supports_message_history = True
default_model = 'gemini-1.5-pro' default_model = 'gemini-1.5-pro'

View File

@@ -5,7 +5,7 @@ from typing import AsyncIterator
from .base_provider import AsyncAuthedProvider, ProviderModelMixin from .base_provider import AsyncAuthedProvider, ProviderModelMixin
from ..providers.helper import get_last_user_message from ..providers.helper import get_last_user_message
from ..requests import StreamSession, see_stream from ..requests import StreamSession, see_stream, raise_for_status
from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason
from ..typing import AsyncResult, Messages from ..typing import AsyncResult, Messages
@@ -29,8 +29,7 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin):
"x-traffic-id": device_id "x-traffic-id": device_id
} }
) as response: ) as response:
if response.status != 200: await raise_for_status(response)
raise Exception("Failed to register device")
data = await response.json() data = await response.json()
if not data.get("access_token"): if not data.get("access_token"):
raise Exception("No access token received") raise Exception("No access token received")
@@ -50,7 +49,6 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin):
web_search: bool = False, web_search: bool = False,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
pass
async with StreamSession( async with StreamSession(
proxy=proxy, proxy=proxy,
impersonate="chrome", impersonate="chrome",
@@ -67,14 +65,13 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin):
"source":"web", "source":"web",
"tags":[] "tags":[]
}) as response: }) as response:
if response.status != 200: await raise_for_status(response)
raise Exception("Failed to create chat")
chat_data = await response.json() chat_data = await response.json()
conversation = JsonConversation(chat_id=chat_data.get("id")) conversation = JsonConversation(chat_id=chat_data.get("id"))
data = { data = {
"kimiplus_id": "kimi", "kimiplus_id": "kimi",
"extend": {"sidebar": True}, "extend": {"sidebar": True},
"model": model, "model": "k2",
"use_search": web_search, "use_search": web_search,
"messages": [ "messages": [
{ {
@@ -92,8 +89,7 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin):
f"https://www.kimi.com/api/chat/{conversation.chat_id}/completion/stream", f"https://www.kimi.com/api/chat/{conversation.chat_id}/completion/stream",
json=data json=data
) as response: ) as response:
if response.status != 200: await raise_for_status(response)
raise Exception("Failed to start chat completion")
async for line in see_stream(response): async for line in see_stream(response):
if line.get("event") == "cmpl": if line.get("event") == "cmpl":
yield line.get("text") yield line.get("text")

View File

@@ -22,7 +22,7 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
working = True working = True
default_model = "deepseek-v3-0324" default_model = "deepseek-r1"
models = [ models = [
"deepseek-llama3.3-70b", "deepseek-llama3.3-70b",
"deepseek-r1", "deepseek-r1",
@@ -34,15 +34,13 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin):
"lfm-40b", "lfm-40b",
"llama3.3-70b-instruct-fp8", "llama3.3-70b-instruct-fp8",
"qwen25-coder-32b-instruct", "qwen25-coder-32b-instruct",
"deepseek-v3", "deepseek-v3-0324",
default_model,
"llama-4-maverick-17b-128e-instruct-fp8", "llama-4-maverick-17b-128e-instruct-fp8",
"llama-4-scout-17b-16e-instruct", "llama-4-scout-17b-16e-instruct",
"llama3.3-70b-instruct-fp8", "llama3.3-70b-instruct-fp8",
"qwen3-32b-fp8", "qwen3-32b-fp8",
] ]
model_aliases = { model_aliases = {
"deepseek-v3-0324": "deepseek-v3",
"hermes-3": "hermes3-405b-fp8-128k", "hermes-3": "hermes3-405b-fp8-128k",
"hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"], "hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"],
"nemotron-70b": "llama3.1-nemotron-70b-instruct", "nemotron-70b": "llama3.1-nemotron-70b-instruct",

View File

@@ -18,7 +18,7 @@ class Startnest(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://play.google.com/store/apps/details?id=starnest.aitype.aikeyboard.chatbot.chatgpt" url = "https://play.google.com/store/apps/details?id=starnest.aitype.aikeyboard.chatbot.chatgpt"
api_endpoint = "https://api.startnest.uk/api/completions/stream" api_endpoint = "https://api.startnest.uk/api/completions/stream"
working = True working = False
needs_auth = False needs_auth = False
supports_stream = True supports_stream = True
supports_system_message = True supports_system_message = True

View File

@@ -92,12 +92,11 @@ class WeWordle(AsyncGeneratorProvider, ProviderModelMixin):
} }
if isinstance(messages, list) and all(isinstance(m, dict) and "role" in m and "content" in m for m in messages): if isinstance(messages, list) and all(isinstance(m, dict) and "role" in m and "content" in m for m in messages):
data_payload = {"messages": messages, "model": model, **kwargs} data_payload = {"messages": messages, "model": model}
else: else:
data_payload = { data_payload = {
"messages": messages, "messages": messages,
"model": model, "model": model
**kwargs
} }
retries = 0 retries = 0

View File

@@ -9,7 +9,7 @@ from ...image import to_bytes
class Reka(AbstractProvider): class Reka(AbstractProvider):
domain = "space.reka.ai" domain = "space.reka.ai"
url = f"https://{domain}" url = f"https://{domain}"
working = True working = False
needs_auth = True needs_auth = True
supports_stream = True supports_stream = True
default_vision_model = "reka" default_vision_model = "reka"
@@ -20,7 +20,7 @@ class Reka(AbstractProvider):
cls, cls,
model: str, model: str,
messages: Messages, messages: Messages,
stream: bool, stream: bool = True,
proxy: str = None, proxy: str = None,
api_key: str = None, api_key: str = None,
image: ImageType = None, image: ImageType = None,
@@ -29,7 +29,7 @@ class Reka(AbstractProvider):
cls.proxy = proxy cls.proxy = proxy
if not api_key: if not api_key:
cls.cookies = get_cookies(cls.domain) cls.cookies = get_cookies(cls.domain,cache_result=False)
if not cls.cookies: if not cls.cookies:
raise ValueError(f"No cookies found for {cls.domain}") raise ValueError(f"No cookies found for {cls.domain}")
elif "appSession" not in cls.cookies: elif "appSession" not in cls.cookies:

View File

@@ -16,7 +16,12 @@ from g4f.Provider import ProviderUtils
from g4f.image import extract_data_uri, is_accepted_format from g4f.image import extract_data_uri, is_accepted_format
from g4f.image.copy_images import get_media_dir from g4f.image.copy_images import get_media_dir
from g4f.client.helper import filter_markdown from g4f.client.helper import filter_markdown
from g4f.integration.markitdown import MarkItDown from g4f.errors import MissingRequirementsError
try:
from g4f.integration.markitdown import MarkItDown
has_markitdown = True
except ImportError:
has_markitdown = False
from g4f.config import CONFIG_DIR, COOKIES_DIR from g4f.config import CONFIG_DIR, COOKIES_DIR
from g4f import debug from g4f import debug
@@ -284,6 +289,8 @@ def run_client_args(args):
media.append(input_value) media.append(input_value)
else: else:
try: try:
if not has_markitdown:
raise MissingRequirementsError("MarkItDown is not installed. Install it with `pip install -U markitdown`.")
md = MarkItDown() md = MarkItDown()
text_content = md.convert_url(input_value).text_content text_content = md.convert_url(input_value).text_content
input_text += f"\n```\n{text_content}\n\nSource: {input_value}\n```\n" input_text += f"\n```\n{text_content}\n\nSource: {input_value}\n```\n"

View File

@@ -40,7 +40,7 @@ from ...providers.response import FinishReason, AudioResponse, MediaResponse, Re
from ...client.helper import filter_markdown from ...client.helper import filter_markdown
from ...tools.files import supports_filename, get_streaming, get_bucket_dir, get_tempfile from ...tools.files import supports_filename, get_streaming, get_bucket_dir, get_tempfile
from ...tools.run_tools import iter_run_tools from ...tools.run_tools import iter_run_tools
from ...errors import ProviderNotFoundError from ...errors import ProviderNotFoundError, MissingAuthError
from ...image import is_allowed_extension, process_image, MEDIA_TYPE_MAP from ...image import is_allowed_extension, process_image, MEDIA_TYPE_MAP
from ...cookies import get_cookies_dir from ...cookies import get_cookies_dir
from ...image.copy_images import secure_filename, get_source_url, get_media_dir, copy_media from ...image.copy_images import secure_filename, get_source_url, get_media_dir, copy_media
@@ -120,7 +120,10 @@ class Backend_Api(Api):
@app.route('/backend-api/v2/models/<provider>', methods=['GET']) @app.route('/backend-api/v2/models/<provider>', methods=['GET'])
def jsonify_provider_models(**kwargs): def jsonify_provider_models(**kwargs):
response = self.get_provider_models(**kwargs) try:
response = self.get_provider_models(**kwargs)
except MissingAuthError as e:
return jsonify({"error": {"message": str(e)}}), 401
return jsonify(response) return jsonify(response)
@app.route('/backend-api/v2/providers', methods=['GET']) @app.route('/backend-api/v2/providers', methods=['GET'])