diff --git a/g4f/Provider/Free2GPT.py b/g4f/Provider/Free2GPT.py index 5edb02c1..1e743da1 100644 --- a/g4f/Provider/Free2GPT.py +++ b/g4f/Provider/Free2GPT.py @@ -15,7 +15,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin class Free2GPT(AsyncGeneratorProvider, ProviderModelMixin): url = "https://chat10.free2gpt.xyz" - working = True + working = False supports_message_history = True default_model = 'gemini-1.5-pro' diff --git a/g4f/Provider/Kimi.py b/g4f/Provider/Kimi.py index 73dd619b..e98ca79c 100644 --- a/g4f/Provider/Kimi.py +++ b/g4f/Provider/Kimi.py @@ -5,7 +5,7 @@ from typing import AsyncIterator from .base_provider import AsyncAuthedProvider, ProviderModelMixin from ..providers.helper import get_last_user_message -from ..requests import StreamSession, see_stream +from ..requests import StreamSession, see_stream, raise_for_status from ..providers.response import AuthResult, TitleGeneration, JsonConversation, FinishReason from ..typing import AsyncResult, Messages @@ -29,8 +29,7 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin): "x-traffic-id": device_id } ) as response: - if response.status != 200: - raise Exception("Failed to register device") + await raise_for_status(response) data = await response.json() if not data.get("access_token"): raise Exception("No access token received") @@ -50,7 +49,6 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin): web_search: bool = False, **kwargs ) -> AsyncResult: - pass async with StreamSession( proxy=proxy, impersonate="chrome", @@ -67,14 +65,13 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin): "source":"web", "tags":[] }) as response: - if response.status != 200: - raise Exception("Failed to create chat") + await raise_for_status(response) chat_data = await response.json() conversation = JsonConversation(chat_id=chat_data.get("id")) data = { "kimiplus_id": "kimi", "extend": {"sidebar": True}, - "model": model, + "model": "k2", "use_search": web_search, "messages": [ { @@ -92,8 
+89,7 @@ class Kimi(AsyncAuthedProvider, ProviderModelMixin): f"https://www.kimi.com/api/chat/{conversation.chat_id}/completion/stream", json=data ) as response: - if response.status != 200: - raise Exception("Failed to start chat completion") + await raise_for_status(response) async for line in see_stream(response): if line.get("event") == "cmpl": yield line.get("text") diff --git a/g4f/Provider/LambdaChat.py b/g4f/Provider/LambdaChat.py index 6c835901..afdcf7da 100644 --- a/g4f/Provider/LambdaChat.py +++ b/g4f/Provider/LambdaChat.py @@ -22,7 +22,7 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin): working = True - default_model = "deepseek-v3-0324" + default_model = "deepseek-r1" models = [ "deepseek-llama3.3-70b", "deepseek-r1", @@ -34,15 +34,13 @@ class LambdaChat(AsyncGeneratorProvider, ProviderModelMixin): "lfm-40b", "llama3.3-70b-instruct-fp8", "qwen25-coder-32b-instruct", - "deepseek-v3", - default_model, + "deepseek-v3-0324", "llama-4-maverick-17b-128e-instruct-fp8", "llama-4-scout-17b-16e-instruct", "llama3.3-70b-instruct-fp8", "qwen3-32b-fp8", ] model_aliases = { - "deepseek-v3-0324": "deepseek-v3", "hermes-3": "hermes3-405b-fp8-128k", "hermes-3-405b": ["hermes3-405b-fp8-128k", "hermes-3-llama-3.1-405b-fp8"], "nemotron-70b": "llama3.1-nemotron-70b-instruct", diff --git a/g4f/Provider/Startnest.py b/g4f/Provider/Startnest.py index 461372b0..d62707b5 100755 --- a/g4f/Provider/Startnest.py +++ b/g4f/Provider/Startnest.py @@ -18,7 +18,7 @@ class Startnest(AsyncGeneratorProvider, ProviderModelMixin): url = "https://play.google.com/store/apps/details?id=starnest.aitype.aikeyboard.chatbot.chatgpt" api_endpoint = "https://api.startnest.uk/api/completions/stream" - working = True + working = False needs_auth = False supports_stream = True supports_system_message = True diff --git a/g4f/Provider/WeWordle.py b/g4f/Provider/WeWordle.py index 1887ce53..9f6f9650 100644 --- a/g4f/Provider/WeWordle.py +++ b/g4f/Provider/WeWordle.py @@ -92,12 +92,11 @@ 
class WeWordle(AsyncGeneratorProvider, ProviderModelMixin): } if isinstance(messages, list) and all(isinstance(m, dict) and "role" in m and "content" in m for m in messages): - data_payload = {"messages": messages, "model": model, **kwargs} + data_payload = {"messages": messages, "model": model} else: data_payload = { "messages": messages, - "model": model, - **kwargs + "model": model } retries = 0 diff --git a/g4f/Provider/needs_auth/Reka.py b/g4f/Provider/needs_auth/Reka.py index 3b7d5a6b..19f2759b 100644 --- a/g4f/Provider/needs_auth/Reka.py +++ b/g4f/Provider/needs_auth/Reka.py @@ -9,7 +9,7 @@ from ...image import to_bytes class Reka(AbstractProvider): domain = "space.reka.ai" url = f"https://{domain}" - working = True + working = False needs_auth = True supports_stream = True default_vision_model = "reka" @@ -20,7 +20,7 @@ class Reka(AbstractProvider): cls, model: str, messages: Messages, - stream: bool, + stream: bool = True, proxy: str = None, api_key: str = None, image: ImageType = None, @@ -29,7 +29,7 @@ class Reka(AbstractProvider): cls.proxy = proxy if not api_key: - cls.cookies = get_cookies(cls.domain) + cls.cookies = get_cookies(cls.domain, cache_result=False) if not cls.cookies: raise ValueError(f"No cookies found for {cls.domain}") elif "appSession" not in cls.cookies: diff --git a/g4f/cli/client.py b/g4f/cli/client.py index 6767e4fa..88fc16f9 100644 --- a/g4f/cli/client.py +++ b/g4f/cli/client.py @@ -16,7 +16,12 @@ from g4f.Provider import ProviderUtils from g4f.image import extract_data_uri, is_accepted_format from g4f.image.copy_images import get_media_dir from g4f.client.helper import filter_markdown -from g4f.integration.markitdown import MarkItDown +from g4f.errors import MissingRequirementsError +try: + from g4f.integration.markitdown import MarkItDown + has_markitdown = True +except ImportError: + has_markitdown = False from g4f.config import CONFIG_DIR, COOKIES_DIR from g4f import debug @@ -284,6 +289,8 @@ def run_client_args(args):
media.append(input_value) else: try: + if not has_markitdown: + raise MissingRequirementsError("MarkItDown is not installed. Install it with `pip install -U markitdown`.") md = MarkItDown() text_content = md.convert_url(input_value).text_content input_text += f"\n```\n{text_content}\n\nSource: {input_value}\n```\n" diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py index 76c9481a..6501245a 100644 --- a/g4f/gui/server/backend_api.py +++ b/g4f/gui/server/backend_api.py @@ -40,7 +40,7 @@ from ...providers.response import FinishReason, AudioResponse, MediaResponse, Re from ...client.helper import filter_markdown from ...tools.files import supports_filename, get_streaming, get_bucket_dir, get_tempfile from ...tools.run_tools import iter_run_tools -from ...errors import ProviderNotFoundError +from ...errors import ProviderNotFoundError, MissingAuthError from ...image import is_allowed_extension, process_image, MEDIA_TYPE_MAP from ...cookies import get_cookies_dir from ...image.copy_images import secure_filename, get_source_url, get_media_dir, copy_media @@ -120,7 +120,10 @@ class Backend_Api(Api): @app.route('/backend-api/v2/models/', methods=['GET']) def jsonify_provider_models(**kwargs): - response = self.get_provider_models(**kwargs) + try: + response = self.get_provider_models(**kwargs) + except MissingAuthError as e: + return jsonify({"error": {"message": str(e)}}), 401 return jsonify(response) @app.route('/backend-api/v2/providers', methods=['GET'])