diff --git a/README.md b/README.md index c0d6075f..86e553e2 100644 --- a/README.md +++ b/README.md @@ -242,11 +242,11 @@ python -m g4f --port 8080 --debug --- -### 🤖 Inference API +### 🤖 Interference API -The **Inference API** enables seamless integration with OpenAI's services through G4F, allowing you to deploy efficient AI solutions. +The **Interference API** enables seamless integration with OpenAI's services through G4F, allowing you to deploy efficient AI solutions. -- **Documentation**: [Inference API Docs](https://github.com/gpt4free/g4f.dev/blob/main/docs/inference-api.md) +- **Documentation**: [Interference API Docs](https://github.com/gpt4free/g4f.dev/blob/main/docs/interference-api.md) - **Endpoint**: `http://localhost:1337/v1` - **Swagger UI**: Explore the OpenAPI documentation via Swagger UI at `http://localhost:1337/docs` - **Provider Selection**: [How to Specify a Provider?](https://github.com/gpt4free/g4f.dev/blob/main/docs/selecting_a_provider.md) diff --git a/g4f/Provider/Copilot.py b/g4f/Provider/Copilot.py index 7bf66781..a867e230 100644 --- a/g4f/Provider/Copilot.py +++ b/g4f/Provider/Copilot.py @@ -50,8 +50,6 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin): model_aliases = { "gpt-4": default_model, "gpt-4o": default_model, - "o1": "Think Deeper", - "dall-e-3": default_model } websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2" diff --git a/g4f/Provider/needs_auth/Azure.py b/g4f/Provider/needs_auth/Azure.py index 78efd48c..42bec679 100644 --- a/g4f/Provider/needs_auth/Azure.py +++ b/g4f/Provider/needs_auth/Azure.py @@ -46,15 +46,14 @@ class Azure(OpenaiTemplate): cls, model: str, messages: Messages, + stream: bool = True, + extra_body: dict = None, api_key: str = None, api_endpoint: str = None, **kwargs ) -> AsyncResult: if not model: model = os.environ.get("AZURE_DEFAULT_MODEL", cls.default_model) - if model in cls.model_extra_body: - for key, value in cls.model_extra_body[model].items(): - kwargs.setdefault(key, 
value) if not api_key: raise ValueError(f"API key is required for Azure provider. Ask for API key in the {cls.login_url} Discord server.") if not api_endpoint: @@ -65,12 +64,21 @@ class Azure(OpenaiTemplate): raise ModelNotFoundError(f"No API endpoint found for model: {model}") if not api_endpoint: api_endpoint = os.environ.get("AZURE_API_ENDPOINT") + if extra_body is None: + if model in cls.model_extra_body: + extra_body = dict(cls.model_extra_body[model]) + else: + extra_body = {} + if stream: + extra_body.setdefault("stream_options", {"include_usage": True}) try: async for chunk in super().create_async_generator( model=model, messages=messages, + stream=stream, api_key=api_key, api_endpoint=api_endpoint, + extra_body=extra_body, **kwargs ): yield chunk diff --git a/g4f/Provider/needs_auth/CopilotAccount.py b/g4f/Provider/needs_auth/CopilotAccount.py index 0a8c61c1..3c72ae79 100644 --- a/g4f/Provider/needs_auth/CopilotAccount.py +++ b/g4f/Provider/needs_auth/CopilotAccount.py @@ -3,10 +3,8 @@ from __future__ import annotations import os from typing import AsyncIterator -from ..base_provider import AsyncAuthedProvider from ..Copilot import Copilot, readHAR, has_nodriver, get_access_token_and_cookies from ...providers.response import AuthResult, RequestLogin -from ...typing import AsyncResult, Messages from ...errors import NoValidHarFileError from ... 
import debug @@ -16,6 +14,12 @@ class CopilotAccount(Copilot): parent = "Copilot" default_model = "Copilot" default_vision_model = default_model + model_aliases = { + "gpt-4": default_model, + "gpt-4o": default_model, + "o1": "Think Deeper", + "dall-e-3": default_model + } @classmethod async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator: diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py index b7480a7f..7d3b4740 100644 --- a/g4f/Provider/template/OpenaiTemplate.py +++ b/g4f/Provider/template/OpenaiTemplate.py @@ -67,6 +67,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin stop: Union[str, list[str]] = None, stream: bool = None, prompt: str = None, + user: str = None, headers: dict = None, impersonate: str = None, download_media: bool = True, @@ -120,6 +121,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin top_p=top_p, stop=stop, stream="audio" not in extra_parameters if stream is None else stream, + user=user, **extra_parameters, **extra_body ) @@ -185,30 +187,28 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, pro yield ProviderInfo(**provider_info, model=model) model_returned = True choice = next(iter(data["choices"]), None) - if not choice: - continue - if "content" in choice["delta"] and choice["delta"]["content"]: - delta = choice["delta"]["content"] - if first: - delta = delta.lstrip() - if delta: - first = False - if reasoning: - yield Reasoning(status="") - reasoning = False - yield delta - tool_calls = choice.get("delta", {}).get("tool_calls") - if tool_calls: - yield ToolCalls(choice["delta"]["tool_calls"]) - reasoning_content = choice.get("delta", {}).get("reasoning_content") - if reasoning_content: - reasoning = True - yield Reasoning(reasoning_content) + if choice: + if "content" in choice["delta"] and choice["delta"]["content"]: + delta = choice["delta"]["content"] + if first: + delta 
= delta.lstrip() + if delta: + first = False + if reasoning: + yield Reasoning(status="") + reasoning = False + yield delta + tool_calls = choice.get("delta", {}).get("tool_calls") + if tool_calls: + yield ToolCalls(choice["delta"]["tool_calls"]) + reasoning_content = choice.get("delta", {}).get("reasoning_content") + if reasoning_content: + reasoning = True + yield Reasoning(reasoning_content) if "usage" in data and data["usage"]: yield Usage(**data["usage"]) if choice and choice.get("finish_reason") is not None: yield FinishReason(choice["finish_reason"]) - break else: await raise_for_status(response) async for chunk in save_response_media(response, prompt, [model]): diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py index 871a05e3..a3ac8947 100644 --- a/g4f/api/__init__.py +++ b/g4f/api/__init__.py @@ -175,7 +175,7 @@ class ErrorResponse(Response): class AppConfig: ignored_providers: Optional[list[str]] = None - g4f_api_key: Optional[str] = None + g4f_api_key: Optional[str] = os.environ.get("G4F_API_KEY", None) ignore_cookie_files: bool = False model: str = None provider: str = None @@ -188,7 +188,8 @@ class AppConfig: @classmethod def set_config(cls, **data): for key, value in data.items(): - setattr(cls, key, value) + if value is not None: + setattr(cls, key, value) def update_headers(request: Request, user: str) -> Request: new_headers = request.headers.mutablecopy() diff --git a/g4f/gui/server/backend_api.py b/g4f/gui/server/backend_api.py index 82c2ee6d..cee1de79 100644 --- a/g4f/gui/server/backend_api.py +++ b/g4f/gui/server/backend_api.py @@ -213,8 +213,9 @@ class Backend_Api(Api): cache_dir = Path(get_cookies_dir()) / ".usage" cache_file = cache_dir / f"{datetime.date.today()}.jsonl" cache_dir.mkdir(parents=True, exist_ok=True) + data = {**request.json, "user": request.headers.get("x-user", "unknown")} with cache_file.open("a" if cache_file.exists() else "w") as f: - f.write(f"{json.dumps(request.json)}\n") + f.write(f"{json.dumps(data)}\n") return 
{} @app.route('/backend-api/v2/usage/', methods=['GET']) diff --git a/g4f/providers/any_model_map.py b/g4f/providers/any_model_map.py index e638de1e..81de832d 100644 --- a/g4f/providers/any_model_map.py +++ b/g4f/providers/any_model_map.py @@ -27,7 +27,7 @@ model_map = { "Yqcloud": "gpt-4", "WeWordle": "gpt-4", "OpenaiChat": "gpt-4", - "CopilotAccount": "Copilot", + "Copilot": "Copilot", "HarProvider": [ "gpt-4-1106-preview", "gpt-4-0125-preview", @@ -52,7 +52,7 @@ model_map = { "Blackbox": "gpt-4o", "PollinationsAI": "openai", "OpenaiChat": "gpt-4o", - "CopilotAccount": "Copilot", + "Copilot": "Copilot", "HarProvider": [ "chatgpt-4o-latest-20250326", "chatgpt-4o-latest-20250129", diff --git a/g4f/providers/any_provider.py b/g4f/providers/any_provider.py index ae8c0a56..899f101d 100644 --- a/g4f/providers/any_provider.py +++ b/g4f/providers/any_provider.py @@ -9,7 +9,7 @@ from ..image import is_data_an_audio from ..providers.retry_provider import IterListProvider from ..Provider.needs_auth import OpenaiChat, CopilotAccount from ..Provider.hf_space import HuggingSpace -from ..Provider import Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS +from ..Provider import Copilot, Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer0501, OIVSCodeSer2, TeachAnything, OperaAria, Startnest from ..Provider import WeWordle, Yqcloud, Chatai, ImageLabs, LegacyLMArena, LMArenaBeta, Free2GPT from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM @@ -29,7 +29,7 @@ PROVIERS_LIST_1 = [ ] PROVIERS_LIST_2 = [ - OpenaiChat, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok + OpenaiChat, Copilot, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok ] PROVIERS_LIST_3 = [ @@ -129,7 +129,7 @@ class AnyModelProviderMixin(ProviderModelMixin): if not provider.working: continue try: - if 
provider == CopilotAccount: + if provider in [Copilot, CopilotAccount]: for model in provider.model_aliases.keys(): if model not in cls.model_map: cls.model_map[model] = {}