fix: correct typo in API section title, update links, and adjust provider aliases

- Changed "Inference API" to "Interference API" and updated corresponding documentation links in README.md
- Removed "o1" and "dall-e-3" entries from Copilot.py model_aliases
- Added "stream" and "extra_body" parameters with default values in Azure.py's create_async_generator method
- In CopilotAccount.py, included model_aliases with "gpt-4", "gpt-4o", "o1", and "dall-e-3"
- Updated the provider comparison in any_provider.py from "==" to an "in" list so both Copilot and CopilotAccount contribute model aliases, and imported Copilot into the provider lists
- Replaced "CopilotAccount" with "Copilot" in the model_map entries
- Modified g4f/api/__init__.py to default g4f_api_key from the G4F_API_KEY environment variable, and to skip None values in AppConfig.set_config
- In backend_api.py, added a "user" field (taken from the "x-user" request header, default "unknown") to the cached usage data
- Changed OpenaiTemplate.py read_response to process deltas inside an "if choice:" block instead of skipping chunks without choices, so trailing usage-only chunks are still handled, and removed the early "break" after the finish reason
- Added a "user" parameter to OpenaiTemplate.py's create_async_generator and passed it through to the request payload
hlohaus
2025-08-01 00:18:29 +02:00
parent f246e7cfa8
commit d4b46f34de
9 changed files with 50 additions and 38 deletions

README.md

@@ -242,11 +242,11 @@ python -m g4f --port 8080 --debug
 ---
-### 🤖 Inference API
+### 🤖 Interference API
-The **Inference API** enables seamless integration with OpenAI's services through G4F, allowing you to deploy efficient AI solutions.
+The **Interference API** enables seamless integration with OpenAI's services through G4F, allowing you to deploy efficient AI solutions.
-- **Documentation**: [Inference API Docs](https://github.com/gpt4free/g4f.dev/blob/main/docs/inference-api.md)
+- **Documentation**: [Interference API Docs](https://github.com/gpt4free/g4f.dev/blob/main/docs/interference-api.md)
 - **Endpoint**: `http://localhost:1337/v1`
 - **Swagger UI**: Explore the OpenAPI documentation via Swagger UI at `http://localhost:1337/docs`
 - **Provider Selection**: [How to Specify a Provider?](https://github.com/gpt4free/g4f.dev/blob/main/docs/selecting_a_provider.md)
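
For context, the renamed docs describe the same OpenAI-compatible endpoint. A minimal sketch of calling it with the official `openai` Python client; the model name and key are illustrative, and a local g4f server on port 1337 is assumed:

```python
# Minimal sketch: query the local Interference API with the OpenAI client.
# Assumes `pip install openai` and a running g4f server (illustrative values).
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:1337/v1",  # endpoint from the README
    api_key="secret",  # placeholder; only checked if g4f_api_key is configured
)

completion = client.chat.completions.create(
    model="gpt-4o",  # illustrative model name
    messages=[{"role": "user", "content": "Hello"}],
)
print(completion.choices[0].message.content)
```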

g4f/Provider/Copilot.py

@@ -50,8 +50,6 @@ class Copilot(AsyncAuthedProvider, ProviderModelMixin):
     model_aliases = {
         "gpt-4": default_model,
         "gpt-4o": default_model,
-        "o1": "Think Deeper",
-        "dall-e-3": default_model
     }
     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"

Azure.py

@@ -46,15 +46,14 @@ class Azure(OpenaiTemplate):
         cls,
         model: str,
         messages: Messages,
+        stream: bool = True,
+        extra_body: dict = None,
         api_key: str = None,
         api_endpoint: str = None,
         **kwargs
     ) -> AsyncResult:
         if not model:
             model = os.environ.get("AZURE_DEFAULT_MODEL", cls.default_model)
-        if model in cls.model_extra_body:
-            for key, value in cls.model_extra_body[model].items():
-                kwargs.setdefault(key, value)
         if not api_key:
             raise ValueError(f"API key is required for Azure provider. Ask for API key in the {cls.login_url} Discord server.")
         if not api_endpoint:
@@ -65,12 +64,21 @@ class Azure(OpenaiTemplate):
                 raise ModelNotFoundError(f"No API endpoint found for model: {model}")
         if not api_endpoint:
             api_endpoint = os.environ.get("AZURE_API_ENDPOINT")
+        if extra_body is None:
+            if model in cls.model_extra_body:
+                extra_body = cls.model_extra_body[model]
+            else:
+                extra_body = {}
+        if stream:
+            extra_body.setdefault("stream_options", {"include_usage": True})
         try:
             async for chunk in super().create_async_generator(
                 model=model,
                 messages=messages,
+                stream=stream,
                 api_key=api_key,
                 api_endpoint=api_endpoint,
+                extra_body=extra_body,
                 **kwargs
             ):
                 yield chunk
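
The reordered defaulting logic reads cleanly in isolation. A simplified, standalone sketch with the class attributes inlined as plain dicts; the dict contents are illustrative:

```python
# Standalone sketch of the new Azure defaulting behavior (illustrative data).
model_extra_body = {"gpt-4o": {"temperature": 0.7}}  # stand-in for cls.model_extra_body

def build_request(model: str, stream: bool = True, extra_body: dict = None) -> dict:
    # Per-model defaults now populate extra_body instead of kwargs.
    if extra_body is None:
        extra_body = dict(model_extra_body.get(model, {}))
    # Streaming requests ask the backend to append a usage chunk.
    if stream:
        extra_body.setdefault("stream_options", {"include_usage": True})
    return {"model": model, "stream": stream, **extra_body}

print(build_request("gpt-4o"))
# {'model': 'gpt-4o', 'stream': True, 'temperature': 0.7,
#  'stream_options': {'include_usage': True}}
```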

g4f/Provider/needs_auth/CopilotAccount.py

@@ -3,10 +3,8 @@ from __future__ import annotations
 import os
 from typing import AsyncIterator
 from ..base_provider import AsyncAuthedProvider
 from ..Copilot import Copilot, readHAR, has_nodriver, get_access_token_and_cookies
 from ...providers.response import AuthResult, RequestLogin
 from ...typing import AsyncResult, Messages
 from ...errors import NoValidHarFileError
 from ... import debug
@@ -16,6 +14,12 @@ class CopilotAccount(Copilot):
parent = "Copilot"
default_model = "Copilot"
default_vision_model = default_model
model_aliases = {
"gpt-4": default_model,
"gpt-4o": default_model,
"o1": "Think Deeper",
"dall-e-3": default_model
}
@classmethod
async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
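
With the aliases moved here, "o1" and "dall-e-3" now resolve only via the authenticated account provider. An illustrative lookup follows; it is a simplified stand-in, not the library's actual `get_model` implementation:

```python
# Simplified stand-in for alias resolution (not the real ProviderModelMixin code).
default_model = "Copilot"
model_aliases = {
    "gpt-4": default_model,
    "gpt-4o": default_model,
    "o1": "Think Deeper",
    "dall-e-3": default_model,
}

def resolve(model: str) -> str:
    # Map a public model name to the provider's internal one; unknown
    # names pass through unchanged in this sketch.
    return model_aliases.get(model, model)

assert resolve("o1") == "Think Deeper"
assert resolve("gpt-4") == "Copilot"
```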

OpenaiTemplate.py

@@ -67,6 +67,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
         stop: Union[str, list[str]] = None,
         stream: bool = None,
         prompt: str = None,
+        user: str = None,
         headers: dict = None,
         impersonate: str = None,
         download_media: bool = True,
@@ -120,6 +121,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
                 top_p=top_p,
                 stop=stop,
                 stream="audio" not in extra_parameters if stream is None else stream,
+                user=user,
                 **extra_parameters,
                 **extra_body
             )
@@ -185,8 +187,7 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, pro
                        yield ProviderInfo(**provider_info, model=model)
                        model_returned = True
                    choice = next(iter(data["choices"]), None)
-                    if not choice:
-                        continue
+                    if choice:
                        if "content" in choice["delta"] and choice["delta"]["content"]:
                            delta = choice["delta"]["content"]
                            if first:
@@ -208,7 +209,6 @@ async def read_response(response: StreamResponse, stream: bool, prompt: str, pro
                        yield Usage(**data["usage"])
                    if choice and choice.get("finish_reason") is not None:
                        yield FinishReason(choice["finish_reason"])
-                        break
    else:
        await raise_for_status(response)
        async for chunk in save_response_media(response, prompt, [model]):
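
The inverted guard matters for streams that end with a usage-only chunk (enabled by the `stream_options` change in Azure.py): such chunks have an empty `choices` list and were previously skipped by the `continue`. A reduced sketch of the new flow:

```python
# Reduced sketch: a chunk with no choices no longer short-circuits the loop,
# so a trailing usage-only chunk is still surfaced.
def handle_chunk(data: dict):
    choice = next(iter(data.get("choices", [])), None)
    if choice:
        content = choice.get("delta", {}).get("content")
        if content:
            yield content
    if "usage" in data:
        yield {"usage": data["usage"]}
    if choice and choice.get("finish_reason") is not None:
        yield {"finish_reason": choice["finish_reason"]}

print(list(handle_chunk({"choices": [], "usage": {"total_tokens": 42}})))
# [{'usage': {'total_tokens': 42}}]
```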

g4f/api/__init__.py

@@ -175,7 +175,7 @@ class ErrorResponse(Response):
 class AppConfig:
     ignored_providers: Optional[list[str]] = None
-    g4f_api_key: Optional[str] = None
+    g4f_api_key: Optional[str] = os.environ.get("G4F_API_KEY", None)
     ignore_cookie_files: bool = False
     model: str = None
     provider: str = None
@@ -188,6 +188,7 @@ class AppConfig:
     @classmethod
     def set_config(cls, **data):
         for key, value in data.items():
+            if value is not None:
                 setattr(cls, key, value)
 
 def update_headers(request: Request, user: str) -> Request:
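
The new `if value is not None` guard keeps unset CLI options from overwriting the environment-derived default. A minimal self-contained sketch:

```python
import os

class AppConfig:
    # Default now comes from the environment instead of plain None.
    g4f_api_key = os.environ.get("G4F_API_KEY", None)

    @classmethod
    def set_config(cls, **data):
        for key, value in data.items():
            # Skip None so an unset CLI flag leaves the env default intact.
            if value is not None:
                setattr(cls, key, value)

AppConfig.set_config(g4f_api_key=None)   # no-op: env default survives
AppConfig.set_config(g4f_api_key="abc")  # explicit value still wins
```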

backend_api.py

@@ -213,8 +213,9 @@ class Backend_Api(Api):
             cache_dir = Path(get_cookies_dir()) / ".usage"
             cache_file = cache_dir / f"{datetime.date.today()}.jsonl"
             cache_dir.mkdir(parents=True, exist_ok=True)
+            data = {**request.json, "user": request.headers.get("x-user", "unknown")}
             with cache_file.open("a" if cache_file.exists() else "w") as f:
-                f.write(f"{json.dumps(request.json)}\n")
+                f.write(f"{json.dumps(data)}\n")
             return {}
 
         @app.route('/backend-api/v2/usage/<date>', methods=['GET'])
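
A standalone sketch of the logging path with the new field; the `cache_root` parameter is hypothetical, as the real code derives the directory from `get_cookies_dir()`:

```python
# Standalone sketch of the per-day JSONL usage log with the new "user" field.
import datetime
import json
from pathlib import Path

def log_usage(payload: dict, headers: dict, cache_root: str = ".") -> None:
    cache_dir = Path(cache_root) / ".usage"
    cache_file = cache_dir / f"{datetime.date.today()}.jsonl"
    cache_dir.mkdir(parents=True, exist_ok=True)
    # Each record now carries the requesting user from the x-user header.
    data = {**payload, "user": headers.get("x-user", "unknown")}
    with cache_file.open("a") as f:  # "a" also creates the file if missing
        f.write(f"{json.dumps(data)}\n")

log_usage({"model": "gpt-4o", "tokens": 42}, {"x-user": "alice"})
```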

View File

@@ -27,7 +27,7 @@ model_map = {
"Yqcloud": "gpt-4",
"WeWordle": "gpt-4",
"OpenaiChat": "gpt-4",
"CopilotAccount": "Copilot",
"Copilot": "Copilot",
"HarProvider": [
"gpt-4-1106-preview",
"gpt-4-0125-preview",
@@ -52,7 +52,7 @@ model_map = {
"Blackbox": "gpt-4o",
"PollinationsAI": "openai",
"OpenaiChat": "gpt-4o",
"CopilotAccount": "Copilot",
"Copilot": "Copilot",
"HarProvider": [
"chatgpt-4o-latest-20250326",
"chatgpt-4o-latest-20250129",

any_provider.py

@@ -9,7 +9,7 @@ from ..image import is_data_an_audio
 from ..providers.retry_provider import IterListProvider
 from ..Provider.needs_auth import OpenaiChat, CopilotAccount
 from ..Provider.hf_space import HuggingSpace
-from ..Provider import Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
+from ..Provider import Copilot, Cloudflare, Gemini, GeminiPro, Grok, DeepSeekAPI, PerplexityLabs, LambdaChat, PollinationsAI, PuterJS
 from ..Provider import Microsoft_Phi_4_Multimodal, DeepInfraChat, Blackbox, OIVSCodeSer0501, OIVSCodeSer2, TeachAnything, OperaAria, Startnest
 from ..Provider import WeWordle, Yqcloud, Chatai, ImageLabs, LegacyLMArena, LMArenaBeta, Free2GPT
 from ..Provider import EdgeTTS, gTTS, MarkItDown, OpenAIFM
@@ -29,7 +29,7 @@ PROVIERS_LIST_1 = [
 ]
 
 PROVIERS_LIST_2 = [
-    OpenaiChat, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok
+    OpenaiChat, Copilot, CopilotAccount, PollinationsAI, PerplexityLabs, Gemini, Grok
 ]
 
 PROVIERS_LIST_3 = [
@@ -129,7 +129,7 @@ class AnyModelProviderMixin(ProviderModelMixin):
             if not provider.working:
                 continue
             try:
-                if provider == CopilotAccount:
+                if provider in [Copilot, CopilotAccount]:
                     for model in provider.model_aliases.keys():
                         if model not in cls.model_map:
                             cls.model_map[model] = {}
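
The membership test lets both Copilot variants feed their aliases into the shared model map. A reduced sketch of the collection loop, with the provider classes stubbed out:

```python
# Reduced sketch: both Copilot providers now contribute aliases (stub classes).
class Copilot:
    working = True
    model_aliases = {"gpt-4": "Copilot", "gpt-4o": "Copilot"}

class CopilotAccount(Copilot):
    model_aliases = {"gpt-4": "Copilot", "o1": "Think Deeper", "dall-e-3": "Copilot"}

model_map: dict[str, dict] = {}
for provider in (Copilot, CopilotAccount):
    if not provider.working:
        continue
    # Was: `if provider == CopilotAccount:`, which skipped Copilot itself.
    if provider in [Copilot, CopilotAccount]:
        for model in provider.model_aliases.keys():
            if model not in model_map:
                model_map[model] = {}

print(sorted(model_map))  # ['dall-e-3', 'gpt-4', 'gpt-4o', 'o1']
```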