Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-10-07 09:11:38 +08:00
Refactor build scripts and API to enhance model handling and improve timeout functionality
.github/workflows/build-packages.yml (vendored): 16 changed lines
@@ -64,7 +64,7 @@ jobs:
           name: pypi-package
           path: dist/
 
-  # Windows Executables with Nuitka
+  # Windows Executables
   build-windows-exe:
     runs-on: windows-latest
     needs: prepare
@@ -128,7 +128,7 @@ jobs:
           name: windows-exe-${{ matrix.architecture }}
           path: dist/g4f-windows-*.zip
 
-  # Linux Executables with Nuitka
+  # Linux Executables
   build-linux-exe:
     runs-on: ubuntu-latest
     needs: prepare
@@ -136,9 +136,11 @@ jobs:
       matrix:
        include:
          - architecture: x64
+           runner: ubuntu-latest
            runner-arch: x86_64
-          # Note: ARM64 cross-compilation requires additional setup
-          # Keeping architecture in matrix for future expansion
+         - architecture: arm64
+           runner: buildjet-4vcpu-ubuntu-2204-arm
+           runner-arch: aarch64
     steps:
       - uses: actions/checkout@v4
       - name: Set up Python
@@ -148,7 +150,7 @@ jobs:
       - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
-         pip install -r requirements-slim.txt
+         pip install -r requirements.txt
          pip install nuitka
          pip install -e .
      - name: Write g4f_cli.py
@@ -181,7 +183,7 @@ jobs:
           name: linux-exe-${{ matrix.architecture }}
           path: dist/g4f-linux-*
 
-  # macOS Executables with Nuitka
+  # macOS Executables
   build-macos-exe:
     runs-on: macos-latest
     needs: prepare
@@ -234,7 +236,7 @@ jobs:
           name: macos-exe-${{ matrix.architecture }}
           path: dist/g4f-macos-*
 
-  # Docker Images (reuse existing workflow logic)
+  # Docker Images
   build-docker:
     runs-on: ubuntu-latest
     needs: prepare

@@ -14,10 +14,16 @@ try:
 except ImportError:
     has_curl_cffi = False
 
+try:
+    import nodriver
+    has_nodriver = True
+except ImportError:
+    has_nodriver = False
+
 from ...typing import AsyncResult, Messages, MediaListType
-from ...requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies, has_nodriver
+from ...requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
 from ...errors import ModelNotFoundError, CloudflareError, MissingAuthError
-from ...providers.response import FinishReason, Usage, JsonConversation, ImageResponse
+from ...providers.response import FinishReason, Usage, JsonConversation, ImageResponse, Reasoning
 from ...tools.media import merge_media
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin,AuthFileMixin
 from ..helper import get_last_user_message
@@ -416,6 +422,22 @@ text_models = {model["publicName"]: model["id"] for model in models if "text" in
 image_models = {model["publicName"]: model["id"] for model in models if "image" in model["capabilities"]["outputCapabilities"]}
 vision_models = [model["publicName"] for model in models if "image" in model["capabilities"]["inputCapabilities"]]
 
+if has_nodriver:
+    async def click_trunstile(page: nodriver.Tab, element = 'document.getElementById("cf-turnstile")'):
+        for _ in range(3):
+            size = None
+            for idx in range(15):
+                size = await page.js_dumps(f'{element}?.getBoundingClientRect()||{{}}')
+                debug.log(f"Found size: {size.get('x'), size.get('y')}")
+                if "x" not in size:
+                    break
+                await page.flash_point(size.get("x") + idx * 3, size.get("y") + idx * 3)
+                await page.mouse_click(size.get("x") + idx * 3, size.get("y") + idx * 3)
+                await asyncio.sleep(2)
+            if "x" not in size:
+                break
+        debug.log("Finished clicking trunstile.")
+
 class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     label = "LMArena"
     url = "https://lmarena.ai"
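
For orientation, a minimal sketch of how this new helper is driven from a nodriver page callback (illustrative only, not part of the commit; the actual call sites appear in the hunks below):

```python
# Assumed usage: click the default "#cf-turnstile" element, or pass a
# different DOM query string for another overlay container.
async def callback(page):
    await click_trunstile(page)
    await click_trunstile(page, 'document.querySelector(\'[style="display: grid;"]\')')
```
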
@@ -423,6 +445,7 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
     api_endpoint = "https://lmarena.ai/nextjs-api/stream/create-evaluation"
     working = True
     active_by_default = True
+    use_stream_timeout = False
 
     default_model = list(text_models.keys())[0]
     models = list(text_models) + list(image_models)
@@ -496,6 +519,9 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                 pass
             elif has_nodriver or cls.share_url is None:
                 async def callback(page):
+                    element = await page.select('[style="display: grid;"]')
+                    if element:
+                        await click_trunstile(page, 'document.querySelector(\'[style="display: grid;"]\')')
                     await page.find("Ask anything…", 120)
                     button = await page.find("Accept Cookies")
                     if button:
@@ -507,19 +533,7 @@ class LMArena(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                     await page.select('#cf-turnstile', 300)
                     debug.log("Found Element: 'cf-turnstile'")
                     await asyncio.sleep(3)
-                    for _ in range(3):
-                        size = None
-                        for idx in range(15):
-                            size = await page.js_dumps('document.getElementById("cf-turnstile")?.getBoundingClientRect()||{}')
-                            debug.log("Found size:", {size.get("x"), size.get("y")})
-                            if "x" not in size:
-                                break
-                            await page.flash_point(size.get("x") + idx * 2, size.get("y") + idx * 2)
-                            await page.mouse_click(size.get("x") + idx * 2, size.get("y") + idx * 2)
-                            await asyncio.sleep(1)
-                        if "x" not in size:
-                            break
-                    debug.log("Clicked on the turnstile.")
+                    await click_trunstile(page)
                     while not await page.evaluate('document.cookie.indexOf("arena-auth-prod-v1") >= 0'):
                         await asyncio.sleep(1)
                     while not await page.evaluate('document.querySelector(\'textarea\')'):

@@ -70,6 +70,7 @@ from g4f.cookies import read_cookie_files, get_cookies_dir
 from g4f.providers.types import ProviderType
 from g4f.providers.response import AudioResponse
 from g4f.providers.any_provider import AnyProvider
+from g4f.providers.any_model_map import model_map, vision_models, image_models, audio_models, video_models
 from g4f import Provider
 from g4f.gui import get_gui_app
 from .stubs import (
@@ -356,6 +357,21 @@ class Api:
             })
         async def models(provider: str, credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None):
             if provider not in Provider.__map__:
+                if provider in model_map:
+                    return {
+                        "object": "list",
+                        "data": [{
+                            "id": provider,
+                            "object": "model",
+                            "created": 0,
+                            "owned_by": getattr(provider, "label", provider.__name__),
+                            "image": provider in image_models,
+                            "vision": provider in vision_models,
+                            "audio": provider in audio_models,
+                            "video": provider in video_models,
+                            "type": "image" if provider in image_models else "chat",
+                        }]
+                    }
                 return ErrorResponse.from_message("The provider does not exist.", 404)
             provider: ProviderType = Provider.__map__[provider]
             if not hasattr(provider, "get_models"):
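
The added branch lets the models listing accept a bare model id in place of a provider name and answer with a single synthetic entry instead of a 404. A rough sketch of that dispatch under assumed names (illustrative, not taken from the commit):

```python
def list_models_for(segment: str, provider_map: dict, model_map: dict) -> list:
    """Resolve a path segment that may be either a provider name or a model id."""
    if segment in provider_map:
        return provider_map[segment].get_models()   # normal provider listing
    if segment in model_map:
        return [segment]                            # single synthetic entry
    raise LookupError("The provider does not exist.")
```
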
@@ -415,6 +431,11 @@ class Api:
             conversation_id: str = None,
             x_user: Annotated[str | None, Header()] = None
         ):
+            if provider is not None and provider not in Provider.__map__:
+                if provider in model_map:
+                    config.model = provider
+                    provider = None
+                return ErrorResponse.from_message("Invalid provider.", HTTP_404_NOT_FOUND)
             try:
                 if config.provider is None:
                     config.provider = AppConfig.provider if provider is None else provider
@@ -500,58 +521,6 @@ class Api:
                 logger.exception(e)
                 return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
 
-        responses = {
-            HTTP_200_OK: {"model": ClientResponse},
-            HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
-            HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
-            HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
-            HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
-        }
-        @self.app.post("/v1/responses", responses=responses)
-        async def v1_responses(
-            config: ResponsesConfig,
-            credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None,
-            provider: str = None
-        ):
-            try:
-                if config.provider is None:
-                    config.provider = AppConfig.provider if provider is None else provider
-                if config.api_key is None and credentials is not None and credentials.credentials != "secret":
-                    config.api_key = credentials.credentials
-
-                conversation = None
-                if config.conversation is not None:
-                    conversation = JsonConversation(**config.conversation)
-
-                return await self.client.responses.create(
-                    **filter_none(
-                        **{
-                            "model": AppConfig.model,
-                            "proxy": AppConfig.proxy,
-                            **config.dict(exclude_none=True),
-                            "conversation": conversation
-                        },
-                        ignored=AppConfig.ignored_providers
-                    ),
-                )
-            except (ModelNotFoundError, ProviderNotFoundError) as e:
-                logger.exception(e)
-                return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
-            except (MissingAuthError, NoValidHarFileError) as e:
-                logger.exception(e)
-                return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
-            except Exception as e:
-                logger.exception(e)
-                return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
-
-        @self.app.post("/api/{provider}/responses", responses=responses)
-        async def provider_responses(
-            provider: str,
-            config: ChatCompletionsConfig,
-            credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None,
-        ):
-            return await v1_responses(config, credentials, provider)
-
         responses = {
             HTTP_200_OK: {"model": ImagesResponse},
             HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
@@ -568,6 +537,11 @@ class Api:
             provider: str = None,
             credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None
         ):
+            if provider is not None and provider not in Provider.__map__:
+                if provider in model_map:
+                    config.model = provider
+                    provider = None
+                return ErrorResponse.from_message("Invalid provider.", HTTP_404_NOT_FOUND)
             if config.provider is None:
                 config.provider = provider
             if config.provider is None:
@@ -646,6 +620,11 @@ class Api:
             prompt: Annotated[Optional[str], Form()] = "Transcribe this audio"
         ):
             provider = provider if path_provider is None else path_provider
+            if provider is not None and provider not in Provider.__map__:
+                if provider in model_map:
+                    model = provider
+                    provider = None
+                return ErrorResponse.from_message("Invalid provider.", HTTP_404_NOT_FOUND)
             kwargs = {"modalities": ["text"]}
             if provider == "MarkItDown":
                 kwargs = {
@@ -686,6 +665,11 @@ class Api:
             api_key = None
             if credentials is not None and credentials.credentials != "secret":
                 api_key = credentials.credentials
+            if provider is not None and provider not in Provider.__map__:
+                if provider in model_map:
+                    config.model = provider
+                    provider = None
+                return ErrorResponse.from_message("Invalid provider.", HTTP_404_NOT_FOUND)
             try:
                 audio = filter_none(voice=config.voice, format=config.response_format, language=config.language)
                 response = await self.client.chat.completions.create(
@@ -744,11 +728,6 @@ class Api:
             read_cookie_files()
             return response_data
 
-        @self.app.post("/json/{filename}")
-        async def get_json(filename, request: Request):
-            await asyncio.sleep(30)
-            return ""
-
         @self.app.get("/images/{filename}", responses={
             HTTP_200_OK: {"content": {"image/*": {}}},
             HTTP_404_NOT_FOUND: {}
@@ -854,7 +833,7 @@ class Api:
         return await get_media(filename, request, True)
 
 def format_exception(e: Union[Exception, str], config: Union[ChatCompletionsConfig, ImageGenerationConfig] = None, image: bool = False) -> str:
-    last_provider = {} if not image else g4f.get_last_provider(True)
+    last_provider = {}
     provider = (AppConfig.media_provider if image else AppConfig.provider)
     model = AppConfig.model
     if config is not None:
@@ -883,23 +862,23 @@ def run_api(
     **kwargs
 ) -> None:
     print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else ""))
 
     if use_colors is None:
         use_colors = debug
 
     if bind is not None:
         host, port = bind.split(":")
 
     if port is None:
         port = DEFAULT_PORT
 
     if AppConfig.demo and debug:
         method = "create_app_with_demo_and_debug"
     elif AppConfig.gui and debug:
         method = "create_app_with_gui_and_debug"
     else:
         method = "create_app_debug" if debug else "create_app"
 
     uvicorn.run(
         f"g4f.api:{method}",
         host=host,

@@ -22,8 +22,10 @@ from ...providers.base_provider import ProviderModelMixin
 from ...providers.retry_provider import BaseRetryProvider
 from ...providers.helper import format_media_prompt
 from ...providers.response import *
+from ...providers.any_model_map import model_map
+from ...providers.any_provider import AnyProvider
+from ...client.service import get_model_and_provider
 from ... import version, models
-from ... import ChatCompletion, get_model_and_provider
 from ... import debug
 
 logger = logging.getLogger(__name__)
@@ -47,11 +49,11 @@ class Api:
 
     @staticmethod
     def get_provider_models(provider: str, api_key: str = None, api_base: str = None, ignored: list = None):
-        def get_model_data(provider: ProviderModelMixin, model: str):
+        def get_model_data(provider: ProviderModelMixin, model: str, default: bool = False) -> dict:
             return {
                 "model": model,
                 "label": model.split(":")[-1] if provider.__name__ == "AnyProvider" and not model.startswith("openrouter:") else model,
-                "default": model == provider.default_model,
+                "default": default or model == provider.default_model,
                 "vision": model in provider.vision_models,
                 "audio": False if provider.audio_models is None else model in provider.audio_models,
                 "video": model in provider.video_models,
@@ -78,6 +80,9 @@ class Api:
                 get_model_data(provider, model)
                 for model in models
             ]
+        elif provider in model_map:
+            return [get_model_data(AnyProvider, provider, True)]
+
         return []
 
     @staticmethod
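
Sketch of what the new `elif` branch returns for a bare model id, abridged to the fields visible in the hunk above (values are illustrative and depend on AnyProvider's capability lists):

```python
# Roughly: Api.get_provider_models("gpt-oss-120b") now yields one entry
# built from AnyProvider instead of an empty list.
[{
    "model": "gpt-oss-120b",
    "label": "gpt-oss-120b",
    "default": True,   # forced by the new default=True argument
    "vision": False,
    "audio": False,
    "video": False,
}]
```
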
@@ -144,10 +149,10 @@ class Api:
 
     def _prepare_conversation_kwargs(self, json_data: dict):
         kwargs = {**json_data}
-        model = json_data.get('model')
-        provider = json_data.get('provider')
-        messages = json_data.get('messages')
-        action = json_data.get('action')
+        model = kwargs.pop('model', None)
+        provider = kwargs.pop('provider', None)
+        messages = kwargs.pop('messages', None)
+        action = kwargs.get('action')
         if action == "continue":
             kwargs["tool_calls"].append({
                 "function": {
@@ -155,7 +160,7 @@ class Api:
                 },
                 "type": "function"
             })
-        conversation = json_data.get("conversation")
+        conversation = kwargs.pop("conversation", None)
         if isinstance(conversation, dict):
             kwargs["conversation"] = JsonConversation(**conversation)
         return {
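
The switch from `json_data.get(...)` to `kwargs.pop(...)` matters because `kwargs` starts as a copy of the whole payload; popping the keys that are handled explicitly keeps them from also riding along inside `kwargs` when it is forwarded later (presumably the reason for the change). A small illustration with a hypothetical payload:

```python
json_data = {"model": "gpt-4", "provider": "SomeProvider",  # hypothetical values
             "messages": [], "conversation": {"id": "1"}, "temperature": 0.7}
kwargs = {**json_data}
model = kwargs.pop("model", None)
provider = kwargs.pop("provider", None)
messages = kwargs.pop("messages", None)
conversation = kwargs.pop("conversation", None)
print(kwargs)   # only pass-through options remain: {'temperature': 0.7}
```
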
@@ -174,10 +179,9 @@ class Api:
         if "user" not in kwargs:
             debug.log = decorated_log
         proxy = os.environ.get("G4F_PROXY")
-        provider = kwargs.pop("provider", None)
         try:
             model, provider_handler = get_model_and_provider(
-                kwargs.get("model"), provider,
+                kwargs.get("model"), provider or AnyProvider,
                 has_images="media" in kwargs,
             )
             if "user" in kwargs:

@@ -47,6 +47,8 @@ from ...image import is_allowed_extension, process_image, MEDIA_TYPE_MAP
 from ...cookies import get_cookies_dir
 from ...image.copy_images import secure_filename, get_source_url, get_media_dir, copy_media
 from ...client.service import get_model_and_provider
+from ...providers.any_model_map import model_map
+from ... import Provider
 from ... import models
 from .api import Api
 
@@ -208,11 +210,19 @@ class Backend_Api(Api):
             json_data["user"] = request.headers.get("x-user", "error")
             json_data["referer"] = request.headers.get("referer", "")
             json_data["user-agent"] = request.headers.get("user-agent", "")
+
             kwargs = self._prepare_conversation_kwargs(json_data)
+            provider = kwargs.pop("provider", None)
+            if provider and provider not in Provider.__map__:
+                if provider in model_map:
+                    kwargs['model'] = provider
+                    provider = None
+                else:
+                    return jsonify({"error": {"message": "Provider not found"}}), 404
             return self.app.response_class(
                 safe_iter_generator(self._create_response_stream(
                     kwargs,
-                    json_data.get("provider"),
+                    provider,
                     json_data.get("download_media", True),
                     tempfiles
                 )),
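
The same provider-or-model disambiguation as in the API hunks, condensed into a standalone helper for illustration (names assumed, not from the commit):

```python
def route_provider(provider, kwargs: dict, provider_map: dict, model_map: dict):
    """If the requested 'provider' is actually a model id, move it into kwargs."""
    if provider and provider not in provider_map:
        if provider in model_map:
            kwargs["model"] = provider
            return None   # no explicit provider; a default one is resolved later
        raise LookupError("Provider not found")
    return provider
```
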
@@ -277,18 +287,10 @@ class Backend_Api(Api):
         @app.route('/backend-api/v2/create', methods=['GET'])
         def create():
             try:
-                tool_calls = []
                 web_search = request.args.get("web_search")
                 if web_search:
                     is_true_web_search = web_search.lower() in ["true", "1"]
-                    web_search = None if is_true_web_search else web_search
+                    web_search = True if is_true_web_search else web_search
-                    tool_calls.append({
-                        "function": {
-                            "name": "search_tool",
-                            "arguments": {"query": web_search, "instructions": "", "max_words": 1000} if web_search != "true" else {}
-                        },
-                        "type": "function"
-                    })
                 do_filter = request.args.get("filter_markdown", request.args.get("json"))
                 cache_id = request.args.get('cache')
                 model, provider_handler = get_model_and_provider(
@@ -300,7 +302,7 @@ class Backend_Api(Api):
                     "model": model,
                     "messages": [{"role": "user", "content": request.args.get("prompt")}],
                     "stream": not do_filter and not cache_id,
-                    "tool_calls": tool_calls,
+                    "web_search": web_search,
                 }
                 if request.args.get("audio_provider") or request.args.get("audio"):
                     parameters["audio"] = {}
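
Taken together, these two hunks drop the hand-built `search_tool` tool call and pass a plain `web_search` parameter instead: `"true"`/`"1"` become the boolean flag, any other non-empty value is forwarded as the search query. A condensed sketch of that translation (helper name assumed):

```python
def normalize_web_search(raw):
    """'true'/'1' -> True, other non-empty strings -> custom query, empty -> None."""
    if not raw:
        return None
    return True if raw.lower() in ["true", "1"] else raw

assert normalize_web_search("1") is True
assert normalize_web_search("latest release notes") == "latest release notes"
assert normalize_web_search(None) is None
```
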

@@ -179,9 +179,7 @@ model_map = {
     },
     "gpt-oss-120b": {
         "Together": "openai/gpt-oss-120b",
-        "DeepInfra": "openai/gpt-oss-120b",
         "HuggingFace": "openai/gpt-oss-120b",
-        "OpenRouter": "openai/gpt-oss-120b:free",
         "Groq": "openai/gpt-oss-120b",
         "Azure": "gpt-oss-120b",
         "OpenRouterFree": "openai/gpt-oss-120b",

@@ -284,6 +284,7 @@ class AsyncGeneratorProvider(AbstractProvider):
     Provides asynchronous generator functionality for streaming results.
     """
     supports_stream = True
+    use_stream_timeout = True
 
     @classmethod
     def create_completion(
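
The new `use_stream_timeout` attribute is a per-provider switch between the per-chunk `stream_timeout` and the overall `timeout` (LMArena opts out of it further up by setting it to `False`). A minimal sketch of a subclass overriding it (hypothetical provider, not part of the commit):

```python
from g4f.providers.base_provider import AsyncGeneratorProvider

class SlowStreamingProvider(AsyncGeneratorProvider):
    # Hypothetical provider whose individual chunks may take a long time,
    # so it prefers the overall timeout over the per-chunk stream timeout.
    use_stream_timeout = False
```
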
@@ -309,7 +310,7 @@ class AsyncGeneratorProvider(AbstractProvider):
         """
         return to_sync_generator(
             cls.create_async_generator(model, messages, **kwargs),
-            timeout=timeout if stream_timeout is None else stream_timeout,
+            timeout=stream_timeout if cls.use_stream_timeout is None else timeout,
         )
 
     @staticmethod
@@ -336,7 +337,7 @@ class AsyncGeneratorProvider(AbstractProvider):
         raise NotImplementedError()
 
     @classmethod
-    def async_create_function(cls, *args, **kwargs) -> AsyncResult:
+    async def async_create_function(cls, *args, **kwargs) -> AsyncResult:
         """
         Creates a completion using the synchronous method.
 
@@ -346,7 +347,19 @@ class AsyncGeneratorProvider(AbstractProvider):
         Returns:
             CreateResult: The result of the completion creation.
         """
-        return cls.create_async_generator(*args, **kwargs)
+        response = cls.create_async_generator(*args, **kwargs)
+        if "stream_timeout" in kwargs or "timeout" in kwargs:
+            while True:
+                try:
+                    yield await asyncio.wait_for(
+                        response.__anext__(),
+                        timeout=kwargs.get("stream_timeout") if cls.use_stream_timeout else kwargs.get("timeout")
+                    )
+                except StopAsyncIteration:
+                    break
+        else:
+            async for chunk in response:
+                yield chunk
 
 class ProviderModelMixin:
     default_model: str = None
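
The core pattern in this hunk is a timeout per yielded chunk rather than on the whole generator, implemented by awaiting `__anext__()` under `asyncio.wait_for`. A self-contained sketch of that pattern with generic names (independent of g4f):

```python
import asyncio

async def iter_with_chunk_timeout(agen, timeout: float):
    """Re-yield items from an async generator, raising asyncio.TimeoutError
    if any single item takes longer than `timeout` seconds to arrive."""
    while True:
        try:
            item = await asyncio.wait_for(agen.__anext__(), timeout=timeout)
        except StopAsyncIteration:
            break
        yield item

async def demo():
    async def slow_numbers():
        for i in range(3):
            await asyncio.sleep(0.1)
            yield i
    async for n in iter_with_chunk_timeout(slow_numbers(), timeout=1.0):
        print(n)

asyncio.run(demo())
```
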
@@ -501,10 +514,13 @@ class AsyncAuthedProvider(AsyncGeneratorProvider, AuthFileMixin):
         try:
             auth_result = cls.get_auth_result()
             response = to_async_iterator(cls.create_authed(model, messages, **kwargs, auth_result=auth_result))
-            if "stream_timeout" in kwargs:
+            if "stream_timeout" in kwargs or "timeout" in kwargs:
                 while True:
                     try:
-                        yield await asyncio.wait_for(response.__anext__(), timeout=kwargs["stream_timeout"])
+                        yield await asyncio.wait_for(
+                            response.__anext__(),
+                            timeout=kwargs.get("stream_timeout") if cls.use_stream_timeout else kwargs.get("timeout")
+                        )
                     except StopAsyncIteration:
                         break
             else:

@@ -43,22 +43,21 @@ case "${PLATFORM}" in
         ;;
     "darwin"|"macos")
         OUTPUT_NAME="g4f-macos-${VERSION}-${ARCH}"
-        NUITKA_ARGS="--macos-create-app-bundle"
+        NUITKA_ARGS="--macos-create-app-bundle --onefile"
         ;;
     "linux")
         OUTPUT_NAME="g4f-linux-${VERSION}-${ARCH}"
-        NUITKA_ARGS=""
+        NUITKA_ARGS="--onefile"
         ;;
     *)
         OUTPUT_NAME="g4f-${PLATFORM}-${VERSION}-${ARCH}"
-        NUITKA_ARGS=""
+        NUITKA_ARGS="--onefile"
         ;;
 esac
 
 # Basic Nuitka arguments
 NUITKA_COMMON_ARGS="
     --standalone
-    --onefile
     --output-filename=${OUTPUT_NAME}
     --output-dir=${OUTPUT_DIR}
     --remove-output