diff --git a/g4f/Provider/PollinationsAI.py b/g4f/Provider/PollinationsAI.py
index ebdff660..43f7a934 100644
--- a/g4f/Provider/PollinationsAI.py
+++ b/g4f/Provider/PollinationsAI.py
@@ -123,9 +123,6 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
if model not in cls.image_models:
raise
- if not cache and seed is None:
- seed = random.randint(1000, 999999)
-
if model in cls.image_models:
async for chunk in cls._generate_image(
model=model,
@@ -134,6 +131,7 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
width=width,
height=height,
seed=seed,
+ cache=cache,
nologo=nologo,
private=private,
enhance=enhance,
@@ -165,11 +163,14 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
width: int,
height: int,
seed: Optional[int],
+ cache: bool,
nologo: bool,
private: bool,
enhance: bool,
safe: bool
) -> AsyncResult:
+ if not cache and seed is None:
+ seed = random.randint(9999, 99999999)
params = {
"seed": str(seed) if seed is not None else None,
"width": str(width),
@@ -207,6 +208,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
seed: Optional[int],
cache: bool
) -> AsyncResult:
+ if not cache and seed is None:
+ seed = random.randint(9999, 99999999)
json_mode = False
if response_format and response_format.get("type") == "json_object":
json_mode = True
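
This hunk moves the seed randomization out of the shared entry point and into both the image and text paths, so the `cache` flag now controls it per request. A minimal sketch of the idea (helper name hypothetical, not the provider's actual API):

```python
import random
from typing import Optional

def resolve_seed(cache: bool, seed: Optional[int]) -> Optional[int]:
    """When caching is off and the caller gave no seed, pick a random one
    so identical prompts are not served a previously cached result."""
    if not cache and seed is None:
        # Same range the patch uses in both endpoints.
        seed = random.randint(9999, 99999999)
    return seed

# With cache=True the seed stays None, allowing an upstream cache hit.
assert resolve_seed(True, None) is None
assert resolve_seed(False, 42) == 42
```
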
diff --git a/g4f/Provider/PollinationsImage.py b/g4f/Provider/PollinationsImage.py
index e901a3a7..c9f302f8 100644
--- a/g4f/Provider/PollinationsImage.py
+++ b/g4f/Provider/PollinationsImage.py
@@ -28,6 +28,7 @@ class PollinationsImage(PollinationsAI):
width: int = 1024,
height: int = 1024,
seed: Optional[int] = None,
+ cache: bool = False,
nologo: bool = True,
private: bool = False,
enhance: bool = False,
@@ -41,6 +42,7 @@ class PollinationsImage(PollinationsAI):
width=width,
height=height,
seed=seed,
+ cache=cache,
nologo=nologo,
private=private,
enhance=enhance,
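
With `cache` threaded through `PollinationsImage`, callers opt into deterministic, cacheable output explicitly. A hedged usage sketch, assuming the usual `(model, messages, ...)` provider signature:

```python
import asyncio
from g4f.Provider import PollinationsImage

async def main():
    # Hypothetical call; keyword names match the signature in this hunk.
    async for chunk in PollinationsImage.create_async_generator(
        model="flux",
        messages=[{"role": "user", "content": "a lighthouse at dusk"}],
        cache=False,  # default: a random seed is injected for each request
    ):
        print(chunk)

asyncio.run(main())
```
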
diff --git a/g4f/Provider/hf/HuggingChat.py b/g4f/Provider/hf/HuggingChat.py
index 83ef34eb..d4656996 100644
--- a/g4f/Provider/hf/HuggingChat.py
+++ b/g4f/Provider/hf/HuggingChat.py
@@ -8,7 +8,8 @@ import base64
from typing import AsyncIterator
try:
- from curl_cffi.requests import Session, CurlMime
+ from curl_cffi.requests import Session
+ from curl_cffi import CurlMime
has_curl_cffi = True
except ImportError:
has_curl_cffi = False
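
`CurlMime` moved out of `curl_cffi.requests` in newer curl_cffi releases, which is what this import change tracks. If both layouts had to be supported, a nested fallback would be one option (a sketch, not what the patch does):

```python
try:
    from curl_cffi.requests import Session
    try:
        from curl_cffi import CurlMime            # newer curl_cffi layout
    except ImportError:
        from curl_cffi.requests import CurlMime   # older releases
    has_curl_cffi = True
except ImportError:
    # Keep the module importable; callers must check has_curl_cffi first.
    Session = CurlMime = None
    has_curl_cffi = False
```
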
diff --git a/g4f/Provider/hf/HuggingFaceAPI.py b/g4f/Provider/hf/HuggingFaceAPI.py
index 976deaf0..e03ffc37 100644
--- a/g4f/Provider/hf/HuggingFaceAPI.py
+++ b/g4f/Provider/hf/HuggingFaceAPI.py
@@ -4,6 +4,7 @@ from ...providers.types import Messages
from ...typing import ImagesType
from ...requests import StreamSession, raise_for_status
from ...errors import ModelNotSupportedError
+from ...providers.helper import get_last_user_message
from ..template.OpenaiTemplate import OpenaiTemplate
from .models import model_aliases, vision_models, default_vision_model
from .HuggingChat import HuggingChat
@@ -22,7 +23,7 @@ class HuggingFaceAPI(OpenaiTemplate):
vision_models = vision_models
model_aliases = model_aliases
- pipeline_tag: dict[str, str] = {}
+ pipeline_tags: dict[str, str] = {}
@classmethod
def get_models(cls, **kwargs):
@@ -36,8 +37,8 @@ class HuggingFaceAPI(OpenaiTemplate):
@classmethod
async def get_pipline_tag(cls, model: str, api_key: str = None):
- if model in cls.pipeline_tag:
- return cls.pipeline_tag[model]
+ if model in cls.pipeline_tags:
+ return cls.pipeline_tags[model]
async with StreamSession(
timeout=30,
headers=cls.get_headers(False, api_key),
@@ -45,8 +46,8 @@ class HuggingFaceAPI(OpenaiTemplate):
async with session.get(f"https://huggingface.co/api/models/{model}") as response:
await raise_for_status(response)
model_data = await response.json()
- cls.pipeline_tag[model] = model_data.get("pipeline_tag")
- return cls.pipeline_tag[model]
+ cls.pipeline_tags[model] = model_data.get("pipeline_tag")
+ return cls.pipeline_tags[model]
@classmethod
async def create_async_generator(
@@ -73,10 +74,11 @@ class HuggingFaceAPI(OpenaiTemplate):
if len(messages) > 6:
messages = messages[:3] + messages[-3:]
if calculate_lenght(messages) > max_inputs_lenght:
+ last_user_message = [{"role": "user", "content": get_last_user_message(messages)}]
if len(messages) > 2:
- messages = [m for m in messages if m["role"] == "system"] + messages[-1:]
+ messages = [m for m in messages if m["role"] == "system"] + last_user_message
if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght:
- messages = [messages[-1]]
+ messages = last_user_message
debug.log(f"Messages trimmed from: {start} to: {calculate_lenght(messages)}")
async for chunk in super().create_async_generator(model, messages, api_base=api_base, api_key=api_key, max_tokens=max_tokens, images=images, **kwargs):
yield chunk
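
The key fix in this hunk: `get_last_user_message` returns the message *content* as a string, so it must be re-wrapped in a `{"role": "user", ...}` dict before use; the old `messages[-1:]` fallback could also keep a trailing assistant message instead of the user's request. A standalone sketch of the trimming strategy (helper contract assumed from `providers.helper`):

```python
def get_last_user_message(messages: list[dict]) -> str:
    """Assumed contract: content of the most recent 'user' message."""
    for message in reversed(messages):
        if message["role"] == "user":
            return str(message["content"])
    return ""

def trim_messages(messages: list[dict], max_length: int) -> list[dict]:
    """Drop the middle of long histories; fall back to system + last user turn."""
    def length(msgs):  # rough proxy for the provider's calculate_lenght()
        return sum(len(str(m["content"])) for m in msgs)

    if length(messages) <= max_length:
        return messages
    if len(messages) > 6:
        messages = messages[:3] + messages[-3:]
    if length(messages) > max_length:
        last_user = [{"role": "user", "content": get_last_user_message(messages)}]
        messages = [m for m in messages if m["role"] == "system"] + last_user
        if length(messages) > max_length:
            messages = last_user
    return messages
```
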
diff --git a/g4f/Provider/hf/HuggingFaceInference.py b/g4f/Provider/hf/HuggingFaceInference.py
index 1ada7347..1be744b1 100644
--- a/g4f/Provider/hf/HuggingFaceInference.py
+++ b/g4f/Provider/hf/HuggingFaceInference.py
@@ -7,7 +7,7 @@ import requests
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, format_prompt
-from ...errors import ModelNotFoundError, ModelNotSupportedError, ResponseError
+from ...errors import ModelNotSupportedError, ResponseError
from ...requests import StreamSession, raise_for_status
from ...providers.response import FinishReason, ImageResponse
from ..helper import format_image_prompt, get_last_user_message
@@ -24,6 +24,8 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
model_aliases = model_aliases
image_models = image_models
+ model_data: dict[str, dict] = {}
+
@classmethod
def get_models(cls) -> list[str]:
if not cls.models:
@@ -43,6 +45,17 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
cls.models = models
return cls.models
+ @classmethod
+ async def get_model_data(cls, session: StreamSession, model: str) -> dict:
+ if model in cls.model_data:
+ return cls.model_data[model]
+ async with session.get(f"https://huggingface.co/api/models/{model}") as response:
+ if response.status == 404:
+ raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
+ await raise_for_status(response)
+ cls.model_data[model] = await response.json()
+ return cls.model_data[model]
+
@classmethod
async def create_async_generator(
cls,
@@ -96,41 +109,37 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
timeout=600
) as session:
if payload is None:
- async with session.get(f"https://huggingface.co/api/models/{model}") as response:
- if response.status == 404:
- raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
- await raise_for_status(response)
- model_data = await response.json()
- pipeline_tag = model_data.get("pipeline_tag")
- if pipeline_tag == "text-to-image":
- stream = False
- inputs = format_image_prompt(messages, prompt)
- payload = {"inputs": inputs, "parameters": {"seed": random.randint(0, 2**32) if seed is None else seed, **extra_data}}
- elif pipeline_tag in ("text-generation", "image-text-to-text"):
- model_type = None
- if "config" in model_data and "model_type" in model_data["config"]:
- model_type = model_data["config"]["model_type"]
- debug.log(f"Model type: {model_type}")
+ model_data = await cls.get_model_data(session, model)
+ pipeline_tag = model_data.get("pipeline_tag")
+ if pipeline_tag == "text-to-image":
+ stream = False
+ inputs = format_image_prompt(messages, prompt)
+ payload = {"inputs": inputs, "parameters": {"seed": random.randint(0, 2**32) if seed is None else seed, **extra_data}}
+ elif pipeline_tag in ("text-generation", "image-text-to-text"):
+ model_type = None
+ if "config" in model_data and "model_type" in model_data["config"]:
+ model_type = model_data["config"]["model_type"]
+ debug.log(f"Model type: {model_type}")
+ inputs = get_inputs(messages, model_data, model_type, do_continue)
+ debug.log(f"Inputs len: {len(inputs)}")
+ if len(inputs) > 4096:
+ if len(messages) > 6:
+ messages = messages[:3] + messages[-3:]
+ else:
+ messages = [m for m in messages if m["role"] == "system"] + [{"role": "user", "content": get_last_user_message(messages)}]
inputs = get_inputs(messages, model_data, model_type, do_continue)
- debug.log(f"Inputs len: {len(inputs)}")
- if len(inputs) > 4096:
- if len(messages) > 6:
- messages = messages[:3] + messages[-3:]
- else:
- messages = [m for m in messages if m["role"] == "system"] + [get_last_user_message(messages)]
- inputs = get_inputs(messages, model_data, model_type, do_continue)
- debug.log(f"New len: {len(inputs)}")
- if model_type == "gpt2" and max_tokens >= 1024:
- params["max_new_tokens"] = 512
- if seed is not None:
- params["seed"] = seed
- payload = {"inputs": inputs, "parameters": params, "stream": stream}
- else:
- raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__} pipeline_tag: {pipeline_tag}")
+ debug.log(f"New len: {len(inputs)}")
+ if model_type == "gpt2" and max_tokens >= 1024:
+ params["max_new_tokens"] = 512
+ if seed is not None:
+ params["seed"] = seed
+ payload = {"inputs": inputs, "parameters": params, "stream": stream}
+ else:
+ raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__} pipeline_tag: {pipeline_tag}")
async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
if response.status == 404:
- raise ModelNotFoundError(f"Model is not supported: {model}")
+ raise ModelNotSupportedError(f"Model is not supported: {model}")
await raise_for_status(response)
if stream:
first = True
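
`get_model_data` factors the per-model metadata lookup into a class-level cache, so repeated requests for the same model hit the HuggingFace API only once. The shape of the pattern, sketched with aiohttp (the provider itself uses its own `StreamSession`):

```python
import aiohttp

class ModelMeta:
    _cache: dict[str, dict] = {}  # class-level: shared across all calls

    @classmethod
    async def get(cls, session: aiohttp.ClientSession, model: str) -> dict:
        if model not in cls._cache:
            url = f"https://huggingface.co/api/models/{model}"
            async with session.get(url) as response:
                if response.status == 404:
                    raise ValueError(f"Model is not supported: {model}")
                response.raise_for_status()
                cls._cache[model] = await response.json()
        return cls._cache[model]
```
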
diff --git a/g4f/Provider/hf/__init__.py b/g4f/Provider/hf/__init__.py
index ec103831..d6c2e3bb 100644
--- a/g4f/Provider/hf/__init__.py
+++ b/g4f/Provider/hf/__init__.py
@@ -36,7 +36,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages,
**kwargs
) -> AsyncResult:
- if "api_key" not in kwargs and "images" not in kwargs and random.random() >= 0.5:
+ if "images" not in kwargs and ("deepseek" in model or random.random() >= 0.5):
try:
is_started = False
async for chunk in HuggingFaceInference.create_async_generator(model, messages, **kwargs):
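
Note the parentheses around the `or` clause: `and` binds tighter than `or` in Python, so without them the random fallback would fire even when images are present. A quick check of the semantics:

```python
# 'and' binds tighter than 'or':
a, b, c = False, True, True
assert (a and b or c) == ((a and b) or c)  # True -- c decides even when a is False
assert (a and (b or c)) is False           # the intended guard
```
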
diff --git a/g4f/Provider/needs_auth/DeepSeekAPI.py b/g4f/Provider/needs_auth/DeepSeekAPI.py
index 2c5a8bf7..0c120266 100644
--- a/g4f/Provider/needs_auth/DeepSeekAPI.py
+++ b/g4f/Provider/needs_auth/DeepSeekAPI.py
@@ -13,7 +13,6 @@ from ...errors import MissingAuthError
from ...requests import get_args_from_nodriver, get_nodriver
from ...providers.response import AuthResult, RequestLogin, Reasoning, JsonConversation, FinishReason
from ...typing import AsyncResult, Messages
-from ... import debug
try:
from curl_cffi import requests
from dsk.api import DeepSeekAPI, AuthenticationError, DeepSeekPOW
diff --git a/g4f/Provider/needs_auth/OpenaiAccount.py b/g4f/Provider/needs_auth/OpenaiAccount.py
index 478b8be3..e5c71e58 100644
--- a/g4f/Provider/needs_auth/OpenaiAccount.py
+++ b/g4f/Provider/needs_auth/OpenaiAccount.py
@@ -4,4 +4,5 @@ from .OpenaiChat import OpenaiChat
class OpenaiAccount(OpenaiChat):
needs_auth = True
- parent = "OpenaiChat"
\ No newline at end of file
+ parent = "OpenaiChat"
+ use_nodriver = False # Show (Auth) in the model name
\ No newline at end of file
diff --git a/g4f/Provider/template/OpenaiTemplate.py b/g4f/Provider/template/OpenaiTemplate.py
index d8b54578..d8427727 100644
--- a/g4f/Provider/template/OpenaiTemplate.py
+++ b/g4f/Provider/template/OpenaiTemplate.py
@@ -65,7 +65,7 @@ class OpenaiTemplate(AsyncGeneratorProvider, ProviderModelMixin, RaiseErrorMixin
prompt: str = None,
headers: dict = None,
impersonate: str = None,
- extra_parameters: list[str] = ["tools", "parallel_tool_calls", "", "reasoning_effort", "logit_bias"],
+ extra_parameters: list[str] = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort", "logit_bias"],
extra_data: dict = {},
**kwargs
) -> AsyncResult:
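
The fix replaces a stray empty string in the whitelist with `tool_choice`, restoring forwarding of that OpenAI-style parameter. The filtering idiom looks roughly like this (a sketch; the template's actual body may differ):

```python
def collect_extra(kwargs: dict, allowed: list[str]) -> dict:
    """Copy only whitelisted, non-None parameters into the request body."""
    return {key: kwargs[key] for key in allowed if kwargs.get(key) is not None}

allowed = ["tools", "parallel_tool_calls", "tool_choice", "reasoning_effort", "logit_bias"]
body = collect_extra({"tools": [...], "stream": True}, allowed)
assert "stream" not in body  # non-whitelisted keys never reach the API
```
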
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
index 88ff09a8..3c7de6a7 100644
--- a/g4f/client/__init__.py
+++ b/g4f/client/__init__.py
@@ -365,7 +365,7 @@ class Images:
break
except Exception as e:
error = e
- debug.error(e, name=f"{provider.__name__} {type(e).__name__}")
+ debug.error(f"{provider.__name__} {type(e).__name__}: {e}")
else:
response = await self._generate_image_response(provider_handler, provider_name, model, prompt, **kwargs)
@@ -460,7 +460,7 @@ class Images:
break
except Exception as e:
error = e
- debug.error(e, name=f"{provider.__name__} {type(e).__name__}")
+ debug.error(f"{provider.__name__} {type(e).__name__}: {e}")
else:
response = await self._generate_image_response(provider_handler, provider_name, model, prompt, **kwargs)
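
Both call sites are updated because `debug.error` takes no `name` keyword; folding the provider and exception type into the message keeps a single positional argument. A minimal logger sketch consistent with these call sites (an assumption, not the actual g4f debug module):

```python
import sys

logging = True
logs: list[str] = []

def error(*messages, file=sys.stderr) -> None:
    """Accept any number of positional message parts, like print()."""
    text = " ".join(str(m) for m in messages)
    logs.append(text)
    if logging:
        print(text, file=file)

# Matches the updated call sites:
error("PollinationsAI ResponseError: example failure")
```
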
diff --git a/g4f/gui/client/static/js/chat.v1.js b/g4f/gui/client/static/js/chat.v1.js
index 0c087084..bec8b976 100644
--- a/g4f/gui/client/static/js/chat.v1.js
+++ b/g4f/gui/client/static/js/chat.v1.js
@@ -1932,7 +1932,7 @@ const load_provider_option = (input, provider_name) => {
providerSelect.querySelectorAll(`option[data-parent="${provider_name}"]`).forEach(
(el) => el.removeAttribute("disabled")
);
- settings.querySelector(`.field:has(#${provider_name}-api_key)`)?.classList.remove("hidden");
+ //settings.querySelector(`.field:has(#${provider_name}-api_key)`)?.classList.remove("hidden");
} else {
modelSelect.querySelectorAll(`option[data-providers*="${provider_name}"]`).forEach(
(el) => {
@@ -1947,7 +1947,7 @@ const load_provider_option = (input, provider_name) => {
providerSelect.querySelectorAll(`option[data-parent="${provider_name}"]`).forEach(
(el) => el.setAttribute("disabled", "disabled")
);
- settings.querySelector(`.field:has(#${provider_name}-api_key)`)?.classList.add("hidden");
+ //settings.querySelector(`.field:has(#${provider_name}-api_key)`)?.classList.add("hidden");
}
};
@@ -2039,13 +2039,13 @@ async function on_api() {
if (provider.parent) {
if (!login_urls[provider.parent]) {
- login_urls[provider.parent] = [provider.label, provider.login_url, [provider.name]];
+ login_urls[provider.parent] = [provider.label, provider.login_url, [provider.name], provider.auth];
} else {
login_urls[provider.parent][2].push(provider.name);
}
} else if (provider.login_url) {
if (!login_urls[provider.name]) {
- login_urls[provider.name] = [provider.label, provider.login_url, []];
+ login_urls[provider.name] = [provider.label, provider.login_url, [], provider.auth];
} else {
login_urls[provider.name][0] = provider.label;
login_urls[provider.name][1] = provider.login_url;
@@ -2068,9 +2068,10 @@ async function on_api() {
if (!provider.parent) {
let option = document.createElement("div");
option.classList.add("provider-item");
+ let api_key = appStorage.getItem(`${provider.name}-api_key`);
option.innerHTML = `
Enable ${provider.label}
-
+
`;
option.querySelector("input").addEventListener("change", (event) => load_provider_option(event.target, provider.name));
@@ -2102,7 +2103,7 @@ async function on_api() {
`;
settings.querySelector(".paper").appendChild(providersListContainer);
- for (let [name, [label, login_url, childs]] of Object.entries(login_urls)) {
+ for (let [name, [label, login_url, childs, auth]] of Object.entries(login_urls)) {
if (!login_url && !is_demo) {
continue;
}
@@ -2113,6 +2114,13 @@ async function on_api() {
` + (login_url ? `Get API key` : "");
+ if (auth) {
+ providerBox.querySelector("input").addEventListener("input", (event) => {
+ const input = document.getElementById(`Provider${name}`);
+ input.checked = !!event.target.value;
+ load_provider_option(input, name);
+ });
+ }
providersListContainer.querySelector(".collapsible-content").appendChild(providerBox);
}
diff --git a/g4f/gui/server/api.py b/g4f/gui/server/api.py
index 154f70a8..e6a76b2d 100644
--- a/g4f/gui/server/api.py
+++ b/g4f/gui/server/api.py
@@ -143,7 +143,7 @@ class Api:
def decorated_log(text: str, file = None):
debug.logs.append(text)
if debug.logging:
- debug.log_handler(text, file)
+ debug.log_handler(text, file=file)
debug.log = decorated_log
proxy = os.environ.get("G4F_PROXY")
provider = kwargs.get("provider")
@@ -187,7 +187,7 @@ class Api:
yield self._format_json("conversation_id", conversation_id)
elif isinstance(chunk, Exception):
logger.exception(chunk)
- debug.error(e)
+ debug.error(chunk)
yield self._format_json('message', get_error_message(chunk), error=type(chunk).__name__)
elif isinstance(chunk, PreviewResponse):
yield self._format_json("preview", chunk.to_string())
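
Two fixes here: `decorated_log` now forwards `file` by keyword instead of positionally, and `debug.error(e)` referenced a name that doesn't exist in that scope, corrected to `chunk`. Making such parameters keyword-only would turn the first class of mistake into an immediate TypeError; a design sketch (not the real `log_handler` signature):

```python
import sys

def log_handler(text: str, *, file=None) -> None:
    # '*' makes 'file' keyword-only: log_handler(text, f) now raises a
    # TypeError instead of silently binding f to the wrong parameter.
    print(text, file=file or sys.stdout)

log_handler("ready")                   # ok
log_handler("ready", file=sys.stderr)  # ok
```
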
diff --git a/g4f/image/copy_images.py b/g4f/image/copy_images.py
index 5539584e..3b448d70 100644
--- a/g4f/image/copy_images.py
+++ b/g4f/image/copy_images.py
@@ -123,7 +123,7 @@ async def copy_images(
return f"/images/{url_filename}{'?url=' + quote(image) if add_url and not image.startswith('data:') else ''}"
except (ClientError, IOError, OSError) as e:
- debug.error(f"Image processing failed: {type(e).__name__}: {e}")
+ debug.error(f"Image copying failed: {type(e).__name__}: {e}")
if target_path and os.path.exists(target_path):
os.unlink(target_path)
return get_source_url(image, image)
diff --git a/g4f/providers/retry_provider.py b/g4f/providers/retry_provider.py
index e77d5d17..6b8ca3b0 100644
--- a/g4f/providers/retry_provider.py
+++ b/g4f/providers/retry_provider.py
@@ -105,7 +105,7 @@ class IterListProvider(BaseRetryProvider):
return
except Exception as e:
exceptions[provider.__name__] = e
- debug.error(name=f"{provider.__name__} {type(e).__name__}: {e}")
+ debug.error(f"{provider.__name__} {type(e).__name__}: {e}")
if started:
raise e
yield e