Fix model and provider in chat completion response

Add login button to HuggingFace demo
Custom conversation ids in chat ui
Remove rate limiter in demo mode
Improve YouTube support in Gemini
Author: hlohaus
Date: 2025-02-24 08:53:43 +01:00
parent 6314d27dc4
commit 5cbbe2fd3d
8 changed files with 160 additions and 92 deletions

View File

@@ -203,7 +203,14 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                         skip -= 1
                         continue
                     yield item
-                reasoning = "".join(find_str(response_part[4][0], 3))
+                reasoning = "\n\n".join(find_str(response_part[4][0], 3))
+                reasoning = re.sub(r"<b>|</b>", "**", reasoning)
+                def replace_image(match):
+                    return f"![](https:{match.group(0)})"
+                reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)
+                reasoning = re.sub(r"\nyoutube\n", "\n\n\n", reasoning)
+                reasoning = re.sub(r"\nYouTube\n", "\nYouTube ", reasoning)
+                reasoning = reasoning.replace('https://www.gstatic.com/images/branding/productlogos/youtube/v9/192px.svg', '<i class="fa-brands fa-youtube"></i>')
                 content = response_part[4][0][1][0]
                 if reasoning:
                     yield Reasoning(status="🤔")
@@ -215,8 +222,12 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                 if match:
                     image_prompt = match.group(1)
                     content = content.replace(match.group(0), '')
-                pattern = r"http://googleusercontent.com/image_generation_content/\d+"
+                pattern = r"http://googleusercontent.com/(?:image_generation|youtube)_content/\d+"
                 content = re.sub(pattern, "", content)
+                content = content.replace("<!-- end list -->", "")
+                content = content.replace("https://www.google.com/search?q=http://", "https://")
+                content = content.replace("https://www.google.com/search?q=https://", "https://")
+                content = content.replace("https://www.google.com/url?sa=E&source=gmail&q=http://", "http://")
                 if last_content and content.startswith(last_content):
                     yield content[len(last_content):]
                 else:
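
The reasoning cleanup above converts bold tags to Markdown and rewrites protocol-relative YouTube avatar URLs into Markdown images. A minimal, runnable sketch of just the avatar step, reusing the regex and callback from the hunk (the sample URL is invented):

    import re

    def replace_image(match):
        # Prefix the protocol-relative URL and wrap it as a Markdown image.
        return f"![](https:{match.group(0)})"

    reasoning = "Source: //yt3.ggpht.com/ytc-AbC123=s88"
    print(re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning))
    # Source: ![](https://yt3.ggpht.com/ytc-AbC123=s88)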

View File

@@ -170,7 +170,9 @@ class Api:
         try:
             user_g4f_api_key = await self.get_g4f_api_key(request)
         except HTTPException:
-            user_g4f_api_key = None
+            user_g4f_api_key = await self.security(request)
+            if hasattr(user_g4f_api_key, "credentials"):
+                user_g4f_api_key = user_g4f_api_key.credentials
         path = request.url.path
         if path.startswith("/v1") or path.startswith("/api/") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
             if user_g4f_api_key is None:
@@ -581,11 +583,17 @@ class Api:
                 pass
             if not os.path.isfile(target):
                 source_url = get_source_url(str(request.query_params))
+                ssl = None
+                if source_url is None:
+                    backend_url = os.environ.get("G4F_BACKEND_URL")
+                    if backend_url:
+                        source_url = f"{backend_url}/images/{filename}"
+                        ssl = False
                 if source_url is not None:
                     try:
                         await copy_images(
                             [source_url],
-                            target=target)
+                            target=target, ssl=ssl)
                         debug.log(f"Image copied from {source_url}")
                     except Exception as e:
                         debug.error(f"Download failed: {source_url}\n{type(e).__name__}: {e}")
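
When the query string carries no source URL, the handler now derives one from the G4F_BACKEND_URL environment variable and disables SSL verification for that fetch. A standalone sketch of the resolution step (the helper name and tuple return are mine, for illustration):

    import os

    def resolve_image_source(source_url: str | None, filename: str):
        """Mirror of the fallback above: prefer the explicit source URL, else
        build one from G4F_BACKEND_URL and turn SSL verification off, since a
        self-hosted backend may not present a valid certificate."""
        ssl = None
        if source_url is None:
            backend_url = os.environ.get("G4F_BACKEND_URL")
            if backend_url:
                source_url = f"{backend_url}/images/{filename}"
                ssl = False
        return source_url, ssl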

View File

@@ -12,7 +12,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional
from ..image.copy_images import copy_images from ..image.copy_images import copy_images
from ..typing import Messages, ImageType from ..typing import Messages, ImageType
from ..providers.types import ProviderType, BaseRetryProvider from ..providers.types import ProviderType, BaseRetryProvider
from ..providers.response import ResponseType, ImageResponse, FinishReason, BaseConversation, SynthesizeData, ToolCalls, Usage from ..providers.response import *
from ..errors import NoImageResponseError from ..errors import NoImageResponseError
from ..providers.retry_provider import IterListProvider from ..providers.retry_provider import IterListProvider
from ..providers.asyncio import to_sync_generator from ..providers.asyncio import to_sync_generator
@@ -49,6 +49,7 @@ def iter_response(
finish_reason = None finish_reason = None
tool_calls = None tool_calls = None
usage = None usage = None
provider: ProviderInfo = None
completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28)) completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
idx = 0 idx = 0
@@ -65,10 +66,13 @@ def iter_response(
elif isinstance(chunk, Usage): elif isinstance(chunk, Usage):
usage = chunk usage = chunk
continue continue
elif isinstance(chunk, ProviderInfo):
provider = chunk
continue
elif isinstance(chunk, BaseConversation): elif isinstance(chunk, BaseConversation):
yield chunk yield chunk
continue continue
elif isinstance(chunk, SynthesizeData) or not chunk: elif isinstance(chunk, HiddenResponse):
continue continue
elif isinstance(chunk, Exception): elif isinstance(chunk, Exception):
continue continue
@@ -76,7 +80,6 @@ def iter_response(
if isinstance(chunk, list): if isinstance(chunk, list):
chunk = "".join(map(str, chunk)) chunk = "".join(map(str, chunk))
else: else:
temp = chunk.__str__() temp = chunk.__str__()
if not isinstance(temp, str): if not isinstance(temp, str):
if isinstance(temp, list): if isinstance(temp, list):
@@ -84,6 +87,8 @@ def iter_response(
else: else:
temp = repr(chunk) temp = repr(chunk)
chunk = temp chunk = temp
if not chunk:
continue
content += chunk content += chunk
@@ -96,7 +101,11 @@ def iter_response(
finish_reason = "stop" finish_reason = "stop"
if stream: if stream:
yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time())) chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
if provider is not None:
chunk.provider = provider.name
chunk.model = provider.model
yield chunk
if finish_reason is not None: if finish_reason is not None:
break break
@@ -108,7 +117,7 @@ def iter_response(
finish_reason = "stop" if finish_reason is None else finish_reason finish_reason = "stop" if finish_reason is None else finish_reason
if stream: if stream:
yield ChatCompletionChunk.model_construct( chat_completion = ChatCompletionChunk.model_construct(
None, finish_reason, completion_id, int(time.time()), None, finish_reason, completion_id, int(time.time()),
usage=usage usage=usage
) )
@@ -116,19 +125,24 @@ def iter_response(
if response_format is not None and "type" in response_format: if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object": if response_format["type"] == "json_object":
content = filter_json(content) content = filter_json(content)
yield ChatCompletion.model_construct( chat_completion = ChatCompletion.model_construct(
content, finish_reason, completion_id, int(time.time()), content, finish_reason, completion_id, int(time.time()),
usage=UsageModel.model_construct(**usage.get_dict()), usage=UsageModel.model_construct(**usage.get_dict()),
**filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {} **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
) )
if provider is not None:
chat_completion.provider = provider.name
chat_completion.model = provider.model
yield chat_completion
# Synchronous iter_append_model_and_provider function # Synchronous iter_append_model_and_provider function
def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType: def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
if isinstance(last_provider, BaseRetryProvider): if isinstance(last_provider, BaseRetryProvider):
last_provider = last_provider.last_provider yield from response
return
for chunk in response: for chunk in response:
if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)): if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
if last_provider is not None: if chunk.provider is None and last_provider is not None:
chunk.model = getattr(last_provider, "last_model", last_model) chunk.model = getattr(last_provider, "last_model", last_model)
chunk.provider = last_provider.__name__ chunk.provider = last_provider.__name__
yield chunk yield chunk
@@ -146,6 +160,7 @@ async def async_iter_response(
idx = 0 idx = 0
tool_calls = None tool_calls = None
usage = None usage = None
provider: ProviderInfo = None
try: try:
async for chunk in response: async for chunk in response:
@@ -161,12 +176,17 @@ async def async_iter_response(
elif isinstance(chunk, Usage): elif isinstance(chunk, Usage):
usage = chunk usage = chunk
continue continue
elif isinstance(chunk, SynthesizeData) or not chunk: elif isinstance(chunk, ProviderInfo):
provider = chunk
continue
elif isinstance(chunk, HiddenResponse):
continue continue
elif isinstance(chunk, Exception): elif isinstance(chunk, Exception):
continue continue
chunk = str(chunk) chunk = str(chunk)
if not chunk:
continue
content += chunk content += chunk
idx += 1 idx += 1
@@ -179,7 +199,11 @@ async def async_iter_response(
finish_reason = "stop" finish_reason = "stop"
if stream: if stream:
yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time())) chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
if provider is not None:
chunk.provider = provider.name
chunk.model = provider.model
yield chunk
if finish_reason is not None: if finish_reason is not None:
break break
@@ -190,7 +214,7 @@ async def async_iter_response(
usage = Usage(completion_tokens=idx, total_tokens=idx) usage = Usage(completion_tokens=idx, total_tokens=idx)
if stream: if stream:
yield ChatCompletionChunk.model_construct( chat_completion = ChatCompletionChunk.model_construct(
None, finish_reason, completion_id, int(time.time()), None, finish_reason, completion_id, int(time.time()),
usage=usage.get_dict() usage=usage.get_dict()
) )
@@ -198,11 +222,15 @@ async def async_iter_response(
if response_format is not None and "type" in response_format: if response_format is not None and "type" in response_format:
if response_format["type"] == "json_object": if response_format["type"] == "json_object":
content = filter_json(content) content = filter_json(content)
yield ChatCompletion.model_construct( chat_completion = ChatCompletion.model_construct(
content, finish_reason, completion_id, int(time.time()), content, finish_reason, completion_id, int(time.time()),
usage=UsageModel.model_construct(**usage.get_dict()), usage=UsageModel.model_construct(**usage.get_dict()),
**filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {} **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
) )
if provider is not None:
chat_completion.provider = provider.name
chat_completion.model = provider.model
yield chat_completion
finally: finally:
await safe_aclose(response) await safe_aclose(response)
@@ -214,11 +242,12 @@ async def async_iter_append_model_and_provider(
last_provider = None last_provider = None
try: try:
if isinstance(last_provider, BaseRetryProvider): if isinstance(last_provider, BaseRetryProvider):
if last_provider is not None: async for chunk in response:
last_provider = last_provider.last_provider yield chunk
return
async for chunk in response: async for chunk in response:
if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)): if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
if last_provider is not None: if chunk.provider is None and last_provider is not None:
chunk.model = getattr(last_provider, "last_model", last_model) chunk.model = getattr(last_provider, "last_model", last_model)
chunk.provider = last_provider.__name__ chunk.provider = last_provider.__name__
yield chunk yield chunk
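
This file carries the commit's headline fix: the retry provider yields a ProviderInfo chunk before any content, and both iterators stamp its name and model onto every ChatCompletion and ChatCompletionChunk, so the append helpers only backfill when those fields are still empty. A toy sketch of the flow with simplified stand-in classes (not the real g4f types):

    from dataclasses import dataclass

    @dataclass
    class ProviderInfo:          # stand-in for g4f's ProviderInfo response type
        name: str
        model: str

    @dataclass
    class Chunk:                 # stand-in for ChatCompletionChunk
        content: str
        provider: str | None = None
        model: str | None = None

    def iter_response(stream):
        provider = None
        for item in stream:
            if isinstance(item, ProviderInfo):
                provider = item              # remember the origin, emit nothing
                continue
            chunk = Chunk(item)
            if provider is not None:         # stamp provider/model on each chunk
                chunk.provider = provider.name
                chunk.model = provider.model
            yield chunk

    for chunk in iter_response([ProviderInfo("Gemini", "gemini-pro"), "Hel", "lo"]):
        print(chunk)
    # Chunk(content='Hel', provider='Gemini', model='gemini-pro'), then 'lo' likewise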

View File

@@ -183,25 +183,10 @@
        const isIframe = window.self !== window.top;
        const backendUrl = "{{backend_url}}";
        let url = new URL(window.location.href)
-       let params = new URLSearchParams(url.search);
        if (isIframe && backendUrl) {
-           if (params.get("get_gpu_token")) {
-               window.addEventListener('DOMContentLoaded', async function() {
-                   const link = document.getElementById("new_window");
-                   link.href = `${backendUrl}${url.search}`;
-                   link.click();
-               });
-           } else {
-               window.location.replace(`${backendUrl}${url.search}`);
-           }
+           window.location.replace(`${backendUrl}${url.search}`);
            return;
        }
-       if (params.get("__sign")) {
-           localStorage.setItem("HuggingSpace-api_key", params.get("__sign"));
-           if (!isIframe) {
-               window.location.replace("/");
-           }
-       }
    })();
 </script>
 <script src="https://unpkg.com/es-module-shims@1.7.0/dist/es-module-shims.js"></script>
@@ -240,10 +225,13 @@
                    <p>
                        <a href="https://huggingface.co/settings/tokens" target="_blank">Get Access Token</a>
                    </p>
+                   <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/sign-in-with-huggingface-xl-dark.svg" alt="Sign in with Hugging Face" style="cursor: pointer; display: none;" id="signin">
+                   <button id="signout" style="display: none">Sign out</button>
                </form>
                <script type="module">
                    import * as hub from "@huggingface/hub";
                    import { init } from "@huggingface/space-header";
+                   import { oauthLoginUrl, oauthHandleRedirectIfPresent } from "@huggingface/hub";

                    const isIframe = window.self !== window.top;
                    const button = document.querySelector('form a.button');
@@ -269,7 +257,6 @@
                            return;
                        }
                        localStorage.setItem("HuggingFace-api_key", accessToken);
-                       localStorage.setItem("HuggingFace-user", JSON.stringify(user));
                        localStorage.setItem("user", user.name);
                        localStorage.setItem("report_error", "true")
                        location.href = "/chat/";
@@ -280,6 +267,34 @@
                        event.preventDefault();
                        check_access_token();
                    });
+                   let oauthResult = localStorage.getItem("oauth");
+                   if (oauthResult) {
+                       try {
+                           oauthResult = JSON.parse(oauthResult);
+                       } catch {
+                           oauthResult = null;
+                       }
+                   }
+                   oauthResult ||= await oauthHandleRedirectIfPresent();
+                   if (oauthResult) {
+                       localStorage.setItem("oauth", JSON.stringify(oauthResult));
+                       localStorage.setItem("HuggingFace-api_key", oauthResult.accessToken);
+                       localStorage.setItem("user", oauthResult.userInfo.fullname);
+                       document.getElementById("signout").style.removeProperty("display");
+                       document.getElementById("signout").onclick = async function() {
+                           localStorage.removeItem("oauth");
+                           localStorage.removeItem("HuggingFace-api_key");
+                           window.location.href = window.location.href.replace(/\?.*$/, '');
+                           window.location.reload();
+                       }
+                   } else {
+                       document.getElementById("signin").style.removeProperty("display");
+                       document.getElementById("signin").onclick = async function() {
+                           // prompt=consent to re-trigger the consent screen instead of silently redirecting
+                           window.location.href = (await oauthLoginUrl({clientId: 'ed074164-4f8d-4fb2-8bec-44952707965e', scopes: ['inference-api']})) + "&prompt=consent";
+                       }
+                   }
                </script>
                <!-- Footer -->

View File

@@ -39,6 +39,7 @@ let finish_storage = {};
 let usage_storage = {};
 let reasoning_storage = {};
 let generate_storage = {};
+let title_ids_storage = {};
 let is_demo = false;
 let wakeLock = null;
 let countTokensEnabled = true;
@@ -74,6 +75,8 @@ if (window.markdownit) {
         )
         .replaceAll("<a href=", '<a target="_blank" href=')
         .replaceAll('<code>', '<code class="language-plaintext">')
+        .replaceAll('&lt;i class=&quot;', '<i class="')
+        .replaceAll('&quot;&gt;&lt;/i&gt;', '"></i>')
     }
 }
@@ -301,7 +304,9 @@ const register_message_buttons = async () => {
         const conversation = await get_conversation(window.conversation_id);
         let buffer = "";
         conversation.items.forEach(message => {
-            buffer += render_reasoning_text(message.reasoning);
+            if (message.reasoning) {
+                buffer += render_reasoning_text(message.reasoning);
+            }
             buffer += `${message.role == 'user' ? 'User' : 'Assistant'}: ${message.content.trim()}\n\n\n`;
         });
         var download = document.getElementById("download");
@@ -435,25 +440,27 @@ const handle_ask = async (do_ask_gpt = true) => {
             imageInput.dataset.objects = images.join(" ");
         }
     }
-    message_box.innerHTML += `
-        <div class="message" data-index="${message_index}">
-            <div class="user">
-                ${user_image}
-                <i class="fa-solid fa-xmark"></i>
-                <i class="fa-regular fa-phone-arrow-up-right"></i>
-            </div>
-            <div class="content" id="user_${message_id}">
-                <div class="content_inner">
-                    ${markdown_render(message)}
-                    ${images.map((object)=>'<img src="' + object + '" alt="Image upload">').join("")}
-                </div>
-                <div class="count">
-                    ${countTokensEnabled ? count_words_and_tokens(message, get_selected_model()?.value) : ""}
-                </div>
-            </div>
-        </div>
-    `;
-    highlight(message_box);
+    const message_el = document.createElement("div");
+    message_el.classList.add("message");
+    message_el.dataset.index = message_index;
+    message_el.innerHTML = `
+        <div class="user">
+            ${user_image}
+            <i class="fa-solid fa-xmark"></i>
+            <i class="fa-regular fa-phone-arrow-up-right"></i>
+        </div>
+        <div class="content" id="user_${message_id}">
+            <div class="content_inner">
+                ${markdown_render(message)}
+                ${images.map((object)=>`<img src="${object}" alt="Image upload">`).join("")}
+            </div>
+            <div class="count">
+                ${countTokensEnabled ? count_words_and_tokens(message, get_selected_model()?.value) : ""}
+            </div>
+        </div>
+    `;
+    message_box.appendChild(message_el);
+    highlight(message_el);
     if (do_ask_gpt) {
         const all_pinned = document.querySelectorAll(".buttons button.pinned")
         if (all_pinned.length > 0) {
@@ -1012,7 +1019,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     }
     try {
         let api_key;
-        if (is_demo && provider == "Feature") {
+        if (is_demo && ["OpenaiChat", "DeepSeekAPI", "PollinationsAI"].includes(provider)) {
             api_key = localStorage.getItem("user");
         } else if (["HuggingSpace", "G4F"].includes(provider)) {
             api_key = localStorage.getItem("HuggingSpace-api_key");
@@ -1096,9 +1103,30 @@ const clear_conversation = async () => {
     }
 };

+var illegalRe = /[\/\?<>\\:\*\|":]/g;
+var controlRe = /[\x00-\x1f\x80-\x9f]/g;
+var reservedRe = /^\.+$/;
+var windowsReservedRe = /^(con|prn|aux|nul|com[0-9]|lpt[0-9])(\..*)?$/i;
+
+function sanitize(input, replacement) {
+    var sanitized = input
+        .replace(illegalRe, replacement)
+        .replace(controlRe, replacement)
+        .replace(reservedRe, replacement)
+        .replace(windowsReservedRe, replacement);
+    return sanitized.replaceAll(/\/|#|\s{2,}/g, replacement).trim();
+}
+
 async function set_conversation_title(conversation_id, title) {
     conversation = await get_conversation(conversation_id)
     conversation.new_title = title;
+    const new_id = sanitize(title, " ");
+    if (new_id && !appStorage.getItem(`conversation:${new_id}`)) {
+        appStorage.removeItem(`conversation:${conversation.id}`);
+        title_ids_storage[conversation_id] = new_id;
+        conversation.id = new_id;
+        add_url_to_history(`/chat/${conversation_id}`);
+    }
     appStorage.setItem(
         `conversation:${conversation.id}`,
         JSON.stringify(conversation)
@@ -1123,6 +1151,7 @@ const show_option = async (conversation_id) => {
         input_el.onclick = (e) => e.stopPropagation()
         input_el.onfocus = () => trash_el.style.display = "none";
         input_el.onchange = () => set_conversation_title(conversation_id, input_el.value);
+        input_el.onblur = () => set_conversation_title(conversation_id, input_el.value);
         left_el.removeChild(title_el);
         left_el.appendChild(input_el);
     }
@@ -1162,6 +1191,9 @@ const delete_conversation = async (conversation_id) => {
 };

 const set_conversation = async (conversation_id) => {
+    if (title_ids_storage[conversation_id]) {
+        conversation_id = title_ids_storage[conversation_id];
+    }
     try {
         add_url_to_history(`/chat/${conversation_id}`);
     } catch (e) {
@@ -1912,11 +1944,11 @@ async function on_load() {
             messageInput.focus();
             //await handle_ask();
         }
-    } else if (/\/chat\/[^?]+/.test(window.location.href)) {
-        load_conversation(window.conversation_id);
-    } else {
+    } else if (/\/chat\/[?$]/.test(window.location.href)) {
         chatPrompt.value = document.getElementById("systemPrompt")?.value || "";
         say_hello();
+    } else {
+        load_conversation(window.conversation_id);
     }
     load_conversations();
 }
@@ -2007,7 +2039,8 @@ async function on_api() {
         }
         providerSelect.innerHTML = `
             <option value="" selected="selected">Demo Mode</option>
-            <option value="Feature">Feature Provider</option>
+            <option value="DeepSeekAPI">DeepSeek Provider</option>
+            <option value="OpenaiChat">OpenAI Provider</option>
             <option value="PollinationsAI">Pollinations AI</option>
             <option value="G4F">G4F framework</option>
             <option value="HuggingFace">HuggingFace</option>
@@ -2340,7 +2373,6 @@ fileInput.addEventListener('change', async (event) => {
             Object.keys(data).forEach(key => {
                 if (key == "options") {
                     Object.keys(data[key]).forEach(keyOption => {
-                        console.log(keyOption, data[key][keyOption]);
                         appStorage.setItem(keyOption, data[key][keyOption]);
                         count += 1;
                     });

View File

@@ -14,12 +14,6 @@ from pathlib import Path
 from urllib.parse import quote_plus
 from hashlib import sha256
 from werkzeug.utils import secure_filename
-try:
-    from flask_limiter import Limiter
-    from flask_limiter.util import get_remote_address
-    has_flask_limiter = True
-except ImportError:
-    has_flask_limiter = False

 from ...image import is_allowed_extension, to_image
 from ...client.service import convert_to_provider
@@ -62,19 +56,8 @@ class Backend_Api(Api):
         """
         self.app: Flask = app

-        if has_flask_limiter and app.demo:
-            limiter = Limiter(
-                get_remote_address,
-                app=app,
-                default_limits=["200 per day", "50 per hour"],
-                storage_uri="memory://",
-                auto_check=False,
-                strategy="moving-window",
-            )
-
-        if has_flask_limiter and app.demo:
+        if app.demo:
             @app.route('/', methods=['GET'])
-            @limiter.exempt
             def home():
                 return render_template('demo.html', backend_url=os.environ.get("G4F_BACKEND_URL", ""))
         else:
@@ -116,7 +99,7 @@ class Backend_Api(Api):
                 }
                 for model, providers in models.demo_models.values()]

-        def handle_conversation(limiter_check: callable = None):
+        def handle_conversation():
             """
             Handles conversation requests and streams responses back.
@@ -135,7 +118,7 @@ class Backend_Api(Api):
             else:
                 json_data = request.json

-            if app.demo and json_data.get("provider") not in ["Custom", "Feature", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
+            if app.demo and json_data.get("provider") not in ["DeepSeekAPI", "OpenaiChat", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
                 model = json_data.get("model")
                 if model != "default" and model in models.demo_models:
                     json_data["provider"] = random.choice(models.demo_models[model][1])
@@ -143,8 +126,6 @@ class Backend_Api(Api):
                 if not model or model == "default":
                     json_data["model"] = models.demo_models["default"][0].name
                     json_data["provider"] = random.choice(models.demo_models["default"][1])
-            if limiter_check is not None and json_data.get("provider") in ["Feature"]:
-                limiter_check()
             if "images" in json_data:
                 kwargs["images"] = json_data["images"]
             kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
@@ -158,15 +139,9 @@ class Backend_Api(Api):
                 mimetype='text/event-stream'
             )

-        if has_flask_limiter and app.demo:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            @limiter.limit("2 per minute")
-            def _handle_conversation():
-                return handle_conversation(limiter.check)
-        else:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            def _handle_conversation():
-                return handle_conversation()
+        @app.route('/backend-api/v2/conversation', methods=['POST'])
+        def _handle_conversation():
+            return handle_conversation()

         @app.route('/backend-api/v2/usage', methods=['POST'])
         def add_usage():
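
With flask_limiter gone, demo mode simply rewrites requests from unlisted providers to a random registered demo provider. A self-contained sketch of that routing rule, assuming demo_models maps a model name to (default_model, providers) as the hunk suggests (the real code stores model objects and reads .name; plain strings here for brevity):

    import random

    ALLOWED = ["DeepSeekAPI", "OpenaiChat", "HuggingFace", "HuggingSpace",
               "HuggingChat", "G4F", "PollinationsAI"]

    def route_demo(json_data: dict, demo_models: dict) -> dict:
        """Mirror of the demo-mode branch above: unlisted providers are
        replaced by a random provider registered for the requested model."""
        if json_data.get("provider") not in ALLOWED:
            model = json_data.get("model")
            if model != "default" and model in demo_models:
                json_data["provider"] = random.choice(demo_models[model][1])
            if not model or model == "default":
                json_data["model"] = demo_models["default"][0]
                json_data["provider"] = random.choice(demo_models["default"][1])
        return json_data

    # Example: route_demo({"provider": "Unknown", "model": "default"},
    #                     {"default": ("gpt-4o-mini", ["PollinationsAI", "HuggingChat"])})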

View File

@@ -33,8 +33,6 @@ class Website:
     def _chat(self, conversation_id):
         if conversation_id == "share":
             return render_template('index.html', chat_id=str(uuid.uuid4()))
-        if '-' not in conversation_id:
-            return redirect_home()
         return render_template('index.html', chat_id=conversation_id)

     def _index(self):

View File

@@ -87,7 +87,7 @@ class IterListProvider(BaseRetryProvider):
         for provider in self.get_providers(stream and not ignore_stream, ignored):
             self.last_provider = provider
             debug.log(f"Using {provider.__name__} provider")
-            yield ProviderInfo(**provider.get_dict())
+            yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
             try:
                 response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
                 if hasattr(response, "__aiter__"):