Mirror of https://github.com/xtekky/gpt4free.git
Fix model and provider in chat completion response
Add login button to HuggingFace demo
Custom conversation ids in chat ui
Remove rate limiter in demo mode
Improve YouTube support in Gemini
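Note: a minimal usage sketch (not part of the diff) of what this fix changes for API consumers. With provider info now attached to each completion, the response reports the provider that actually answered and the model it ran, rather than the requested alias. The Client interface shown is g4f's public API; that both fields end up populated is my reading of the client hunks below.

from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[{"role": "user", "content": "Hello"}],
)
# After this commit both fields should be filled from the ProviderInfo
# chunk yielded by IterListProvider (see the retry_provider hunk at the end).
print(response.provider, response.model)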
@@ -203,7 +203,14 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                                 skip -= 1
                                 continue
                             yield item
-                    reasoning = "".join(find_str(response_part[4][0], 3))
+                    reasoning = "\n\n".join(find_str(response_part[4][0], 3))
+                    reasoning = re.sub(r"<b>|</b>", "**", reasoning)
+                    def replace_image(match):
+                        return f"![](https:{match.group(0)})"
+                    reasoning = re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning)
+                    reasoning = re.sub(r"\nyoutube\n", "\n\n\n", reasoning)
+                    reasoning = re.sub(r"\nYouTube\n", "\nYouTube ", reasoning)
+                    reasoning = reasoning.replace('https://www.gstatic.com/images/branding/productlogos/youtube/v9/192px.svg', '<i class="fa-brands fa-youtube"></i>')
                     content = response_part[4][0][1][0]
                     if reasoning:
                         yield Reasoning(status="🤔")
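Note: the body of replace_image was mangled by the page renderer (it showed only `return f"})"`); the markdown-image form above is a reconstruction. A standalone sketch of the avatar rewrite, runnable as-is under that assumption:

import re

def replace_image(match):
    # prepend https: because the matched thumbnail URLs are protocol-relative
    return f"![](https:{match.group(0)})"

reasoning = "Channel avatar //yt3.ggpht.com/abc123= in the summary"
print(re.sub(r"//yt3.(?:ggpht.com|googleusercontent.com/ytc)/[\w=-]+", replace_image, reasoning))
# -> Channel avatar ![](https://yt3.ggpht.com/abc123=) in the summary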
@@ -215,8 +222,12 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                     if match:
                         image_prompt = match.group(1)
                         content = content.replace(match.group(0), '')
-                    pattern = r"http://googleusercontent.com/image_generation_content/\d+"
+                    pattern = r"http://googleusercontent.com/(?:image_generation|youtube)_content/\d+"
                     content = re.sub(pattern, "", content)
+                    content = content.replace("<!-- end list -->", "")
+                    content = content.replace("https://www.google.com/search?q=http://", "https://")
+                    content = content.replace("https://www.google.com/search?q=https://", "https://")
+                    content = content.replace("https://www.google.com/url?sa=E&source=gmail&q=http://", "http://")
                     if last_content and content.startswith(last_content):
                         yield content[len(last_content):]
                     else:
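Note: a quick check of the widened cleanup pattern; both image-generation and YouTube placeholder URLs are now stripped from the content:

import re

pattern = r"http://googleusercontent.com/(?:image_generation|youtube)_content/\d+"
text = "See http://googleusercontent.com/youtube_content/0 here"
print(re.sub(pattern, "", text))  # -> "See  here"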
@@ -170,7 +170,9 @@ class Api:
                 try:
                     user_g4f_api_key = await self.get_g4f_api_key(request)
                 except HTTPException:
-                    user_g4f_api_key = None
+                    user_g4f_api_key = await self.security(request)
+                    if hasattr(user_g4f_api_key, "credentials"):
+                        user_g4f_api_key = user_g4f_api_key.credentials
                 path = request.url.path
                 if path.startswith("/v1") or path.startswith("/api/") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
                     if user_g4f_api_key is None:
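Note: self.security here is FastAPI's bearer-token dependency; a hedged sketch of the fallback, assuming HTTPBearer(auto_error=False) so a missing Authorization header yields None instead of an error:

from fastapi import Request
from fastapi.security import HTTPBearer

security = HTTPBearer(auto_error=False)

async def resolve_api_key(request: Request):
    credentials = await security(request)  # None or HTTPAuthorizationCredentials
    return credentials.credentials if credentials is not None else None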
@@ -581,11 +583,17 @@ class Api:
                 pass
         if not os.path.isfile(target):
             source_url = get_source_url(str(request.query_params))
+            ssl = None
+            if source_url is None:
+                backend_url = os.environ.get("G4F_BACKEND_URL")
+                if backend_url:
+                    source_url = f"{backend_url}/images/{filename}"
+                    ssl = False
             if source_url is not None:
                 try:
                     await copy_images(
                         [source_url],
-                        target=target)
+                        target=target, ssl=ssl)
                     debug.log(f"Image copied from {source_url}")
                 except Exception as e:
                     debug.error(f"Download failed: {source_url}\n{type(e).__name__}: {e}")
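Note: this branch lets an instance fetch images it does not have locally from a configured backend; a sketch of just the URL resolution (ssl=False is presumably because the backend may serve an untrusted certificate, which the commit does not state):

import os

def resolve_image_source(filename: str, source_url: str | None):
    ssl = None
    if source_url is None:
        backend_url = os.environ.get("G4F_BACKEND_URL")
        if backend_url:
            source_url = f"{backend_url}/images/{filename}"
            ssl = False  # skip certificate verification for the backend host
    return source_url, ssl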
@@ -12,7 +12,7 @@ from typing import Union, AsyncIterator, Iterator, Awaitable, Optional
 from ..image.copy_images import copy_images
 from ..typing import Messages, ImageType
 from ..providers.types import ProviderType, BaseRetryProvider
-from ..providers.response import ResponseType, ImageResponse, FinishReason, BaseConversation, SynthesizeData, ToolCalls, Usage
+from ..providers.response import *
 from ..errors import NoImageResponseError
 from ..providers.retry_provider import IterListProvider
 from ..providers.asyncio import to_sync_generator
@@ -49,6 +49,7 @@ def iter_response(
     finish_reason = None
     tool_calls = None
     usage = None
+    provider: ProviderInfo = None
     completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
     idx = 0

@@ -65,10 +66,13 @@ def iter_response(
         elif isinstance(chunk, Usage):
             usage = chunk
             continue
+        elif isinstance(chunk, ProviderInfo):
+            provider = chunk
+            continue
         elif isinstance(chunk, BaseConversation):
             yield chunk
             continue
-        elif isinstance(chunk, SynthesizeData) or not chunk:
+        elif isinstance(chunk, HiddenResponse):
             continue
         elif isinstance(chunk, Exception):
             continue
@@ -76,7 +80,6 @@ def iter_response(
         if isinstance(chunk, list):
             chunk = "".join(map(str, chunk))
         else:
-
             temp = chunk.__str__()
             if not isinstance(temp, str):
                 if isinstance(temp, list):
@@ -84,6 +87,8 @@ def iter_response(
                 else:
                     temp = repr(chunk)
             chunk = temp
+        if not chunk:
+            continue

         content += chunk

@@ -96,7 +101,11 @@ def iter_response(
                 finish_reason = "stop"

         if stream:
-            yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+            chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+            if provider is not None:
+                chunk.provider = provider.name
+                chunk.model = provider.model
+            yield chunk

         if finish_reason is not None:
             break
@@ -108,7 +117,7 @@ def iter_response(
     finish_reason = "stop" if finish_reason is None else finish_reason

     if stream:
-        yield ChatCompletionChunk.model_construct(
+        chat_completion = ChatCompletionChunk.model_construct(
             None, finish_reason, completion_id, int(time.time()),
             usage=usage
         )
@@ -116,19 +125,24 @@ def iter_response(
     if response_format is not None and "type" in response_format:
         if response_format["type"] == "json_object":
             content = filter_json(content)
-    yield ChatCompletion.model_construct(
+    chat_completion = ChatCompletion.model_construct(
         content, finish_reason, completion_id, int(time.time()),
         usage=UsageModel.model_construct(**usage.get_dict()),
         **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
     )
+    if provider is not None:
+        chat_completion.provider = provider.name
+        chat_completion.model = provider.model
+    yield chat_completion

# Synchronous iter_append_model_and_provider function
def iter_append_model_and_provider(response: ChatCompletionResponseType, last_model: str, last_provider: ProviderType) -> ChatCompletionResponseType:
     if isinstance(last_provider, BaseRetryProvider):
         last_provider = last_provider.last_provider
+        yield from response
+        return
     for chunk in response:
         if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-            if last_provider is not None:
+            if chunk.provider is None and last_provider is not None:
                 chunk.model = getattr(last_provider, "last_model", last_model)
                 chunk.provider = last_provider.__name__
         yield chunk
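Note: the pattern these client hunks implement, in miniature: the provider stream yields a ProviderInfo chunk first, and the response iterator stamps its name and model onto every completion object instead of inspecting last_provider afterwards. A toy, self-contained reproduction (the class names here are stand-ins, not g4f's own):

from dataclasses import dataclass
from typing import Iterable, Iterator, Optional

@dataclass
class ProviderInfo:
    name: str
    model: str

@dataclass
class Completion:
    content: str
    provider: Optional[str] = None
    model: Optional[str] = None

def annotate(stream: Iterable) -> Iterator[Completion]:
    provider = None
    for item in stream:
        if isinstance(item, ProviderInfo):
            provider = item  # remember it, but do not yield it
            continue
        if provider is not None:
            item.provider, item.model = provider.name, provider.model
        yield item

for chunk in annotate([ProviderInfo("Gemini", "gemini-2.0"), Completion("Hello")]):
    print(chunk)  # Completion(content='Hello', provider='Gemini', model='gemini-2.0')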
@@ -146,6 +160,7 @@ async def async_iter_response(
     idx = 0
     tool_calls = None
     usage = None
+    provider: ProviderInfo = None

     try:
         async for chunk in response:
@@ -161,12 +176,17 @@ async def async_iter_response(
             elif isinstance(chunk, Usage):
                 usage = chunk
                 continue
-            elif isinstance(chunk, SynthesizeData) or not chunk:
+            elif isinstance(chunk, ProviderInfo):
+                provider = chunk
+                continue
+            elif isinstance(chunk, HiddenResponse):
                 continue
             elif isinstance(chunk, Exception):
                 continue

             chunk = str(chunk)
             if not chunk:
                 continue
             content += chunk
+            idx += 1

@@ -179,7 +199,11 @@ async def async_iter_response(
                     finish_reason = "stop"

             if stream:
-                yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                chunk = ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+                if provider is not None:
+                    chunk.provider = provider.name
+                    chunk.model = provider.model
+                yield chunk

             if finish_reason is not None:
                 break
@@ -190,7 +214,7 @@ async def async_iter_response(
             usage = Usage(completion_tokens=idx, total_tokens=idx)

         if stream:
-            yield ChatCompletionChunk.model_construct(
+            chat_completion = ChatCompletionChunk.model_construct(
                 None, finish_reason, completion_id, int(time.time()),
                 usage=usage.get_dict()
             )
@@ -198,11 +222,15 @@ async def async_iter_response(
         if response_format is not None and "type" in response_format:
             if response_format["type"] == "json_object":
                 content = filter_json(content)
-        yield ChatCompletion.model_construct(
+        chat_completion = ChatCompletion.model_construct(
             content, finish_reason, completion_id, int(time.time()),
             usage=UsageModel.model_construct(**usage.get_dict()),
             **filter_none(tool_calls=[ToolCallModel.model_construct(**tool_call) for tool_call in tool_calls]) if tool_calls is not None else {}
         )
+        if provider is not None:
+            chat_completion.provider = provider.name
+            chat_completion.model = provider.model
+        yield chat_completion
     finally:
         await safe_aclose(response)
@@ -214,11 +242,12 @@ async def async_iter_append_model_and_provider(
     last_provider = None
     try:
-        if isinstance(last_provider, BaseRetryProvider):
+        if last_provider is not None:
             last_provider = last_provider.last_provider
+            async for chunk in response:
+                yield chunk
+            return
         async for chunk in response:
             if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
-                if last_provider is not None:
+                if chunk.provider is None and last_provider is not None:
                     chunk.model = getattr(last_provider, "last_model", last_model)
                     chunk.provider = last_provider.__name__
             yield chunk
@@ -183,25 +183,10 @@
         const isIframe = window.self !== window.top;
         const backendUrl = "{{backend_url}}";
         let url = new URL(window.location.href)
         let params = new URLSearchParams(url.search);
         if (isIframe && backendUrl) {
-            if (params.get("get_gpu_token")) {
-                window.addEventListener('DOMContentLoaded', async function() {
-                    const link = document.getElementById("new_window");
-                    link.href = `${backendUrl}${url.search}`;
-                    link.click();
-                });
-            } else {
-                window.location.replace(`${backendUrl}${url.search}`);
-            }
             return;
         }
-        if (params.get("__sign")) {
-            localStorage.setItem("HuggingSpace-api_key", params.get("__sign"));
-            if (!isIframe) {
-                window.location.replace("/");
-            }
-        }
     })();
     </script>
     <script src="https://unpkg.com/es-module-shims@1.7.0/dist/es-module-shims.js"></script>
@@ -240,10 +225,13 @@
             <p>
                 <a href="https://huggingface.co/settings/tokens" target="_blank">Get Access Token</a>
             </p>
+            <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/sign-in-with-huggingface-xl-dark.svg" alt="Sign in with Hugging Face" style="cursor: pointer; display: none;" id="signin">
+            <button id="signout" style="display: none">Sign out</button>
         </form>
         <script type="module">
             import * as hub from "@huggingface/hub";
             import { init } from "@huggingface/space-header";
+            import { oauthLoginUrl, oauthHandleRedirectIfPresent } from "@huggingface/hub";

             const isIframe = window.self !== window.top;
             const button = document.querySelector('form a.button');
@@ -269,7 +257,6 @@
                 return;
             }
             localStorage.setItem("HuggingFace-api_key", accessToken);
-            localStorage.setItem("HuggingFace-user", JSON.stringify(user));
             localStorage.setItem("user", user.name);
             localStorage.setItem("report_error", "true")
             location.href = "/chat/";
@@ -280,6 +267,34 @@
                 event.preventDefault();
                 check_access_token();
             });
+
+            let oauthResult = localStorage.getItem("oauth");
+            if (oauthResult) {
+                try {
+                    oauthResult = JSON.parse(oauthResult);
+                } catch {
+                    oauthResult = null;
+                }
+            }
+            oauthResult ||= await oauthHandleRedirectIfPresent();
+            if (oauthResult) {
+                localStorage.setItem("oauth", JSON.stringify(oauthResult));
+                localStorage.setItem("HuggingFace-api_key", oauthResult.accessToken);
+                localStorage.setItem("user", oauthResult.userInfo.fullname);
+                document.getElementById("signout").style.removeProperty("display");
+                document.getElementById("signout").onclick = async function() {
+                    localStorage.removeItem("oauth");
+                    localStorage.removeItem("HuggingFace-api_key");
+                    window.location.href = window.location.href.replace(/\?.*$/, '');
+                    window.location.reload();
+                }
+            } else {
+                document.getElementById("signin").style.removeProperty("display");
+                document.getElementById("signin").onclick = async function() {
+                    // prompt=consent to re-trigger the consent screen instead of silently redirecting
+                    window.location.href = (await oauthLoginUrl({clientId: 'ed074164-4f8d-4fb2-8bec-44952707965e', scopes: ['inference-api']})) + "&prompt=consent";
+                }
+            }
         </script>

         <!-- Footer -->
@@ -39,6 +39,7 @@ let finish_storage = {};
 let usage_storage = {};
 let reasoning_storage = {};
 let generate_storage = {};
+let title_ids_storage = {};
 let is_demo = false;
 let wakeLock = null;
 let countTokensEnabled = true;
@@ -74,6 +75,8 @@ if (window.markdownit) {
         )
         .replaceAll("<a href=", '<a target="_blank" href=')
         .replaceAll('<code>', '<code class="language-plaintext">')
+        .replaceAll('&lt;i class="', '<i class="')
+        .replaceAll('"&gt;&lt;/i&gt;', '"></i>')
     }
 }

@@ -301,7 +304,9 @@ const register_message_buttons = async () => {
         const conversation = await get_conversation(window.conversation_id);
         let buffer = "";
         conversation.items.forEach(message => {
+            if (message.reasoning) {
+                buffer += render_reasoning_text(message.reasoning);
+            }
             buffer += `${message.role == 'user' ? 'User' : 'Assistant'}: ${message.content.trim()}\n\n\n`;
         });
         var download = document.getElementById("download");
@@ -435,8 +440,10 @@ const handle_ask = async (do_ask_gpt = true) => {
             imageInput.dataset.objects = images.join(" ");
         }
     }
-    message_box.innerHTML += `
-        <div class="message" data-index="${message_index}">
+    const message_el = document.createElement("div");
+    message_el.classList.add("message");
+    message_el.dataset.index = message_index;
+    message_el.innerHTML = `
         <div class="user">
             ${user_image}
             <i class="fa-solid fa-xmark"></i>
@@ -445,15 +452,15 @@ const handle_ask = async (do_ask_gpt = true) => {
         <div class="content" id="user_${message_id}">
             <div class="content_inner">
                 ${markdown_render(message)}
-                ${images.map((object)=>'<img src="' + object + '" alt="Image upload">').join("")}
+                ${images.map((object)=>`<img src="${object}" alt="Image upload">`).join("")}
             </div>
             <div class="count">
                 ${countTokensEnabled ? count_words_and_tokens(message, get_selected_model()?.value) : ""}
             </div>
         </div>
     </div>
     `;
-    highlight(message_box);
+    message_box.appendChild(message_el);
+    highlight(message_el);
     if (do_ask_gpt) {
         const all_pinned = document.querySelectorAll(".buttons button.pinned")
         if (all_pinned.length > 0) {
@@ -1012,7 +1019,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     }
     try {
         let api_key;
-        if (is_demo && provider == "Feature") {
+        if (is_demo && ["OpenaiChat", "DeepSeekAPI", "PollinationsAI"].includes(provider)) {
             api_key = localStorage.getItem("user");
         } else if (["HuggingSpace", "G4F"].includes(provider)) {
             api_key = localStorage.getItem("HuggingSpace-api_key");
@@ -1096,9 +1103,30 @@ const clear_conversation = async () => {
     }
 };

+var illegalRe = /[\/\?<>\\:\*\|":]/g;
+var controlRe = /[\x00-\x1f\x80-\x9f]/g;
+var reservedRe = /^\.+$/;
+var windowsReservedRe = /^(con|prn|aux|nul|com[0-9]|lpt[0-9])(\..*)?$/i;
+
+function sanitize(input, replacement) {
+    var sanitized = input
+        .replace(illegalRe, replacement)
+        .replace(controlRe, replacement)
+        .replace(reservedRe, replacement)
+        .replace(windowsReservedRe, replacement);
+    return sanitized.replaceAll(/\/|#|\s{2,}/g, replacement).trim();
+}
+
 async function set_conversation_title(conversation_id, title) {
     conversation = await get_conversation(conversation_id)
     conversation.new_title = title;
+    const new_id = sanitize(title, " ");
+    if (new_id && !appStorage.getItem(`conversation:${new_id}`)) {
+        appStorage.removeItem(`conversation:${conversation.id}`);
+        title_ids_storage[conversation_id] = new_id;
+        conversation.id = new_id;
+        add_url_to_history(`/chat/${conversation_id}`);
+    }
     appStorage.setItem(
         `conversation:${conversation.id}`,
         JSON.stringify(conversation)
@@ -1123,6 +1151,7 @@ const show_option = async (conversation_id) => {
         input_el.onclick = (e) => e.stopPropagation()
         input_el.onfocus = () => trash_el.style.display = "none";
         input_el.onchange = () => set_conversation_title(conversation_id, input_el.value);
+        input_el.onblur = () => set_conversation_title(conversation_id, input_el.value);
         left_el.removeChild(title_el);
         left_el.appendChild(input_el);
     }
@@ -1162,6 +1191,9 @@ const delete_conversation = async (conversation_id) => {
 };

 const set_conversation = async (conversation_id) => {
+    if (title_ids_storage[conversation_id]) {
+        conversation_id = title_ids_storage[conversation_id];
+    }
     try {
         add_url_to_history(`/chat/${conversation_id}`);
     } catch (e) {
@@ -1912,11 +1944,11 @@ async function on_load() {
             messageInput.focus();
             //await handle_ask();
         }
-    } else if (/\/chat\/[^?]+/.test(window.location.href)) {
-        load_conversation(window.conversation_id);
-    } else {
+    } else if (/\/chat\/[?$]/.test(window.location.href)) {
         chatPrompt.value = document.getElementById("systemPrompt")?.value || "";
         say_hello();
+    } else {
+        load_conversation(window.conversation_id);
     }
     load_conversations();
 }
@@ -2007,7 +2039,8 @@ async function on_api() {
         }
         providerSelect.innerHTML = `
             <option value="" selected="selected">Demo Mode</option>
-            <option value="Feature">Feature Provider</option>
+            <option value="DeepSeekAPI">DeepSeek Provider</option>
+            <option value="OpenaiChat">OpenAI Provider</option>
             <option value="PollinationsAI">Pollinations AI</option>
             <option value="G4F">G4F framework</option>
             <option value="HuggingFace">HuggingFace</option>
@@ -2340,7 +2373,6 @@ fileInput.addEventListener('change', async (event) => {
         Object.keys(data).forEach(key => {
             if (key == "options") {
                 Object.keys(data[key]).forEach(keyOption => {
-                    console.log(keyOption, data[key][keyOption]);
                     appStorage.setItem(keyOption, data[key][keyOption]);
                     count += 1;
                 });
@@ -14,12 +14,6 @@ from pathlib import Path
 from urllib.parse import quote_plus
 from hashlib import sha256
 from werkzeug.utils import secure_filename
-try:
-    from flask_limiter import Limiter
-    from flask_limiter.util import get_remote_address
-    has_flask_limiter = True
-except ImportError:
-    has_flask_limiter = False

 from ...image import is_allowed_extension, to_image
 from ...client.service import convert_to_provider
@@ -62,19 +56,8 @@ class Backend_Api(Api):
         """
         self.app: Flask = app

-        if has_flask_limiter and app.demo:
-            limiter = Limiter(
-                get_remote_address,
-                app=app,
-                default_limits=["200 per day", "50 per hour"],
-                storage_uri="memory://",
-                auto_check=False,
-                strategy="moving-window",
-            )
-
-        if has_flask_limiter and app.demo:
+        if app.demo:
             @app.route('/', methods=['GET'])
-            @limiter.exempt
             def home():
                 return render_template('demo.html', backend_url=os.environ.get("G4F_BACKEND_URL", ""))
         else:
@@ -116,7 +99,7 @@ class Backend_Api(Api):
             }
             for model, providers in models.demo_models.values()]

-        def handle_conversation(limiter_check: callable = None):
+        def handle_conversation():
             """
             Handles conversation requests and streams responses back.

@@ -135,7 +118,7 @@ class Backend_Api(Api):
             else:
                 json_data = request.json

-            if app.demo and json_data.get("provider") not in ["Custom", "Feature", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
+            if app.demo and json_data.get("provider") not in ["DeepSeekAPI", "OpenaiChat", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
                 model = json_data.get("model")
                 if model != "default" and model in models.demo_models:
                     json_data["provider"] = random.choice(models.demo_models[model][1])
@@ -143,8 +126,6 @@ class Backend_Api(Api):
                 if not model or model == "default":
                     json_data["model"] = models.demo_models["default"][0].name
                     json_data["provider"] = random.choice(models.demo_models["default"][1])
-            if limiter_check is not None and json_data.get("provider") in ["Feature"]:
-                limiter_check()
             if "images" in json_data:
                 kwargs["images"] = json_data["images"]
             kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
@@ -158,12 +139,6 @@ class Backend_Api(Api):
                 mimetype='text/event-stream'
             )

-        if has_flask_limiter and app.demo:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            @limiter.limit("2 per minute")
-            def _handle_conversation():
-                return handle_conversation(limiter.check)
-        else:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            def _handle_conversation():
-                return handle_conversation()
+        @app.route('/backend-api/v2/conversation', methods=['POST'])
+        def _handle_conversation():
+            return handle_conversation()
@@ -33,8 +33,6 @@ class Website:
     def _chat(self, conversation_id):
         if conversation_id == "share":
             return render_template('index.html', chat_id=str(uuid.uuid4()))
-        if '-' not in conversation_id:
-            return redirect_home()
         return render_template('index.html', chat_id=conversation_id)

     def _index(self):
@@ -87,7 +87,7 @@ class IterListProvider(BaseRetryProvider):
         for provider in self.get_providers(stream and not ignore_stream, ignored):
             self.last_provider = provider
             debug.log(f"Using {provider.__name__} provider")
-            yield ProviderInfo(**provider.get_dict())
+            yield ProviderInfo(**provider.get_dict(), model=model if model else getattr(provider, "default_model"))
             try:
                 response = provider.get_async_create_function()(model, messages, stream=stream, **kwargs)
                 if hasattr(response, "__aiter__"):
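Note: the one-line change above in words: the ProviderInfo chunk now also records which model will actually run, falling back to the provider's default_model attribute when the caller passed none. A hedged sketch of the resolution:

def build_provider_info(provider, model):
    # provider.get_dict() supplies the provider's name/url fields;
    # the effective model is resolved here rather than after the fact
    return ProviderInfo(**provider.get_dict(),
                        model=model if model else getattr(provider, "default_model"))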