Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-10-23 16:23:33 +08:00
Add legacy port to docker-compose files
Update demo model list
Disable upload cookies in demo
Track usage in demo mode
Add messages without asking the AI
Add hint for browser usage in provider list
Add qwen2 prompt template to HuggingFace provider
Trim automatic messages in HuggingFaceAPI
@@ -12,3 +12,4 @@ services:
       - .:/app
     ports:
       - '8080:8080'
+      - '1337:8080'
@@ -11,6 +11,7 @@ services:
       - .:/app
     ports:
       - '8080:8080'
+      - '1337:8080'
       - '7900:7900'
     environment:
       - OLLAMA_HOST=host.docker.internal
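With both mappings in place, the container keeps serving on 8080 while the legacy host port 1337 forwards to the same container port. A quick smoke test against a local deployment (a sketch; host, ports, and the /v1/models path assume a default setup):

    import requests

    # Both host ports should answer identically, since 1337 maps to the same container port.
    for port in (8080, 1337):
        resp = requests.get(f"http://localhost:{port}/v1/models", timeout=10)
        print(port, resp.status_code)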
@@ -15,9 +15,10 @@ from ..errors import ResponseStatusError, ModelNotFoundError
 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Cloudflare AI"
     url = "https://playground.ai.cloudflare.com"
+    working = True
+    use_nodriver = True
     api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
     models_url = "https://playground.ai.cloudflare.com/api/models"
-    working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
@@ -69,7 +69,6 @@ class Copilot(AbstractProvider, ProviderModelMixin):
         conversation: BaseConversation = None,
         return_conversation: bool = False,
         api_key: str = None,
-        web_search: bool = False,
         **kwargs
     ) -> CreateResult:
         if not has_curl_cffi:
@@ -9,7 +9,9 @@ from ..requests import StreamSession, get_args_from_nodriver, raise_for_status,
 class Pi(AsyncGeneratorProvider):
     url = "https://pi.ai/talk"
     working = True
+    use_nodriver = True
+    supports_stream = True
     use_nodriver = True
     default_model = "pi"
     models = [default_model]
     _headers: dict = None
@@ -75,6 +75,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         try:
             cookies = get_cookies(".you.com")
         except MissingRequirementsError:
             pass
         if not cookies or "afUserId" not in cookies:
             browser = await get_nodriver(proxy=proxy)
             try:
                 page = await browser.get(cls.url)
@@ -22,6 +22,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin):
     label = "Hailuo AI"
     url = "https://www.hailuo.ai"
     working = True
+    use_nodriver = True
     supports_stream = True
     default_model = "MiniMax"
@@ -4,6 +4,7 @@ from ..Copilot import Copilot

 class CopilotAccount(Copilot):
     needs_auth = True
+    use_nodriver = True
     parent = "Copilot"
     default_model = "Copilot"
     default_vision_model = default_model
@@ -58,6 +58,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):

     needs_auth = True
     working = True
+    use_nodriver = True

     default_model = 'gemini'
     default_image_model = default_model
@@ -32,6 +32,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"

     working = True
+    use_nodriver = True
     supports_stream = True
     needs_auth = True

@@ -68,10 +69,11 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
         ### Image ###
         "flux-dev": "black-forest-labs/FLUX.1-dev",
         "flux-schnell": "black-forest-labs/FLUX.1-schnell",
-        ### API ###
+        ### Used in other providers ###
         "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct",
         "gemma-2-27b": "google/gemma-2-27b-it",
-        "qvq-72b": "Qwen/QVQ-72B-Preview"
+        "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
+        "qvq-72b": "Qwen/QVQ-72B-Preview",
     }

     @classmethod
@@ -102,6 +102,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         ) as session:
             if payload is None:
                 async with session.get(f"https://huggingface.co/api/models/{model}") as response:
+                    if response.status == 404:
+                        raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
                     await raise_for_status(response)
                     model_data = await response.json()
                     model_type = None
@@ -172,6 +174,14 @@ def format_prompt_qwen(messages: Messages, do_continue: bool = False) -> str:
         return prompt[:-len("\n<|im_end|>\n")]
     return prompt

+def format_prompt_qwen2(messages: Messages, do_continue: bool = False) -> str:
+    prompt = "".join([
+        f"<|{message['role'].capitalize()}|>{message['content']}<|end▁of▁sentence|>" for message in messages
+    ]) + ("" if do_continue else "<|Assistant|>")
+    if do_continue:
+        return prompt[:-len("<|Assistant|>")]
+    return prompt
+
 def format_prompt_llama(messages: Messages, do_continue: bool = False) -> str:
     prompt = "<|begin_of_text|>" + "".join([
         f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages
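To see what the new qwen2 template above produces, here is a worked example of format_prompt_qwen2 as committed (message contents are illustrative):

    messages = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "How are you?"},
    ]
    # format_prompt_qwen2(messages) returns:
    # <|User|>Hi<|end▁of▁sentence|><|Assistant|>Hello!<|end▁of▁sentence|><|User|>How are you?<|end▁of▁sentence|><|Assistant|>
    # With do_continue=True the function trims the tail instead, so the last
    # assistant message stays open for continuation.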
@@ -199,6 +209,8 @@ def get_inputs(messages: Messages, model_data: dict, model_type: str, do_continu
         inputs = format_prompt_custom(messages, eos_token, do_continue)
     elif eos_token == "<|im_end|>":
         inputs = format_prompt_qwen(messages, do_continue)
+    elif "content" in eos_token and eos_token["content"] == "<|end▁of▁sentence|>":
+        inputs = format_prompt_qwen2(messages, do_continue)
     elif eos_token == "<|eot_id|>":
         inputs = format_prompt_llama(messages, do_continue)
     else:
@@ -3,6 +3,7 @@ from __future__ import annotations
 from .OpenaiTemplate import OpenaiTemplate
 from .HuggingChat import HuggingChat
 from ...providers.types import Messages
+from ... import debug

 class HuggingFaceAPI(OpenaiTemplate):
     label = "HuggingFace (Inference API)"
@@ -31,6 +32,7 @@ class HuggingFaceAPI(OpenaiTemplate):
         messages: Messages,
         api_base: str = None,
         max_tokens: int = 2048,
+        max_inputs_lenght: int = 10000,
         **kwargs
     ):
         if api_base is None:
@@ -38,5 +40,18 @@ class HuggingFaceAPI(OpenaiTemplate):
         if model in cls.model_aliases:
             model_name = cls.model_aliases[model]
             api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1"
+        start = calculate_lenght(messages)
+        if start > max_inputs_lenght:
+            if len(messages) > 6:
+                messages = messages[:3] + messages[-3:]
+            if calculate_lenght(messages) > max_inputs_lenght:
+                if len(messages) > 2:
+                    messages = [m for m in messages if m["role"] == "system"] + messages[-1:]
+                if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght:
+                    messages = [messages[-1]]
+            debug.log(f"Messages trimmed from: {start} to: {calculate_lenght(messages)}")
         async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs):
             yield chunk
+
+def calculate_lenght(messages: Messages) -> int:
+    return sum([len(message["content"]) + 16 for message in messages])
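The trimming drops middle history in stages until the estimated length fits: first keep the first three and last three messages, then only system messages plus the last message, then the last message alone. calculate_lenght (spelling as committed) estimates size as content length plus 16 characters of overhead per message. A standalone sketch of the same staged logic, with hypothetical message sizes for illustration:

    def calculate_lenght(messages):
        return sum(len(m["content"]) + 16 for m in messages)

    messages = [{"role": "user", "content": "x" * 2000} for _ in range(8)]
    max_inputs_lenght = 10000
    if calculate_lenght(messages) > max_inputs_lenght:
        if len(messages) > 6:
            messages = messages[:3] + messages[-3:]  # stage 1: drop the middle
        if calculate_lenght(messages) > max_inputs_lenght:
            if len(messages) > 2:
                # stage 2: system messages plus the newest message
                messages = [m for m in messages if m["role"] == "system"] + messages[-1:]
            if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght:
                messages = [messages[-1]]  # last resort: newest message only
    print(len(messages), calculate_lenght(messages))  # -> 1 2016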
@@ -21,6 +21,7 @@ class MicrosoftDesigner(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Microsoft Designer"
     url = "https://designer.microsoft.com"
     working = True
+    use_nodriver = True
     needs_auth = True
     default_image_model = "dall-e-3"
     image_models = [default_image_model, "1024x1024", "1024x1792", "1792x1024"]
@@ -91,6 +91,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     label = "OpenAI ChatGPT"
     url = "https://chatgpt.com"
     working = True
+    use_nodriver = True
     supports_gpt_4 = True
     supports_message_history = True
     supports_system_message = True
@@ -169,7 +169,7 @@ class Api:
         except HTTPException:
             user_g4f_api_key = None
         path = request.url.path
-        if path.startswith("/v1"):
+        if path.startswith("/v1") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
             if user_g4f_api_key is None:
                 return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
             if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
@@ -12,7 +12,7 @@ def get_api_parser():
     api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
     api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.")
     api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
-    api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Add gui to the api.")
+    api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Start also the gui.")
     api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
     api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
                             default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)")
@@ -28,7 +28,7 @@ def get_api_parser():
     api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers],
                             default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)")
     api_parser.add_argument("--reload", action="store_true", help="Enable reloading.")
-    api_parser.add_argument("--demo", action="store_true", help="Enable demo modus.")
+    api_parser.add_argument("--demo", action="store_true", help="Enable demo mode.")
     return api_parser

 def main():
@@ -215,6 +215,7 @@
             }
             localStorage.setItem("HuggingFace-api_key", accessToken);
             localStorage.setItem("HuggingFace-user", JSON.stringify(user));
+            localStorage.setItem("user", user.name);
             location.href = "/chat/";
         }
         input.addEventListener("input", () => check_access_token());
@@ -142,6 +142,11 @@
                 <input type="checkbox" id="refine"/>
                 <label for="refine" class="toogle" title=""></label>
             </div>
+            <div class="field">
+                <span class="label">Track usage</span>
+                <input type="checkbox" id="track_usage"/>
+                <label for="track_usage" class="toogle" title=""></label>
+            </div>
             <div class="field box">
                 <label for="systemPrompt" class="label" title="">System prompt</label>
                 <textarea id="systemPrompt" placeholder="You are a helpful assistant."></textarea>
@@ -184,6 +189,12 @@
                     <a href="" onclick="return false;">Show log</a>
                 </button>
             </div>
+            <div class="bottom_buttons memory hidden">
+                <button onclick="import_memory()">
+                    <i class="fa-solid fa-arrow-up-from-bracket"></i>
+                    <a href="" onclick="return false;">Import Messages to Memory</a>
+                </button>
+            </div>
         </div>
         <div class="provider_forms hidden">
             <div class="bottom_buttons">
@@ -228,7 +239,7 @@
                 <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
                     style="white-space: pre-wrap;resize: none;"></textarea>
                 <label class="file-label image-label" for="image" title="">
-                    <input type="file" id="image" name="image" accept="image/*" required multiple/>
+                    <input type="file" id="image" name="image" accept="image/*" required multiple capture="filesystem"/>
                     <i class="fa-regular fa-image"></i>
                 </label>
                 <label class="file-label image-label" for="camera">
@@ -243,6 +254,7 @@
                     <i class="fa-solid fa-microphone-slash"></i>
                 </label>
                 <div id="send-button">
+                    <i class="fa-solid fa-square-plus"></i>
                     <i class="fa-regular fa-paper-plane"></i>
                 </div>
             </div>
@@ -361,7 +361,7 @@ const delete_conversations = async () => {
     await new_conversation();
 };

-const handle_ask = async () => {
+const handle_ask = async (do_ask_gpt = true) => {
     messageInput.style.height = "82px";
     messageInput.focus();
     await scroll_to_bottom();
@@ -377,18 +377,20 @@ const handle_ask = async () => {
     let message_index = await add_message(window.conversation_id, "user", message);
     let message_id = get_message_id();

+    let images = [];
+    if (do_ask_gpt) {
         if (imageInput.dataset.objects) {
             imageInput.dataset.objects.split(" ").forEach((object)=>URL.revokeObjectURL(object))
             delete imageInput.dataset.objects;
         }
         const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
         images = [];
         if (input.files.length > 0) {
             for (const file of input.files) {
                 images.push(URL.createObjectURL(file));
             }
             imageInput.dataset.objects = images.join(" ");
         }
+    }
     message_box.innerHTML += `
         <div class="message" data-index="${message_index}">
             <div class="user">
@@ -408,7 +410,7 @@ const handle_ask = async () => {
         </div>
     `;
     highlight(message_box);

+    if (do_ask_gpt) {
     const all_pinned = document.querySelectorAll(".buttons button.pinned")
     if (all_pinned.length > 0) {
         all_pinned.forEach((el, idx) => ask_gpt(
@@ -421,6 +423,9 @@ const handle_ask = async () => {
         } else {
             await ask_gpt(message_id);
         }
+    } else {
+        await lazy_scroll_to_bottom();
+    }
 };

 async function safe_remove_cancel_button() {
@@ -450,7 +455,9 @@ stop_generating.addEventListener("click", async () => {
     for (key in controller_storage) {
         if (!controller_storage[key].signal.aborted) {
             console.log(`aborted ${window.conversation_id} #${key}`);
+            try {
             controller_storage[key].abort();
+            } finally {
             let message = message_storage[key];
             if (message) {
                 content_storage[key].inner.innerHTML += " [aborted]";
@@ -458,6 +465,7 @@ stop_generating.addEventListener("click", async () => {
                 }
+            }
         }
     }
     await load_conversation(window.conversation_id, false);
 });
@@ -486,7 +494,7 @@ const prepare_messages = (messages, message_index = -1, do_continue = false, do_
             }
         }
     }
-    // Combine messages with same role
+    // Combine assistant messages
     let last_message;
     let new_messages = [];
     messages.forEach((message) => {
@@ -503,22 +511,26 @@ const prepare_messages = (messages, message_index = -1, do_continue = false, do_
     messages = new_messages;

     // Insert system prompt as first message
-    new_messages = [];
+    let final_messages = [];
     if (chatPrompt?.value) {
-        new_messages.push({
+        final_messages.push({
             "role": "system",
             "content": chatPrompt.value
         });
     }

-    // Remove history, if it's selected
-    if (document.getElementById('history')?.checked && do_filter) {
-        if (message_index == null) {
-            messages = [messages.pop(), messages.pop()];
-        } else {
-            messages = [messages.pop()];
+    // Remove history, only add new user messages
+    let filtered_messages = [];
+    // The message_index is null on count total tokens
+    if (document.getElementById('history')?.checked && do_filter && message_index != null) {
+        while (last_message = messages.pop()) {
+            if (last_message["role"] == "user") {
+                filtered_messages.push(last_message);
+                break;
+            }
         }
+        messages = filtered_messages.reverse();
     }

     messages.forEach((new_message, i) => {
         // Copy message first
@@ -845,28 +857,63 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     }
     if (message_storage[message_id]) {
         const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
+        let usage;
+        if (usage_storage[message_id]) {
+            usage = usage_storage[message_id];
+            delete usage_storage[message_id];
+        }
+        // Calculate usage if we have no usage result jet
+        if (document.getElementById("track_usage").checked && !usage && window.GPTTokenizer_cl100k_base) {
+            const prompt_token_model = model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
+            const prompt_tokens = GPTTokenizer_cl100k_base?.encodeChat(messages, prompt_token_model).length;
+            const completion_tokens = count_tokens(message_provider?.model, message_storage[message_id]);
+            usage = {
+                model: message_provider?.model,
+                provider: message_provider?.name,
+                prompt_tokens: prompt_tokens,
+                completion_tokens: completion_tokens,
+                total_tokens: prompt_tokens + completion_tokens
+            }
+        }
+        // It is not regenerated, if it is the first response to a new question
+        if (regenerate && message_index == -1) {
+            let conversation = await get_conversation(window.conversation_id);
+            regenerate = conversation.items[conversation.items.length-1]["role"] != "user";
+        }
+        // Create final message content
+        const final_message = message_storage[message_id]
+            + (error_storage[message_id] ? " [error]" : "")
+            + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : "")
         // Save message in local storage
         await add_message(
             window.conversation_id,
             "assistant",
-            message_storage[message_id] + (error_storage[message_id] ? " [error]" : "") + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : ""),
+            final_message,
             message_provider,
             message_index,
             synthesize_storage[message_id],
             regenerate,
             title_storage[message_id],
             finish_storage[message_id],
-            usage_storage[message_id],
+            usage,
             reasoning_storage[message_id],
             action=="continue"
         );
         delete message_storage[message_id];
+        // Send usage to the server
+        if (document.getElementById("track_usage").checked) {
+            const user = localStorage.getItem("user");
+            if (user) {
+                usage = {user: user, ...usage};
+            }
+            api("usage", usage);
+        }
     }
     // Update controller storage
     if (controller_storage[message_id]) {
         if (!controller_storage[message_id].signal.aborted) {
             controller_storage[message_id].abort();
         }
         delete controller_storage[message_id];
     }
     // Reload conversation if no error
     if (!error_storage[message_id]) {
         await safe_load_conversation(window.conversation_id, scroll);
     }
@@ -1485,22 +1532,27 @@ async function hide_sidebar() {
     settings.classList.add("hidden");
     chat.classList.remove("hidden");
     log_storage.classList.add("hidden");
+    await hide_settings();
     if (window.location.pathname == "/menu/" || window.location.pathname == "/settings/") {
         history.back();
     }
 }

-window.addEventListener('popstate', hide_sidebar, false);
-
-sidebar_button.addEventListener("click", async () => {
+async function hide_settings() {
+    settings.classList.add("hidden");
+    let provider_forms = document.querySelectorAll(".provider_forms from");
+    Array.from(provider_forms).forEach((form) => form.classList.add("hidden"));
+}
+
 window.addEventListener('popstate', hide_sidebar, false);

 sidebar_button.addEventListener("click", async () => {
     if (sidebar.classList.contains("shown")) {
         await hide_sidebar();
     } else {
         sidebar.classList.add("shown");
         sidebar_button.classList.add("rotated");
+        await hide_settings();
         add_url_to_history("/menu/");
     }
     window.scrollTo(0, 0);
@@ -1797,13 +1849,16 @@ async function on_api() {
             messageInput.style.height = messageInput.scrollHeight + "px";
         }
     });
-    sendButton.addEventListener(`click`, async () => {
+    sendButton.querySelector(".fa-paper-plane").addEventListener(`click`, async () => {
         console.log("clicked send");
         if (prompt_lock) return;
         prompt_lock = true;
         setTimeout(()=>prompt_lock=false, 3000);
         await handle_ask();
     });
+    sendButton.querySelector(".fa-square-plus").addEventListener(`click`, async () => {
+        await handle_ask(false);
+    });
     messageInput.focus();
     let provider_options = [];
     models = await api("models");
@@ -1817,13 +1872,16 @@ async function on_api() {
     });
     let login_urls;
     if (is_demo) {
-        if (!localStorage.getItem("HuggingFace-api_key")) {
+        if (!localStorage.getItem("user")) {
             location.href = "/";
             return;
         }
-        providerSelect.innerHTML = '<option value="" selected>Demo Modus</option>'
-        document.getElementById("pin")?.remove();
+        providerSelect.innerHTML = '<option value="" selected>Demo Mode</option>'
+        document.getElementById("pin").disabled = true;
         document.getElementById("refine")?.parentElement.remove();
+        const track_usage = document.getElementById("track_usage");
+        track_usage.checked = true;
+        track_usage.disabled = true;
         Array.from(modelSelect.querySelectorAll(':not([data-providers])')).forEach((option)=>{
             if (!option.disabled && option.value) {
                 option.remove();
@@ -1844,8 +1902,8 @@ async function on_api() {
             option.text = provider.label
                 + (provider.vision ? " (Image Upload)" : "")
                 + (provider.image ? " (Image Generation)" : "")
-                + (provider.webdriver ? " (Webdriver)" : "")
-                + (provider.auth ? " (Auth)" : "");
+                + (provider.nodriver ? " (Browser)" : "")
+                + (!provider.nodriver && provider.auth ? " (Auth)" : "");
             if (provider.parent)
                 option.dataset.parent = provider.parent;
             providerSelect.appendChild(option);
@@ -2151,6 +2209,8 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
         return pywebview.api[`get_${ressource}`]();
     }
     const headers = {};
+    const url = new URL(`/backend-api/v2/${ressource}`, window?.location || "http://localhost:8080");
+    let response;
     if (ressource == "models" && args) {
         api_key = get_api_key_by_provider(args);
         if (api_key) {
@@ -2161,9 +2221,7 @@
             headers.x_api_base = api_base;
         }
         ressource = `${ressource}/${args}`;
-    }
-    const url = new URL(`/backend-api/v2/${ressource}`, window?.location || "http://localhost:8080");
-    if (ressource == "conversation") {
+    } else if (ressource == "conversation") {
         let body = JSON.stringify(args);
         headers.accept = 'text/event-stream';
         if (files !== null) {
@@ -2210,8 +2268,17 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
             await finish_message();
             return;
         }
+    } else if (args) {
+        headers['content-type'] = 'application/json';
+        response = await fetch(url, {
+            method: 'POST',
+            headers: headers,
+            body: JSON.stringify(args),
+        });
     }
+    if (!response) {
     response = await fetch(url, {headers: headers});
+    }
     if (response.status != 200) {
         console.error(response);
     }
@@ -2349,7 +2416,8 @@ function save_storage() {
         }
     }
 }

-function add_memory() {
+function import_memory() {
+    hide_sidebar();
     let conversations = [];
     for (let i = 0; i < appStorage.length; i++) {
         if (appStorage.key(i).startsWith("conversation:")) {
@@ -2357,7 +2425,8 @@ function add_memory() {
             conversations.push(JSON.parse(conversation));
         }
     }
-    conversations.sort((a, b) => (b.updated||0)-(a.updated||0));
+    conversations.sort((a, b) => (a.updated||0)-(b.updated||0));
+    let count = 0;
     conversations.forEach(async (conversation)=>{
         let body = JSON.stringify(conversation);
         response = await fetch("/backend-api/v2/memory", {
@@ -2365,6 +2434,9 @@ function add_memory() {
             body: body,
             headers: {"content-type": "application/json"}
         });
+        const result = await response.json();
+        count += result.count;
+        inputCount.innerText = `${count} Messages are imported`;
     });
 }
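import_memory() walks every conversation:* entry in app storage and posts it to the new memory endpoint, counting imported messages as responses arrive. An equivalent request from Python (conversation payload is hypothetical; the x_api_key header matches what the backend reads):

    import requests

    conversation = {
        "id": "example-id",
        "title": "Example chat",
        "items": [
            {"role": "user", "content": "Hi"},
            {"role": "assistant", "content": "Hello!"},
        ],
    }
    resp = requests.post(
        "http://localhost:8080/backend-api/v2/memory",
        json=conversation,
        headers={"x_api_key": "<mem0 api key>"},  # forwarded to the mem0 client
    )
    print(resp.json())  # {"count": 2}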
@@ -65,6 +65,7 @@ class Api:
             "parent": getattr(provider, "parent", None),
             "image": bool(getattr(provider, "image_models", False)),
             "vision": getattr(provider, "default_vision_model", None) is not None,
+            "nodriver": getattr(provider, "use_nodriver", False),
             "auth": provider.needs_auth,
             "login_url": getattr(provider, "login_url", None),
         } for provider in __providers__ if provider.working]
@@ -7,6 +7,7 @@ import logging
 import asyncio
 import shutil
 import random
+import datetime
 from flask import Flask, Response, request, jsonify
 from typing import Generator
 from pathlib import Path
@@ -61,7 +62,7 @@ class Backend_Api(Api):
         """
         self.app: Flask = app

-        if has_flask_limiter:
+        if has_flask_limiter and app.demo:
             limiter = Limiter(
                 get_remote_address,
                 app=app,
|
||||
else:
|
||||
class Dummy():
|
||||
def limit(self, value):
|
||||
def callback(v):
|
||||
return v
|
||||
def callback(value):
|
||||
return value
|
||||
return callback
|
||||
limiter = Dummy()
|
||||
|
||||
@@ -111,7 +112,7 @@ class Backend_Api(Api):
                 for model, providers in models.demo_models.values()]

         @app.route('/backend-api/v2/conversation', methods=['POST'])
-        @limiter.limit("4 per minute")
+        @limiter.limit("4 per minute") # 1 request in 15 seconds
         def handle_conversation():
             """
             Handles conversation requests and streams responses back.
@@ -150,6 +151,41 @@ class Backend_Api(Api):
                 mimetype='text/event-stream'
             )

+        @app.route('/backend-api/v2/usage', methods=['POST'])
+        def add_usage():
+            cache_dir = Path(get_cookies_dir()) / ".usage"
+            cache_file = cache_dir / f"{datetime.date.today()}.jsonl"
+            cache_dir.mkdir(parents=True, exist_ok=True)
+            with cache_file.open("a" if cache_file.exists() else "w") as f:
+                f.write(f"{json.dumps(request.json)}\n")
+            return {}
+
+        @app.route('/backend-api/v2/memory', methods=['POST'])
+        def add_memory():
+            api_key = request.headers.get("x_api_key")
+            json_data = request.json
+            from mem0 import MemoryClient
+            client = MemoryClient(api_key=api_key)
+            client.add(
+                [{"role": item["role"], "content": item["content"]} for item in json_data.get("items")],
+                user_id="user",
+                metadata={"conversation_id": json_data.get("id"), "title": json_data.get("title")}
+            )
+            return {"count": len(json_data.get("items"))}
+
+        @app.route('/backend-api/v2/memory/<user_id>', methods=['GET'])
+        def read_memory(user_id: str):
+            api_key = request.headers.get("x_api_key")
+            from mem0 import MemoryClient
+            client = MemoryClient(api_key=api_key)
+            if request.args.search:
+                return client.search(
+                    request.args.search,
+                    user_id=user_id,
+                    metadata=json.loads(request.args.metadata) if request.args.metadata else None
+                )
+            return {}
+
         self.routes = {
             '/backend-api/v2/version': {
                 'function': self.get_version,
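add_usage() appends each posted record as one JSON line to a per-day .jsonl file under the cookies directory. A sketch of the client side (field names taken from the UI's usage object; values are illustrative):

    import requests

    usage = {
        "user": "example-user",
        "model": "gpt-4o",
        "provider": "PollinationsAI",
        "prompt_tokens": 42,
        "completion_tokens": 128,
        "total_tokens": 170,
    }
    requests.post("http://localhost:8080/backend-api/v2/usage", json=usage)
    # Server side: one line like {"user": "example-user", "model": "gpt-4o", ...}
    # is appended to <cookies_dir>/.usage/<YYYY-MM-DD>.jsonl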
@@ -159,10 +195,6 @@ class Backend_Api(Api):
                 'function': self.handle_synthesize,
                 'methods': ['GET']
             },
-            '/backend-api/v2/upload_cookies': {
-                'function': self.upload_cookies,
-                'methods': ['POST']
-            },
             '/images/<path:name>': {
                 'function': self.serve_images,
                 'methods': ['GET']
@@ -301,6 +333,7 @@ class Backend_Api(Api):
         except Exception as e:
             return jsonify({"error": {"message": f"Error uploading file: {str(e)}"}}), 500

+    @app.route('/backend-api/v2/upload_cookies', methods=['POST'])
     def upload_cookies(self):
         file = None
         if "file" in request.files:
@@ -771,22 +771,22 @@ class ModelUtils:
 }

 demo_models = {
-    gpt_4o.name: [gpt_4o, [PollinationsAI]],
+    gpt_4o.name: [gpt_4o, [PollinationsAI, Blackbox]],
     "default": [llama_3_2_11b, [HuggingFaceAPI]],
     qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
-    qvq_72b.name: [qvq_72b, [HuggingSpace]],
+    qvq_72b.name: [qvq_72b, [HuggingSpace, HuggingFaceAPI]],
     deepseek_r1.name: [deepseek_r1, [HuggingFace, HuggingFaceAPI]],
-    claude_3_haiku.name: [claude_3_haiku, [DDG]],
+    claude_3_haiku.name: [claude_3_haiku, [DDG, Jmuz]],
     command_r.name: [command_r, [HuggingSpace]],
     command_r_plus.name: [command_r_plus, [HuggingSpace]],
     command_r7b.name: [command_r7b, [HuggingSpace]],
     gemma_2_27b.name: [gemma_2_27b, [HuggingFaceAPI]],
-    qwen_2_72b.name: [qwen_2_72b, [HuggingFace]],
+    qwen_2_72b.name: [qwen_2_72b, [HuggingFaceAPI]],
     qwen_2_5_coder_32b.name: [qwen_2_5_coder_32b, [HuggingFace]],
     qwq_32b.name: [qwq_32b, [HuggingFace]],
     llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]],
-    sd_3_5.name: [sd_3_5, [HuggingSpace]],
-    flux_dev.name: [flux_dev, [HuggingSpace]],
+    sd_3_5.name: [sd_3_5, [HuggingSpace, HuggingFace]],
+    flux_dev.name: [flux_dev, [HuggingSpace, HuggingFace]],
     flux_schnell.name: [flux_schnell, [HuggingFace]],
 }
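Each demo_models value is a [model, providers] pair, which lets the demo offer several providers per model; the backend builds its demo list by iterating these values (cf. the `for model, providers in models.demo_models.values()` context line above). A lookup sketch (hypothetical helper, not part of the commit):

    def resolve_demo_model(name: str):
        # Fall back to the "default" entry when the requested model is unknown.
        model, providers = demo_models.get(name, demo_models["default"])
        return model, providers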