Add legacy port to docker-compose files

Update demo model list
Disable cookie upload in demo mode
Track usage in demo mode
Add messages without asking the AI
Add hint for browser usage in provider list
Add qwen2 prompt template to HuggingFace provider
Automatically trim messages in HuggingFaceAPI
Author: hlohaus
Date: 2025-01-26 16:32:32 +01:00
Parent: 5a0afdb110
Commit: 42805ac789
22 changed files with 253 additions and 94 deletions

View File

@@ -11,4 +11,5 @@ services:
     volumes:
       - .:/app
     ports:
       - '8080:8080'
+      - '1337:8080'

View File

@@ -11,6 +11,7 @@ services:
       - .:/app
     ports:
       - '8080:8080'
+      - '1337:8080'
       - '7900:7900'
     environment:
       - OLLAMA_HOST=host.docker.internal
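Both mappings expose the same container port, so clients that still target the project's historical 1337 port keep working alongside 8080. A quick sanity check, assuming a local deployment with the API's /v1/models route available:

    import requests

    # Port 1337 is only a host-side alias; both map to port 8080 in the container.
    for port in (8080, 1337):
        status = requests.get(f"http://localhost:{port}/v1/models").status_code
        print(f"port {port}: HTTP {status}")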

View File

@@ -15,9 +15,10 @@ from ..errors import ResponseStatusError, ModelNotFoundError
 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Cloudflare AI"
     url = "https://playground.ai.cloudflare.com"
+    working = True
+    use_nodriver = True
     api_endpoint = "https://playground.ai.cloudflare.com/api/inference"
     models_url = "https://playground.ai.cloudflare.com/api/models"
-    working = True
     supports_stream = True
     supports_system_message = True
     supports_message_history = True

View File

@@ -69,7 +69,6 @@ class Copilot(AbstractProvider, ProviderModelMixin):
         conversation: BaseConversation = None,
         return_conversation: bool = False,
         api_key: str = None,
-        web_search: bool = False,
         **kwargs
     ) -> CreateResult:
         if not has_curl_cffi:

View File

@@ -7,9 +7,11 @@ from .base_provider import AsyncGeneratorProvider, format_prompt
 from ..requests import StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies

 class Pi(AsyncGeneratorProvider):
     url = "https://pi.ai/talk"
     working = True
+    use_nodriver = True
     supports_stream = True
-    use_nodriver = True
     default_model = "pi"
     models = [default_model]
     _headers: dict = None

View File

@@ -75,6 +75,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         try:
             cookies = get_cookies(".you.com")
         except MissingRequirementsError:
+            pass
+        if not cookies or "afUserId" not in cookies:
             browser = await get_nodriver(proxy=proxy)
             try:
                 page = await browser.get(cls.url)

View File

@@ -22,6 +22,7 @@ class HailuoAI(AsyncAuthedProvider, ProviderModelMixin):
label = "Hailuo AI" label = "Hailuo AI"
url = "https://www.hailuo.ai" url = "https://www.hailuo.ai"
working = True working = True
use_nodriver = True
supports_stream = True supports_stream = True
default_model = "MiniMax" default_model = "MiniMax"

View File

@@ -4,6 +4,7 @@ from ..Copilot import Copilot
 class CopilotAccount(Copilot):
     needs_auth = True
+    use_nodriver = True
     parent = "Copilot"
     default_model = "Copilot"
     default_vision_model = default_model

View File

@@ -58,6 +58,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
     needs_auth = True
     working = True
+    use_nodriver = True
     default_model = 'gemini'
     default_image_model = default_model

View File

@@ -32,6 +32,7 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
url = "https://huggingface.co/chat" url = "https://huggingface.co/chat"
working = True working = True
use_nodriver = True
supports_stream = True supports_stream = True
needs_auth = True needs_auth = True
@@ -68,10 +69,11 @@ class HuggingChat(AsyncAuthedProvider, ProviderModelMixin):
### Image ### ### Image ###
"flux-dev": "black-forest-labs/FLUX.1-dev", "flux-dev": "black-forest-labs/FLUX.1-dev",
"flux-schnell": "black-forest-labs/FLUX.1-schnell", "flux-schnell": "black-forest-labs/FLUX.1-schnell",
### API ### ### Used in other providers ###
"qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct", "qwen-2-vl-7b": "Qwen/Qwen2-VL-7B-Instruct",
"gemma-2-27b": "google/gemma-2-27b-it", "gemma-2-27b": "google/gemma-2-27b-it",
"qvq-72b": "Qwen/QVQ-72B-Preview" "qwen-2-72b": "Qwen/Qwen2-72B-Instruct",
"qvq-72b": "Qwen/QVQ-72B-Preview",
} }
@classmethod @classmethod

View File

@@ -102,6 +102,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         ) as session:
             if payload is None:
                 async with session.get(f"https://huggingface.co/api/models/{model}") as response:
+                    if response.status == 404:
+                        raise ModelNotSupportedError(f"Model is not supported: {model} in: {cls.__name__}")
                     await raise_for_status(response)
                     model_data = await response.json()
                     model_type = None
@@ -172,6 +174,14 @@ def format_prompt_qwen(messages: Messages, do_continue: bool = False) -> str:
         return prompt[:-len("\n<|im_end|>\n")]
     return prompt

+def format_prompt_qwen2(messages: Messages, do_continue: bool = False) -> str:
+    prompt = "".join([
+        f"\u003C{message['role'].capitalize()}\u003E{message['content']}\u003Cend▁of▁sentence\u003E" for message in messages
+    ]) + ("" if do_continue else "\u003CAssistant\u003E")
+    if do_continue:
+        return prompt[:-len("\u003CAssistant\u003E")]
+    return prompt
+
 def format_prompt_llama(messages: Messages, do_continue: bool = False) -> str:
     prompt = "<|begin_of_text|>" + "".join([
         f"<|start_header_id|>{message['role']}<|end_header_id|>\n\n{message['content']}\n<|eot_id|>\n" for message in messages
@@ -199,6 +209,8 @@ def get_inputs(messages: Messages, model_data: dict, model_type: str, do_continu
         inputs = format_prompt_custom(messages, eos_token, do_continue)
     elif eos_token == "<|im_end|>":
         inputs = format_prompt_qwen(messages, do_continue)
+    elif "content" in eos_token and eos_token["content"] == "\u003Cend▁of▁sentence\u003E":
+        inputs = format_prompt_qwen2(messages, do_continue)
     elif eos_token == "<|eot_id|>":
         inputs = format_prompt_llama(messages, do_continue)
     else:
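For reference, a sketch of the string the new qwen2 template produces, assuming the \u003C/\u003E escapes resolve to < and > and roles are capitalized as in the code:

    # Hypothetical walk-through of format_prompt_qwen2 (not part of the commit).
    messages = [
        {"role": "user", "content": "Hi"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "user", "content": "How are you?"},
    ]
    # Each message becomes <Role>content<end▁of▁sentence>; a trailing <Assistant>
    # cues the model's next turn unless do_continue is set:
    # "<User>Hi<end▁of▁sentence><Assistant>Hello!<end▁of▁sentence><User>How are you?<end▁of▁sentence><Assistant>"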

View File

@@ -3,6 +3,7 @@ from __future__ import annotations
 from .OpenaiTemplate import OpenaiTemplate
 from .HuggingChat import HuggingChat
 from ...providers.types import Messages
+from ... import debug

 class HuggingFaceAPI(OpenaiTemplate):
     label = "HuggingFace (Inference API)"
@@ -31,6 +32,7 @@ class HuggingFaceAPI(OpenaiTemplate):
         messages: Messages,
         api_base: str = None,
         max_tokens: int = 2048,
+        max_inputs_lenght: int = 10000,
         **kwargs
     ):
         if api_base is None:
@@ -38,5 +40,18 @@ class HuggingFaceAPI(OpenaiTemplate):
             if model in cls.model_aliases:
                 model_name = cls.model_aliases[model]
             api_base = f"https://api-inference.huggingface.co/models/{model_name}/v1"
+        start = calculate_lenght(messages)
+        if start > max_inputs_lenght:
+            if len(messages) > 6:
+                messages = messages[:3] + messages[-3:]
+            if calculate_lenght(messages) > max_inputs_lenght:
+                if len(messages) > 2:
+                    messages = [m for m in messages if m["role"] == "system"] + messages[-1:]
+                if len(messages) > 1 and calculate_lenght(messages) > max_inputs_lenght:
+                    messages = [messages[-1]]
+            debug.log(f"Messages trimmed from: {start} to: {calculate_lenght(messages)}")
         async for chunk in super().create_async_generator(model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs):
             yield chunk
+
+def calculate_lenght(messages: Messages) -> int:
+    return sum([len(message["content"]) + 16 for message in messages])
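A rough sketch of how the new trimming behaves on an oversized history (calculate_lenght keeps the code's original spelling; the numbers are illustrative):

    def calculate_lenght(messages) -> int:
        # Character count plus a small fixed overhead per message.
        return sum(len(m["content"]) + 16 for m in messages)

    messages = [{"role": "user", "content": "x" * 2000}] * 10
    print(calculate_lenght(messages))       # 20160, over the 10000 default budget
    trimmed = messages[:3] + messages[-3:]  # first pass: keep 3 head + 3 tail
    print(calculate_lenght(trimmed))        # 12096, still over budget, so the
    # second pass keeps only system messages plus the latest message.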

View File

@@ -21,6 +21,7 @@ class MicrosoftDesigner(AsyncGeneratorProvider, ProviderModelMixin):
label = "Microsoft Designer" label = "Microsoft Designer"
url = "https://designer.microsoft.com" url = "https://designer.microsoft.com"
working = True working = True
use_nodriver = True
needs_auth = True needs_auth = True
default_image_model = "dall-e-3" default_image_model = "dall-e-3"
image_models = [default_image_model, "1024x1024", "1024x1792", "1792x1024"] image_models = [default_image_model, "1024x1024", "1024x1792", "1792x1024"]

View File

@@ -91,6 +91,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
label = "OpenAI ChatGPT" label = "OpenAI ChatGPT"
url = "https://chatgpt.com" url = "https://chatgpt.com"
working = True working = True
use_nodriver = True
supports_gpt_4 = True supports_gpt_4 = True
supports_message_history = True supports_message_history = True
supports_system_message = True supports_system_message = True

View File

@@ -169,7 +169,7 @@ class Api:
             except HTTPException:
                 user_g4f_api_key = None
             path = request.url.path
-            if path.startswith("/v1"):
+            if path.startswith("/v1") or (AppConfig.demo and path == '/backend-api/v2/upload_cookies'):
                 if user_g4f_api_key is None:
                     return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
                 if not secrets.compare_digest(AppConfig.g4f_api_key, user_g4f_api_key):
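With this change, demo deployments require the same G4F API key for cookie uploads as for the /v1 routes. A hedged client sketch (the g4f-api-key header name is an assumption):

    import requests

    # Assumes a server started with --demo and a configured g4f_api_key.
    with open("cookies.har", "rb") as f:
        response = requests.post(
            "http://localhost:8080/backend-api/v2/upload_cookies",
            headers={"g4f-api-key": "secret"},  # header name is an assumption
            files={"file": ("cookies.har", f)},
        )
    print(response.status_code, response.text)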

View File

@@ -12,7 +12,7 @@ def get_api_parser():
api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)") api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.") api_parser.add_argument("--port", "-p", default=None, help="Change the port of the server.")
api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.") api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Add gui to the api.") api_parser.add_argument("--gui", "-g", default=None, action="store_true", help="Start also the gui.")
api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)") api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working], api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)") default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)")
@@ -28,7 +28,7 @@ def get_api_parser():
api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers], api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers],
default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)") default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)")
api_parser.add_argument("--reload", action="store_true", help="Enable reloading.") api_parser.add_argument("--reload", action="store_true", help="Enable reloading.")
api_parser.add_argument("--demo", action="store_true", help="Enable demo modus.") api_parser.add_argument("--demo", action="store_true", help="Enable demo mode.")
return api_parser return api_parser
def main(): def main():

View File

@@ -215,6 +215,7 @@
         }
         localStorage.setItem("HuggingFace-api_key", accessToken);
         localStorage.setItem("HuggingFace-user", JSON.stringify(user));
+        localStorage.setItem("user", user.name);
         location.href = "/chat/";
     }
     input.addEventListener("input", () => check_access_token());

View File

@@ -142,6 +142,11 @@
<input type="checkbox" id="refine"/> <input type="checkbox" id="refine"/>
<label for="refine" class="toogle" title=""></label> <label for="refine" class="toogle" title=""></label>
</div> </div>
<div class="field">
<span class="label">Track usage</span>
<input type="checkbox" id="track_usage"/>
<label for="track_usage" class="toogle" title=""></label>
</div>
<div class="field box"> <div class="field box">
<label for="systemPrompt" class="label" title="">System prompt</label> <label for="systemPrompt" class="label" title="">System prompt</label>
<textarea id="systemPrompt" placeholder="You are a helpful assistant."></textarea> <textarea id="systemPrompt" placeholder="You are a helpful assistant."></textarea>
@@ -184,6 +189,12 @@
<a href="" onclick="return false;">Show log</a> <a href="" onclick="return false;">Show log</a>
</button> </button>
</div> </div>
<div class="bottom_buttons memory hidden">
<button onclick="import_memory()">
<i class="fa-solid fa-arrow-up-from-bracket"></i>
<a href="" onclick="return false;">Import Messages to Memory</a>
</button>
</div>
</div> </div>
<div class="provider_forms hidden"> <div class="provider_forms hidden">
<div class="bottom_buttons"> <div class="bottom_buttons">
@@ -228,7 +239,7 @@
<textarea id="message-input" placeholder="Ask a question" cols="30" rows="10" <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
style="white-space: pre-wrap;resize: none;"></textarea> style="white-space: pre-wrap;resize: none;"></textarea>
<label class="file-label image-label" for="image" title=""> <label class="file-label image-label" for="image" title="">
<input type="file" id="image" name="image" accept="image/*" required multiple/> <input type="file" id="image" name="image" accept="image/*" required multiple capture="filesystem"/>
<i class="fa-regular fa-image"></i> <i class="fa-regular fa-image"></i>
</label> </label>
<label class="file-label image-label" for="camera"> <label class="file-label image-label" for="camera">
@@ -243,6 +254,7 @@
<i class="fa-solid fa-microphone-slash"></i> <i class="fa-solid fa-microphone-slash"></i>
</label> </label>
<div id="send-button"> <div id="send-button">
<i class="fa-solid fa-square-plus"></i>
<i class="fa-regular fa-paper-plane"></i> <i class="fa-regular fa-paper-plane"></i>
</div> </div>
</div> </div>

View File

@@ -361,7 +361,7 @@ const delete_conversations = async () => {
     await new_conversation();
 };

-const handle_ask = async () => {
+const handle_ask = async (do_ask_gpt = true) => {
     messageInput.style.height = "82px";
     messageInput.focus();
     await scroll_to_bottom();
@@ -377,17 +377,19 @@ const handle_ask = async () => {
     let message_index = await add_message(window.conversation_id, "user", message);
     let message_id = get_message_id();

-    if (imageInput.dataset.objects) {
-        imageInput.dataset.objects.split(" ").forEach((object)=>URL.revokeObjectURL(object))
-        delete imageInput.dataset.objects;
-    }
-    const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
-    images = [];
-    if (input.files.length > 0) {
-        for (const file of input.files) {
-            images.push(URL.createObjectURL(file));
-        }
-        imageInput.dataset.objects = images.join(" ");
-    }
+    let images = [];
+    if (do_ask_gpt) {
+        if (imageInput.dataset.objects) {
+            imageInput.dataset.objects.split(" ").forEach((object)=>URL.revokeObjectURL(object))
+            delete imageInput.dataset.objects;
+        }
+        const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
+        if (input.files.length > 0) {
+            for (const file of input.files) {
+                images.push(URL.createObjectURL(file));
+            }
+            imageInput.dataset.objects = images.join(" ");
+        }
+    }

     message_box.innerHTML += `
         <div class="message" data-index="${message_index}">
@@ -408,18 +410,21 @@ const handle_ask = async () => {
         </div>
     `;
     highlight(message_box);
-    const all_pinned = document.querySelectorAll(".buttons button.pinned")
-    if (all_pinned.length > 0) {
-        all_pinned.forEach((el, idx) => ask_gpt(
-            idx == 0 ? message_id : get_message_id(),
-            -1,
-            idx != 0,
-            el.dataset.provider,
-            el.dataset.model
-        ));
+    if (do_ask_gpt) {
+        const all_pinned = document.querySelectorAll(".buttons button.pinned")
+        if (all_pinned.length > 0) {
+            all_pinned.forEach((el, idx) => ask_gpt(
+                idx == 0 ? message_id : get_message_id(),
+                -1,
+                idx != 0,
+                el.dataset.provider,
+                el.dataset.model
+            ));
+        } else {
+            await ask_gpt(message_id);
+        }
     } else {
-        await ask_gpt(message_id);
+        await lazy_scroll_to_bottom();
     }
 };
@@ -450,11 +455,14 @@ stop_generating.addEventListener("click", async () => {
     for (key in controller_storage) {
         if (!controller_storage[key].signal.aborted) {
             console.log(`aborted ${window.conversation_id} #${key}`);
-            controller_storage[key].abort();
-            let message = message_storage[key];
-            if (message) {
-                content_storage[key].inner.innerHTML += " [aborted]";
-                message_storage[key] += " [aborted]";
+            try {
+                controller_storage[key].abort();
+            } finally {
+                let message = message_storage[key];
+                if (message) {
+                    content_storage[key].inner.innerHTML += " [aborted]";
+                    message_storage[key] += " [aborted]";
+                }
             }
         }
     }
@@ -486,7 +494,7 @@ const prepare_messages = (messages, message_index = -1, do_continue = false, do_
             }
         }
     }

-    // Combine messages with same role
+    // Combine assistant messages
     let last_message;
     let new_messages = [];
     messages.forEach((message) => {
@@ -503,21 +511,25 @@ const prepare_messages = (messages, message_index = -1, do_continue = false, do_
     messages = new_messages;

     // Insert system prompt as first message
-    new_messages = [];
+    let final_messages = [];
     if (chatPrompt?.value) {
-        new_messages.push({
+        final_messages.push({
             "role": "system",
             "content": chatPrompt.value
         });
     }

-    // Remove history, if it's selected
-    if (document.getElementById('history')?.checked && do_filter) {
-        if (message_index == null) {
-            messages = [messages.pop(), messages.pop()];
-        } else {
-            messages = [messages.pop()];
+    // Remove history, only add new user messages
+    let filtered_messages = [];
+    // The message_index is null on count total tokens
+    if (document.getElementById('history')?.checked && do_filter && message_index != null) {
+        while (last_message = messages.pop()) {
+            if (last_message["role"] == "user") {
+                filtered_messages.push(last_message);
+                break;
+            }
         }
+        messages = filtered_messages.reverse();
     }

     messages.forEach((new_message, i) => {
@@ -845,28 +857,63 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     }
     if (message_storage[message_id]) {
         const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
+        let usage;
+        if (usage_storage[message_id]) {
+            usage = usage_storage[message_id];
+            delete usage_storage[message_id];
+        }
+        // Calculate usage if we have no usage result jet
+        if (document.getElementById("track_usage").checked && !usage && window.GPTTokenizer_cl100k_base) {
+            const prompt_token_model = model?.startsWith("gpt-3") ? "gpt-3.5-turbo" : "gpt-4"
+            const prompt_tokens = GPTTokenizer_cl100k_base?.encodeChat(messages, prompt_token_model).length;
+            const completion_tokens = count_tokens(message_provider?.model, message_storage[message_id]);
+            usage = {
+                model: message_provider?.model,
+                provider: message_provider?.name,
+                prompt_tokens: prompt_tokens,
+                completion_tokens: completion_tokens,
+                total_tokens: prompt_tokens + completion_tokens
+            }
+        }
+        // It is not regenerated, if it is the first response to a new question
+        if (regenerate && message_index == -1) {
+            let conversation = await get_conversation(window.conversation_id);
+            regenerate = conversation.items[conversation.items.length-1]["role"] != "user";
+        }
+        // Create final message content
+        const final_message = message_storage[message_id]
+            + (error_storage[message_id] ? " [error]" : "")
+            + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : "")
+        // Save message in local storage
         await add_message(
             window.conversation_id,
             "assistant",
-            message_storage[message_id] + (error_storage[message_id] ? " [error]" : "") + (stop_generating.classList.contains('stop_generating-hidden') ? " [aborted]" : ""),
+            final_message,
             message_provider,
             message_index,
             synthesize_storage[message_id],
             regenerate,
             title_storage[message_id],
             finish_storage[message_id],
-            usage_storage[message_id],
+            usage,
             reasoning_storage[message_id],
             action=="continue"
         );
         delete message_storage[message_id];
-    }
-    if (controller_storage[message_id]) {
-        if (!controller_storage[message_id].signal.aborted) {
-            controller_storage[message_id].abort();
+        // Send usage to the server
+        if (document.getElementById("track_usage").checked) {
+            const user = localStorage.getItem("user");
+            if (user) {
+                usage = {user: user, ...usage};
+            }
+            api("usage", usage);
         }
+    }
+    // Update controller storage
+    if (controller_storage[message_id]) {
         delete controller_storage[message_id];
     }
+    // Reload conversation if no error
     if (!error_storage[message_id]) {
         await safe_load_conversation(window.conversation_id, scroll);
     }
@@ -1485,22 +1532,27 @@ async function hide_sidebar() {
settings.classList.add("hidden"); settings.classList.add("hidden");
chat.classList.remove("hidden"); chat.classList.remove("hidden");
log_storage.classList.add("hidden"); log_storage.classList.add("hidden");
await hide_settings();
if (window.location.pathname == "/menu/" || window.location.pathname == "/settings/") { if (window.location.pathname == "/menu/" || window.location.pathname == "/settings/") {
history.back(); history.back();
} }
} }
window.addEventListener('popstate', hide_sidebar, false); async function hide_settings() {
sidebar_button.addEventListener("click", async () => {
settings.classList.add("hidden"); settings.classList.add("hidden");
let provider_forms = document.querySelectorAll(".provider_forms from"); let provider_forms = document.querySelectorAll(".provider_forms from");
Array.from(provider_forms).forEach((form) => form.classList.add("hidden")); Array.from(provider_forms).forEach((form) => form.classList.add("hidden"));
}
window.addEventListener('popstate', hide_sidebar, false);
sidebar_button.addEventListener("click", async () => {
if (sidebar.classList.contains("shown")) { if (sidebar.classList.contains("shown")) {
await hide_sidebar(); await hide_sidebar();
} else { } else {
sidebar.classList.add("shown"); sidebar.classList.add("shown");
sidebar_button.classList.add("rotated"); sidebar_button.classList.add("rotated");
await hide_settings();
add_url_to_history("/menu/"); add_url_to_history("/menu/");
} }
window.scrollTo(0, 0); window.scrollTo(0, 0);
@@ -1797,13 +1849,16 @@ async function on_api() {
             messageInput.style.height = messageInput.scrollHeight + "px";
         }
     });
-    sendButton.addEventListener(`click`, async () => {
+    sendButton.querySelector(".fa-paper-plane").addEventListener(`click`, async () => {
         console.log("clicked send");
         if (prompt_lock) return;
         prompt_lock = true;
         setTimeout(()=>prompt_lock=false, 3000);
         await handle_ask();
     });
+    sendButton.querySelector(".fa-square-plus").addEventListener(`click`, async () => {
+        await handle_ask(false);
+    });
     messageInput.focus();
     let provider_options = [];
     models = await api("models");
@@ -1817,13 +1872,16 @@ async function on_api() {
     });
     let login_urls;
     if (is_demo) {
-        if (!localStorage.getItem("HuggingFace-api_key")) {
+        if (!localStorage.getItem("user")) {
             location.href = "/";
             return;
         }
-        providerSelect.innerHTML = '<option value="" selected>Demo Modus</option>'
-        document.getElementById("pin")?.remove();
+        providerSelect.innerHTML = '<option value="" selected>Demo Mode</option>'
+        document.getElementById("pin").disabled = true;
         document.getElementById("refine")?.parentElement.remove();
+        const track_usage = document.getElementById("track_usage");
+        track_usage.checked = true;
+        track_usage.disabled = true;
         Array.from(modelSelect.querySelectorAll(':not([data-providers])')).forEach((option)=>{
             if (!option.disabled && option.value) {
                 option.remove();
@@ -1844,8 +1902,8 @@ async function on_api() {
                 option.text = provider.label
                     + (provider.vision ? " (Image Upload)" : "")
                     + (provider.image ? " (Image Generation)" : "")
-                    + (provider.webdriver ? " (Webdriver)" : "")
-                    + (provider.auth ? " (Auth)" : "");
+                    + (provider.nodriver ? " (Browser)" : "")
+                    + (!provider.nodriver && provider.auth ? " (Auth)" : "");
                 if (provider.parent)
                     option.dataset.parent = provider.parent;
                 providerSelect.appendChild(option);
@@ -2151,6 +2209,8 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
         return pywebview.api[`get_${ressource}`]();
     }
     const headers = {};
+    const url = new URL(`/backend-api/v2/${ressource}`, window?.location || "http://localhost:8080");
+    let response;
     if (ressource == "models" && args) {
         api_key = get_api_key_by_provider(args);
         if (api_key) {
@@ -2161,9 +2221,7 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
             headers.x_api_base = api_base;
         }
         ressource = `${ressource}/${args}`;
-    }
-    const url = new URL(`/backend-api/v2/${ressource}`, window?.location || "http://localhost:8080");
-    if (ressource == "conversation") {
+    } else if (ressource == "conversation") {
         let body = JSON.stringify(args);
         headers.accept = 'text/event-stream';
         if (files !== null) {
@@ -2210,8 +2268,17 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
             await finish_message();
             return;
         }
-    }
-    response = await fetch(url, {headers: headers});
+    } else if (args) {
+        headers['content-type'] = 'application/json';
+        response = await fetch(url, {
+            method: 'POST',
+            headers: headers,
+            body: JSON.stringify(args),
+        });
+    }
+    if (!response) {
+        response = await fetch(url, {headers: headers});
+    }
     if (response.status != 200) {
         console.error(response);
     }
@@ -2349,7 +2416,8 @@ function save_storage() {
     }
 }

-function add_memory() {
+function import_memory() {
+    hide_sidebar();
     let conversations = [];
     for (let i = 0; i < appStorage.length; i++) {
         if (appStorage.key(i).startsWith("conversation:")) {
@@ -2357,7 +2425,8 @@ function add_memory() {
             conversations.push(JSON.parse(conversation));
         }
     }
-    conversations.sort((a, b) => (b.updated||0)-(a.updated||0));
+    conversations.sort((a, b) => (a.updated||0)-(b.updated||0));
+    let count = 0;
     conversations.forEach(async (conversation)=>{
         let body = JSON.stringify(conversation);
         response = await fetch("/backend-api/v2/memory", {
@@ -2365,6 +2434,9 @@ function add_memory() {
             body: body,
             headers: {"content-type": "application/json"}
         });
+        const result = await response.json();
+        count += result.count;
+        inputCount.innerText = `${count} Messages are imported`;
     });
 }
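The import loop above posts one conversation per request and sums the count fields from the responses. A minimal sketch of the request/response shape (values hypothetical):

    import json

    # Hypothetical POST body for /backend-api/v2/memory.
    payload = {
        "id": "conv-123",
        "title": "Example chat",
        "items": [
            {"role": "user", "content": "Hi"},
            {"role": "assistant", "content": "Hello!"},
        ],
    }
    # The endpoint stores the items via mem0 and reports how many it accepted:
    print(json.dumps({"count": len(payload["items"])}))  # {"count": 2}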

View File

@@ -65,6 +65,7 @@ class Api:
"parent": getattr(provider, "parent", None), "parent": getattr(provider, "parent", None),
"image": bool(getattr(provider, "image_models", False)), "image": bool(getattr(provider, "image_models", False)),
"vision": getattr(provider, "default_vision_model", None) is not None, "vision": getattr(provider, "default_vision_model", None) is not None,
"nodriver": getattr(provider, "use_nodriver", False),
"auth": provider.needs_auth, "auth": provider.needs_auth,
"login_url": getattr(provider, "login_url", None), "login_url": getattr(provider, "login_url", None),
} for provider in __providers__ if provider.working] } for provider in __providers__ if provider.working]

View File

@@ -7,6 +7,7 @@ import logging
 import asyncio
 import shutil
 import random
+import datetime

 from flask import Flask, Response, request, jsonify
 from typing import Generator
 from pathlib import Path
@@ -61,7 +62,7 @@ class Backend_Api(Api):
""" """
self.app: Flask = app self.app: Flask = app
if has_flask_limiter: if has_flask_limiter and app.demo:
limiter = Limiter( limiter = Limiter(
get_remote_address, get_remote_address,
app=app, app=app,
@@ -71,8 +72,8 @@ class Backend_Api(Api):
         else:
             class Dummy():
                 def limit(self, value):
-                    def callback(v):
-                        return v
+                    def callback(value):
+                        return value
                     return callback
             limiter = Dummy()
@@ -111,7 +112,7 @@ class Backend_Api(Api):
                 for model, providers in models.demo_models.values()]

         @app.route('/backend-api/v2/conversation', methods=['POST'])
-        @limiter.limit("4 per minute")
+        @limiter.limit("4 per minute") # 1 request in 15 seconds
         def handle_conversation():
             """
             Handles conversation requests and streams responses back.
@@ -150,6 +151,41 @@ class Backend_Api(Api):
                 mimetype='text/event-stream'
             )

+        @app.route('/backend-api/v2/usage', methods=['POST'])
+        def add_usage():
+            cache_dir = Path(get_cookies_dir()) / ".usage"
+            cache_file = cache_dir / f"{datetime.date.today()}.jsonl"
+            cache_dir.mkdir(parents=True, exist_ok=True)
+            with cache_file.open("a" if cache_file.exists() else "w") as f:
+                f.write(f"{json.dumps(request.json)}\n")
+            return {}
+
+        @app.route('/backend-api/v2/memory', methods=['POST'])
+        def add_memory():
+            api_key = request.headers.get("x_api_key")
+            json_data = request.json
+            from mem0 import MemoryClient
+            client = MemoryClient(api_key=api_key)
+            client.add(
+                [{"role": item["role"], "content": item["content"]} for item in json_data.get("items")],
+                user_id="user",
+                metadata={"conversation_id": json_data.get("id"), "title": json_data.get("title")}
+            )
+            return {"count": len(json_data.get("items"))}
+
+        @app.route('/backend-api/v2/memory/<user_id>', methods=['GET'])
+        def read_memory(user_id: str):
+            api_key = request.headers.get("x_api_key")
+            from mem0 import MemoryClient
+            client = MemoryClient(api_key=api_key)
+            if request.args.search:
+                return client.search(
+                    request.args.search,
+                    user_id=user_id,
+                    metadata=json.loads(request.args.metadata) if request.args.metadata else None
+                )
+            return {}
+
         self.routes = {
             '/backend-api/v2/version': {
                 'function': self.get_version,
@@ -159,10 +195,6 @@ class Backend_Api(Api):
                 'function': self.handle_synthesize,
                 'methods': ['GET']
             },
-            '/backend-api/v2/upload_cookies': {
-                'function': self.upload_cookies,
-                'methods': ['POST']
-            },
             '/images/<path:name>': {
                 'function': self.serve_images,
                 'methods': ['GET']
@@ -301,17 +333,18 @@ class Backend_Api(Api):
             except Exception as e:
                 return jsonify({"error": {"message": f"Error uploading file: {str(e)}"}}), 500

-    def upload_cookies(self):
-        file = None
-        if "file" in request.files:
-            file = request.files['file']
-            if file.filename == '':
-                return 'No selected file', 400
-        if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
-            filename = secure_filename(file.filename)
-            file.save(os.path.join(get_cookies_dir(), filename))
-            return "File saved", 200
-        return 'Not supported file', 400
+        @app.route('/backend-api/v2/upload_cookies', methods=['POST'])
+        def upload_cookies(self):
+            file = None
+            if "file" in request.files:
+                file = request.files['file']
+                if file.filename == '':
+                    return 'No selected file', 400
+            if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
+                filename = secure_filename(file.filename)
+                file.save(os.path.join(get_cookies_dir(), filename))
+                return "File saved", 200
+            return 'Not supported file', 400

     def handle_synthesize(self, provider: str):
         try:
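The usage endpoint appends one JSON object per request to a per-day .jsonl file under the cookies directory. A sketch of a logged line (field names follow the frontend's usage object; values are hypothetical):

    import json

    record = {
        "model": "qwen-2-72b",
        "provider": "HuggingFaceAPI",
        "prompt_tokens": 128,
        "completion_tokens": 256,
        "total_tokens": 384,
        "user": "example-user",  # only added when a user name is stored client-side
    }
    print(json.dumps(record))  # one line in <cookies_dir>/.usage/2025-01-26.jsonl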

View File

@@ -771,22 +771,22 @@ class ModelUtils:
 }

 demo_models = {
-    gpt_4o.name: [gpt_4o, [PollinationsAI]],
+    gpt_4o.name: [gpt_4o, [PollinationsAI, Blackbox]],
     "default": [llama_3_2_11b, [HuggingFaceAPI]],
     qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
-    qvq_72b.name: [qvq_72b, [HuggingSpace]],
+    qvq_72b.name: [qvq_72b, [HuggingSpace, HuggingFaceAPI]],
     deepseek_r1.name: [deepseek_r1, [HuggingFace, HuggingFaceAPI]],
-    claude_3_haiku.name: [claude_3_haiku, [DDG]],
+    claude_3_haiku.name: [claude_3_haiku, [DDG, Jmuz]],
     command_r.name: [command_r, [HuggingSpace]],
     command_r_plus.name: [command_r_plus, [HuggingSpace]],
     command_r7b.name: [command_r7b, [HuggingSpace]],
     gemma_2_27b.name: [gemma_2_27b, [HuggingFaceAPI]],
-    qwen_2_72b.name: [qwen_2_72b, [HuggingFace]],
+    qwen_2_72b.name: [qwen_2_72b, [HuggingFaceAPI]],
     qwen_2_5_coder_32b.name: [qwen_2_5_coder_32b, [HuggingFace]],
     qwq_32b.name: [qwq_32b, [HuggingFace]],
     llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]],
-    sd_3_5.name: [sd_3_5, [HuggingSpace]],
-    flux_dev.name: [flux_dev, [HuggingSpace]],
+    sd_3_5.name: [sd_3_5, [HuggingSpace, HuggingFace]],
+    flux_dev.name: [flux_dev, [HuggingSpace, HuggingFace]],
     flux_schnell.name: [flux_schnell, [HuggingFace]],
 }