Remove login url from error message

Remove print in CablyAI
Fix Vision Support in HuggingSpace provider
Support images in BackendApi provider
Add missing import in Blackbox provider
Author: hlohaus
Date: 2025-02-05 13:37:55 +01:00
Commit: 88918cb897 (parent 03d0c3053f)
10 changed files with 55 additions and 29 deletions

View File

@@ -15,7 +15,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..image import to_data_uri
 from ..cookies import get_cookies_dir
 from .helper import format_prompt, format_image_prompt
-from ..providers.response import JsonConversation, ImageResponse
+from ..providers.response import JsonConversation, ImageResponse, Reasoning

 class Conversation(JsonConversation):
     validated_value: str = None

View File

@@ -87,7 +87,6 @@ class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
                         continue
                     line = line.decode('utf-8').strip()
-                    print(line)
                     if not line.startswith("data: "):
                         continue

View File

@@ -148,7 +148,7 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
             if response.headers["content-type"].startswith("image/"):
                 base64_data = base64.b64encode(b"".join([chunk async for chunk in response.iter_content()]))
                 url = f"data:{response.headers['content-type']};base64,{base64_data.decode()}"
-                yield ImageResponse(url, prompt)
+                yield ImageResponse(url, inputs)
             else:
                 yield (await response.json())[0]["generated_text"].strip()
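
For context, the inline data-URI construction in this hunk can be factored as below; make_data_uri is a hypothetical helper written for illustration, not part of the codebase:

import base64

def make_data_uri(content_type: str, chunks: list) -> str:
    # Concatenate the streamed response chunks and base64-encode them,
    # producing a data: URI usable directly as an image source.
    encoded = base64.b64encode(b"".join(chunks))
    return f"data:{content_type};base64,{encoded.decode()}"

# Usage: the first bytes of a PNG stream, just to show the output shape.
print(make_data_uri("image/png", [b"\x89PNG\r\n\x1a\n"]))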

View File

@@ -2,14 +2,16 @@ from __future__ import annotations
 from aiohttp import ClientSession
 import time
+import asyncio

 from ...typing import AsyncResult, Messages
-from ...providers.response import ImageResponse
+from ...providers.response import ImageResponse, Reasoning
 from ...requests.raise_for_status import raise_for_status
 from ..helper import format_image_prompt, get_random_string
 from .Janus_Pro_7B import Janus_Pro_7B, JsonConversation, get_zerogpu_token

 class G4F(Janus_Pro_7B):
+    label = "G4F framework"
     space = "roxky/Janus-Pro-7B"
     url = f"https://huggingface.co/spaces/roxky/g4f-space"
     api_url = "https://roxky-janus-pro-7b.hf.space"
@@ -62,13 +64,25 @@ class G4F(Janus_Pro_7B):
"trigger_id": 10 "trigger_id": 10
} }
async with ClientSession() as session: async with ClientSession() as session:
yield Reasoning(status="Acquiring GPU Token")
zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies) zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
headers = { headers = {
"x-zerogpu-token": zerogpu_token, "x-zerogpu-token": zerogpu_token,
"x-zerogpu-uuid": zerogpu_uuid, "x-zerogpu-uuid": zerogpu_uuid,
} }
async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response: async def generate():
await raise_for_status(response) async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
response_data = await response.json() await raise_for_status(response)
image_url = response_data["data"][0]['url'] response_data = await response.json()
yield ImageResponse(images=[image_url], alt=prompt) image_url = response_data["data"][0]['url']
return ImageResponse(images=[image_url], alt=prompt)
background_tasks = set()
started = time.time()
task = asyncio.create_task(generate())
background_tasks.add(task)
task.add_done_callback(background_tasks.discard)
while background_tasks:
yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
await asyncio.sleep(0.2)
yield await task
yield Reasoning(status=f"Finished {time.time() - started:.2f}s")
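
The rewritten body wraps the POST request in a background task so the generator can emit Reasoning status updates while waiting. A self-contained sketch of that polling pattern, with the HTTP call replaced by a sleep (all names here are illustrative):

import asyncio
import time

async def generate():
    # Stand-in for the session.post(...) call above; the delay simulates
    # the time the image generation endpoint takes to respond.
    await asyncio.sleep(1)
    return "https://example.com/image.png"

async def stream_with_status():
    # Run the slow job as a background task and yield a status line every
    # 200 ms until it finishes; the done-callback empties the set, which
    # ends the polling loop, and the final result is yielded afterwards.
    background_tasks = set()
    started = time.time()
    task = asyncio.create_task(generate())
    background_tasks.add(task)
    task.add_done_callback(background_tasks.discard)
    while background_tasks:
        yield f"Generating {time.time() - started:.2f}s"
        await asyncio.sleep(0.2)
    yield await task

async def main():
    async for item in stream_with_status():
        print(item)

asyncio.run(main())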

View File

@@ -82,10 +82,8 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
method = "post" method = "post"
if model == cls.default_image_model or prompt is not None: if model == cls.default_image_model or prompt is not None:
method = "image" method = "image"
prompt = format_prompt(messages) if prompt is None and conversation is None else prompt prompt = format_prompt(messages) if prompt is None and conversation is None else prompt
prompt = format_image_prompt(messages, prompt) prompt = format_image_prompt(messages, prompt)
if seed is None: if seed is None:
seed = int(time.time()) seed = int(time.time())
@@ -139,7 +137,8 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
                     if json_data.get('msg') == 'process_completed':
                         if 'output' in json_data and 'error' in json_data['output']:
-                            raise ResponseError("Missing image input" if json_data['output']['error'] and "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
+                            json_data['output']['error'] = json_data['output']['error'].split(" <a ")[0]
+                            raise ResponseError("Missing images input" if json_data['output']['error'] and "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
                         if 'output' in json_data and 'data' in json_data['output']:
                             yield Reasoning(status="Finished")
                             if "image" in json_data['output']['data'][0][0]:

View File

@@ -27,10 +27,14 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = BlackForestLabsFlux1Dev.default_model
     default_vision_model = Qwen_QVQ_72B.default_model
     providers = [
-        BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell,
+        BlackForestLabsFlux1Dev,
+        BlackForestLabsFlux1Schnell,
         VoodoohopFlux1Schnell,
-        CohereForAI, Janus_Pro_7B,
-        Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct,
+        CohereForAI,
+        Janus_Pro_7B,
+        Qwen_QVQ_72B,
+        Qwen_Qwen_2_5M_Demo,
+        Qwen_Qwen_2_72B_Instruct,
         StableDiffusion35Large,
         G4F
     ]
@@ -70,7 +74,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
         random.shuffle(cls.providers)
         for provider in cls.providers:
             if model in provider.model_aliases:
-                async for chunk in provider.create_async_generator(provider.model_aliases[model], messages, **kwargs):
+                async for chunk in provider.create_async_generator(provider.model_aliases[model], messages, images=images, **kwargs):
                     is_started = True
                     yield chunk
                 if is_started:
@@ -79,7 +83,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
         for provider in cls.providers:
             if model in provider.get_models():
                 try:
-                    async for chunk in provider.create_async_generator(model, messages, **kwargs):
+                    async for chunk in provider.create_async_generator(model, messages, images=images, **kwargs):
                         is_started = True
                         yield chunk
                     if is_started:
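
Both call sites now forward images explicitly: because images is captured as a named parameter of the wrapper, it never lands in **kwargs, so nested providers would otherwise receive nothing. A toy sketch of the forwarding (provider name and image format are illustrative):

import asyncio

async def provider_gen(model: str, messages: list, images: list = None, **kwargs):
    # A provider that actually consumes the images keyword.
    yield f"{model}: got {0 if images is None else len(images)} image(s)"

async def create_async_generator(model: str, messages: list, images: list = None, **kwargs):
    # images must be passed explicitly, mirroring the hunks above;
    # **kwargs alone would not carry it through.
    async for chunk in provider_gen(model, messages, images=images, **kwargs):
        yield chunk

async def main():
    async for chunk in create_async_generator("qwen-qvq-72b", [], images=[("img.png", b"...")]):
        print(chunk)

asyncio.run(main())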

View File

@@ -68,12 +68,17 @@
         const currSlideElement = lightbox.pswp.currSlide.data.element;
         if (currSlideElement) {
             const img = currSlideElement.querySelector('img');
-            el.innerText = img.getAttribute('alt');
             const download = document.createElement("a");
             download.setAttribute("href", img.getAttribute('src'));
-            download.setAttribute("download", `${img.getAttribute('alt')}${lightbox.pswp.currSlide.index}.jpg`);
+            let extension = img.getAttribute('src').includes(".webp") ? ".webp" : ".jpg";
+            download.setAttribute("download", `${img.getAttribute('alt')} ${lightbox.pswp.currSlide.index}${extension}`);
+            download.style.float = "right";
             download.innerHTML = '<i class="fa-solid fa-download"></i>';
+            let span = document.createElement("span");
+            span.innerText = img.getAttribute('alt');
+            el.innerHTML = '';
             el.appendChild(download);
+            el.appendChild(span);
         }
     });
 }

View File

@@ -1165,6 +1165,10 @@ ul {
     .settings h3 {
         text-align: center;
     }
+
+    .field.collapsible {
+        flex-direction: column;
+    }
 }

 .shown {

View File

@@ -1023,14 +1023,11 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     let api_key;
     if (is_demo && provider == "Feature") {
         api_key = localStorage.getItem("user");
-    } else if (is_demo && provider != "Custom") {
+    } else if (is_demo) {
         api_key = localStorage.getItem("HuggingFace-api_key");
     } else {
         api_key = get_api_key_by_provider(provider);
     }
-    if (is_demo && !api_key) {
-        api_key = localStorage.getItem("HuggingFace-api_key");
-    }
     if (is_demo && !api_key) {
         location.href = "/";
         return;
@@ -2005,7 +2002,9 @@ async function on_api() {
         providerSelect.innerHTML = `
             <option value="">Demo Mode</option>
             <option value="Feature">Feature Provider</option>
-            <option value="Custom">Custom Provider</option>`;
+            <option value="G4F">G4F framework</option>
+            <option value="HuggingFace">HuggingFace</option>
+            <option value="HuggingSpace">HuggingSpace</option>`;
         providerSelect.selectedIndex = 0;
         document.getElementById("pin").disabled = true;
         document.getElementById("refine")?.parentElement.classList.add("hidden")
@@ -2018,7 +2017,6 @@ async function on_api() {
         }
     });
     login_urls = {
-        "Custom": ["Custom Provider", "", []],
         "HuggingFace": ["HuggingFace", "", []],
     };
 } else {

View File

@@ -116,7 +116,7 @@ class Backend_Api(Api):
             }
             for model, providers in models.demo_models.values()]

-        def handle_conversation():
+        def handle_conversation(limiter_check: callable = None):
             """
             Handles conversation requests and streams responses back.
@@ -135,7 +135,7 @@ class Backend_Api(Api):
             else:
                 json_data = request.json

-            if app.demo and json_data.get("provider") not in ["Custom", "Feature"]:
+            if app.demo and json_data.get("provider") not in ["Custom", "Feature", "HuggingFace", "HuggingSpace", "G4F"]:
                 model = json_data.get("model")
                 if model != "default" and model in models.demo_models:
                     json_data["provider"] = random.choice(models.demo_models[model][1])
@@ -143,6 +143,10 @@ class Backend_Api(Api):
                 if not model or model == "default":
                     json_data["model"] = models.demo_models["default"][0].name
                     json_data["provider"] = random.choice(models.demo_models["default"][1])
+            if limiter_check is not None and json_data.get("provider") in ["Feature"]:
+                limiter_check()
+            if "images" in json_data:
+                kwargs["images"] = json_data["images"]
             kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
             return self.app.response_class(
                 self._create_response_stream(
@@ -158,8 +162,7 @@ class Backend_Api(Api):
             @app.route('/backend-api/v2/conversation', methods=['POST'])
             @limiter.limit("2 per minute")
             def _handle_conversation():
-                limiter.check()
-                return handle_conversation()
+                return handle_conversation(limiter.check)
         else:
             @app.route('/backend-api/v2/conversation', methods=['POST'])
             def _handle_conversation():
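
Passing limiter.check into the shared handler lets the rate limit fire only for "Feature" requests instead of unconditionally. A minimal sketch of that pattern, assuming Flask-Limiter with auto_check disabled so the decorator only registers the limit and check() enforces it (route and handler are simplified stand-ins):

from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

app = Flask(__name__)
# auto_check=False: registered limits are enforced only by explicit check().
limiter = Limiter(get_remote_address, app=app, auto_check=False)

def handle_conversation(limiter_check: callable = None, provider: str = "Feature"):
    # Enforce the rate limit only for providers that need it; check()
    # aborts with a 429 error when the current request exceeds the limit.
    if limiter_check is not None and provider in ["Feature"]:
        limiter_check()
    return "streamed response"

@app.route("/conversation", methods=["POST"])
@limiter.limit("2 per minute")
def _handle_conversation():
    # The check is passed in rather than called unconditionally here.
    return handle_conversation(limiter.check)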