Remove login url from error message

Remove print in CablyAI
Fix Vision Support in HuggingSpace provider
Support images in BackendApi provider
Add missing import in Blackbox provider
hlohaus
2025-02-05 13:37:55 +01:00
parent 03d0c3053f
commit 88918cb897
10 changed files with 55 additions and 29 deletions


@@ -15,7 +15,7 @@ from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..image import to_data_uri
 from ..cookies import get_cookies_dir
 from .helper import format_prompt, format_image_prompt
-from ..providers.response import JsonConversation, ImageResponse
+from ..providers.response import JsonConversation, ImageResponse, Reasoning
 
 class Conversation(JsonConversation):
     validated_value: str = None


@@ -87,7 +87,6 @@ class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
                     continue
                 line = line.decode('utf-8').strip()
-                print(line)
                 if not line.startswith("data: "):
                     continue
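
For context, the loop above consumes a server-sent-events stream, in which only lines prefixed with "data: " carry payloads; keep-alives and blank lines are skipped. A minimal standalone sketch of that filtering pattern (the byte strings and the parse_sse_lines helper are illustrative, not from CablyAI):

    import json

    def parse_sse_lines(raw_lines):
        # Keep only SSE payload lines, mirroring the startswith("data: ") check above.
        for raw in raw_lines:
            line = raw.decode('utf-8').strip()
            if not line.startswith("data: "):
                continue
            data = line[len("data: "):]
            if data == "[DONE]":
                break
            yield json.loads(data)

    # Fabricated stream chunks:
    chunks = [b'data: {"delta": "Hi"}\n', b'\n', b'data: [DONE]\n']
    for event in parse_sse_lines(chunks):
        print(event["delta"])  # -> Hi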


@@ -148,7 +148,7 @@ class HuggingFaceInference(AsyncGeneratorProvider, ProviderModelMixin):
             if response.headers["content-type"].startswith("image/"):
                 base64_data = base64.b64encode(b"".join([chunk async for chunk in response.iter_content()]))
                 url = f"data:{response.headers['content-type']};base64,{base64_data.decode()}"
-                yield ImageResponse(url, prompt)
+                yield ImageResponse(url, inputs)
             else:
                 yield (await response.json())[0]["generated_text"].strip()
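
The branch above packs raw image bytes into a data: URI before yielding an ImageResponse. A minimal sketch of that encoding step, assuming PNG bytes (the helper name is made up):

    import base64

    def to_data_uri_sketch(image_bytes: bytes, content_type: str = "image/png") -> str:
        # Same shape as the f-string above: data:<mime>;base64,<payload>
        return f"data:{content_type};base64,{base64.b64encode(image_bytes).decode()}"

    print(to_data_uri_sketch(b"\x89PNG")[:40])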


@@ -2,14 +2,16 @@ from __future__ import annotations
 from aiohttp import ClientSession
+import time
+import asyncio
 
 from ...typing import AsyncResult, Messages
-from ...providers.response import ImageResponse
+from ...providers.response import ImageResponse, Reasoning
 from ...requests.raise_for_status import raise_for_status
 from ..helper import format_image_prompt, get_random_string
 from .Janus_Pro_7B import Janus_Pro_7B, JsonConversation, get_zerogpu_token
 
 class G4F(Janus_Pro_7B):
     label = "G4F framework"
     space = "roxky/Janus-Pro-7B"
     url = f"https://huggingface.co/spaces/roxky/g4f-space"
     api_url = "https://roxky-janus-pro-7b.hf.space"
@@ -62,13 +64,25 @@ class G4F(Janus_Pro_7B):
"trigger_id": 10
}
async with ClientSession() as session:
yield Reasoning(status="Acquiring GPU Token")
zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
headers = {
"x-zerogpu-token": zerogpu_token,
"x-zerogpu-uuid": zerogpu_uuid,
}
async def generate():
async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
await raise_for_status(response)
response_data = await response.json()
image_url = response_data["data"][0]['url']
yield ImageResponse(images=[image_url], alt=prompt)
return ImageResponse(images=[image_url], alt=prompt)
background_tasks = set()
started = time.time()
task = asyncio.create_task(generate())
background_tasks.add(task)
task.add_done_callback(background_tasks.discard)
while background_tasks:
yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
await asyncio.sleep(0.2)
yield await task
yield Reasoning(status=f"Finished {time.time() - started:.2f}s")


@@ -82,10 +82,8 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
method = "post"
if model == cls.default_image_model or prompt is not None:
method = "image"
prompt = format_prompt(messages) if prompt is None and conversation is None else prompt
prompt = format_image_prompt(messages, prompt)
if seed is None:
seed = int(time.time())
@@ -139,7 +137,8 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
                     if json_data.get('msg') == 'process_completed':
                         if 'output' in json_data and 'error' in json_data['output']:
-                            raise ResponseError("Missing image input" if json_data['output']['error'] and "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
+                            json_data['output']['error'] = json_data['output']['error'].split(" <a ")[0]
+                            raise ResponseError("Missing images input" if json_data['output']['error'] and "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
                         if 'output' in json_data and 'data' in json_data['output']:
                             yield Reasoning(status="Finished")
                             if "image" in json_data['output']['data'][0][0]:


@@ -27,10 +27,14 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     default_image_model = BlackForestLabsFlux1Dev.default_model
     default_vision_model = Qwen_QVQ_72B.default_model
     providers = [
-        BlackForestLabsFlux1Dev, BlackForestLabsFlux1Schnell,
+        BlackForestLabsFlux1Dev,
+        BlackForestLabsFlux1Schnell,
         VoodoohopFlux1Schnell,
-        CohereForAI, Janus_Pro_7B,
-        Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct,
+        CohereForAI,
+        Janus_Pro_7B,
+        Qwen_QVQ_72B,
+        Qwen_Qwen_2_5M_Demo,
+        Qwen_Qwen_2_72B_Instruct,
         StableDiffusion35Large,
         G4F
     ]
@@ -70,7 +74,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
         random.shuffle(cls.providers)
         for provider in cls.providers:
             if model in provider.model_aliases:
-                async for chunk in provider.create_async_generator(provider.model_aliases[model], messages, **kwargs):
+                async for chunk in provider.create_async_generator(provider.model_aliases[model], messages, images=images, **kwargs):
                     is_started = True
                     yield chunk
         if is_started:
@@ -79,7 +83,7 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
         for provider in cls.providers:
             if model in provider.get_models():
                 try:
-                    async for chunk in provider.create_async_generator(model, messages, **kwargs):
+                    async for chunk in provider.create_async_generator(model, messages, images=images, **kwargs):
                         is_started = True
                         yield chunk
                     if is_started:
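
The vision fix threads images through to each child provider instead of silently dropping it: images arrives as a named parameter of the outer create_async_generator, so it is not part of **kwargs and has to be forwarded explicitly. A minimal sketch of that dispatch pattern (EchoProvider and its alias map are fabricated):

    import asyncio

    class EchoProvider:
        model_aliases = {"vision": "echo-vision"}

        @classmethod
        async def create_async_generator(cls, model, messages, images=None, **kwargs):
            yield f"{model}: {len(images or [])} image(s)"

    async def dispatch(model, messages, images=None, **kwargs):
        for provider in [EchoProvider]:
            if model in provider.model_aliases:
                # images= must be passed on by name; it is not inside **kwargs.
                async for chunk in provider.create_async_generator(
                        provider.model_aliases[model], messages, images=images, **kwargs):
                    yield chunk
                return

    async def main():
        async for chunk in dispatch("vision", [], images=["cat.jpg"]):
            print(chunk)  # -> echo-vision: 1 image(s)

    asyncio.run(main())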


@@ -68,12 +68,17 @@
             const currSlideElement = lightbox.pswp.currSlide.data.element;
             if (currSlideElement) {
                 const img = currSlideElement.querySelector('img');
-                el.innerText = img.getAttribute('alt');
                 const download = document.createElement("a");
                 download.setAttribute("href", img.getAttribute('src'));
-                download.setAttribute("download", `${img.getAttribute('alt')}${lightbox.pswp.currSlide.index}.jpg`);
+                let extension = img.getAttribute('src').includes(".webp") ? ".webp" : ".jpg";
+                download.setAttribute("download", `${img.getAttribute('alt')} ${lightbox.pswp.currSlide.index}${extension}`);
                 download.style.float = "right";
                 download.innerHTML = '<i class="fa-solid fa-download"></i>';
+                let span = document.createElement("span");
+                span.innerText = img.getAttribute('alt');
+                el.innerHTML = '';
                 el.appendChild(download);
+                el.appendChild(span);
             }
         });
     }


@@ -1165,6 +1165,10 @@ ul {
     .settings h3 {
         text-align: center;
     }
+
+    .field.collapsible {
+        flex-direction: column;
+    }
 }
 
 .shown {


@@ -1023,14 +1023,11 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     let api_key;
     if (is_demo && provider == "Feature") {
         api_key = localStorage.getItem("user");
-    } else if (is_demo && provider != "Custom") {
+    } else if (is_demo) {
         api_key = localStorage.getItem("HuggingFace-api_key");
     } else {
         api_key = get_api_key_by_provider(provider);
     }
-    if (is_demo && !api_key) {
-        api_key = localStorage.getItem("HuggingFace-api_key");
-    }
     if (is_demo && !api_key) {
         location.href = "/";
         return;
@@ -2005,7 +2002,9 @@ async function on_api() {
         providerSelect.innerHTML = `
             <option value="">Demo Mode</option>
             <option value="Feature">Feature Provider</option>
-            <option value="Custom">Custom Provider</option>`;
+            <option value="G4F">G4F framework</option>
+            <option value="HuggingFace">HuggingFace</option>
+            <option value="HuggingSpace">HuggingSpace</option>`;
         providerSelect.selectedIndex = 0;
         document.getElementById("pin").disabled = true;
         document.getElementById("refine")?.parentElement.classList.add("hidden")
@@ -2018,7 +2017,6 @@ async function on_api() {
         }
     });
     login_urls = {
-        "Custom": ["Custom Provider", "", []],
         "HuggingFace": ["HuggingFace", "", []],
     };
 } else {


@@ -116,7 +116,7 @@ class Backend_Api(Api):
             }
             for model, providers in models.demo_models.values()]
 
-        def handle_conversation():
+        def handle_conversation(limiter_check: callable = None):
             """
             Handles conversation requests and streams responses back.
@@ -135,7 +135,7 @@ class Backend_Api(Api):
             else:
                 json_data = request.json
 
-            if app.demo and json_data.get("provider") not in ["Custom", "Feature"]:
+            if app.demo and json_data.get("provider") not in ["Custom", "Feature", "HuggingFace", "HuggingSpace", "G4F"]:
                 model = json_data.get("model")
                 if model != "default" and model in models.demo_models:
                     json_data["provider"] = random.choice(models.demo_models[model][1])
@@ -143,6 +143,10 @@ class Backend_Api(Api):
                 if not model or model == "default":
                     json_data["model"] = models.demo_models["default"][0].name
                     json_data["provider"] = random.choice(models.demo_models["default"][1])
+            if limiter_check is not None and json_data.get("provider") in ["Feature"]:
+                limiter_check()
+            if "images" in json_data:
+                kwargs["images"] = json_data["images"]
             kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
             return self.app.response_class(
                 self._create_response_stream(
@@ -158,8 +162,7 @@ class Backend_Api(Api):
             @app.route('/backend-api/v2/conversation', methods=['POST'])
             @limiter.limit("2 per minute")
             def _handle_conversation():
-                limiter.check()
-                return handle_conversation()
+                return handle_conversation(limiter.check)
         else:
             @app.route('/backend-api/v2/conversation', methods=['POST'])
             def _handle_conversation():
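
Passing limiter.check into handle_conversation defers the explicit rate-limit check until the request body has been parsed, so only "Feature" requests trigger it. A rough standalone sketch of the idea, with a plain function standing in for flask-limiter's check:

    def handle_conversation(json_data, limiter_check=None):
        # Only enforce the extra limit for the quota-limited provider.
        if limiter_check is not None and json_data.get("provider") in ["Feature"]:
            limiter_check()
        return f"handled {json_data.get('provider')}"

    calls = {"n": 0}
    def fake_limiter_check():
        # Stand-in for limiter.check(); raises once the quota is exhausted.
        calls["n"] += 1
        if calls["n"] > 2:
            raise RuntimeError("429 Too Many Requests")

    print(handle_conversation({"provider": "HuggingFace"}, fake_limiter_check))  # never limited
    print(handle_conversation({"provider": "Feature"}, fake_limiter_check))      # counts toward quota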