Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-09-26 20:31:14 +08:00
Add new G4F provider
g4f/Provider/PollinationsAI.py
@@ -75,7 +75,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):
         ### Image Models ###
         "sdxl-turbo": "turbo",
         "flux-schnell": "flux",
+        "flux-dev": "flux",
     }
     text_models = []
74 g4f/Provider/hf_space/G4F.py Normal file
@@ -0,0 +1,74 @@
from __future__ import annotations

from aiohttp import ClientSession
import time

from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse
from ...requests.raise_for_status import raise_for_status
from ..helper import format_image_prompt, get_random_string
from .Janus_Pro_7B import Janus_Pro_7B, JsonConversation, get_zerogpu_token

class G4F(Janus_Pro_7B):
    space = "roxky/Janus-Pro-7B"
    url = "https://huggingface.co/spaces/roxky/g4f-space"
    api_url = "https://roxky-janus-pro-7b.hf.space"
    url_flux = "https://roxky-g4f-flux.hf.space/run/predict"
    referer = f"{api_url}?__theme=light"

    default_model = "flux"
    model_aliases = {"flux-schnell": default_model, "flux-dev": default_model}
    image_models = [Janus_Pro_7B.default_image_model, default_model, *model_aliases.keys()]
    models = [Janus_Pro_7B.default_model, *image_models]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        width: int = 1024,
        height: int = 1024,
        seed: int = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        # Anything that is not a flux model is delegated to the parent Janus provider.
        if cls.default_model not in model:
            async for chunk in super().create_async_generator(model, messages, prompt=prompt, seed=seed, cookies=cookies, **kwargs):
                yield chunk
            return

        model = cls.get_model(model)
        # Snap dimensions to multiples of 8, with a floor of 32.
        width = max(32, width - (width % 8))
        height = max(32, height - (height % 8))
        if prompt is None:
            prompt = format_image_prompt(messages)
        if seed is None:
            seed = int(time.time())

        payload = {
            "data": [
                prompt,
                seed,
                width,
                height,
                True,
                1
            ],
            "event_data": None,
            "fn_index": 3,
            "session_hash": get_random_string(),
            "trigger_id": 10
        }
        async with ClientSession() as session:
            zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
            headers = {
                "x-zerogpu-token": zerogpu_token,
                "x-zerogpu-uuid": zerogpu_uuid,
            }
            async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
                await raise_for_status(response)
                response_data = await response.json()
                image_url = response_data["data"][0]["url"]
                yield ImageResponse(images=[image_url], alt=prompt)
g4f/Provider/hf_space/Janus_Pro_7B.py
@@ -3,21 +3,24 @@ from __future__ import annotations
 import json
 import uuid
 import re
+import time
 from datetime import datetime, timezone, timedelta
 import urllib.parse

-from ...typing import AsyncResult, Messages, Cookies
+from ...typing import AsyncResult, Messages, Cookies, ImagesType
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_prompt, format_image_prompt
-from ...providers.response import JsonConversation, ImageResponse, DebugResponse
-from ...requests.aiohttp import StreamSession, StreamResponse
+from ...providers.response import JsonConversation, ImageResponse, Reasoning
+from ...requests.aiohttp import StreamSession, StreamResponse, FormData
 from ...requests.raise_for_status import raise_for_status
+from ...image import to_bytes, is_accepted_format
 from ...cookies import get_cookies
 from ...errors import ResponseError
 from ... import debug

 class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
-    url = "https://huggingface.co/spaces/deepseek-ai/Janus-Pro-7B"
+    space = "deepseek-ai/Janus-Pro-7B"
+    url = f"https://huggingface.co/spaces/{space}"
     api_url = "https://deepseek-ai-janus-pro-7b.hf.space"
     referer = f"{api_url}?__theme=light"
@@ -28,30 +31,27 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
     default_model = "janus-pro-7b"
     default_image_model = "janus-pro-7b-image"
     default_vision_model = default_model
     models = [default_model, default_image_model]
     image_models = [default_image_model]

     @classmethod
-    def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation):
+    def run(cls, method: str, session: StreamSession, prompt: str, conversation: JsonConversation, image: dict = None, seed: int = 0):
+        headers = {
+            "content-type": "application/json",
+            "x-zerogpu-token": conversation.zerogpu_token,
+            "x-zerogpu-uuid": conversation.zerogpu_uuid,
+            "referer": cls.referer,
+        }
         if method == "post":
             return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
-                "headers": {
-                    "content-type": "application/json",
-                    "x-zerogpu-token": conversation.zerogpu_token,
-                    "x-zerogpu-uuid": conversation.zerogpu_uuid,
-                    "referer": cls.referer,
-                },
-                "json": {"data":[None,prompt,42,0.95,0.1],"event_data":None,"fn_index":2,"trigger_id":10,"session_hash":conversation.session_hash},
+                "headers": {k: v for k, v in headers.items() if v is not None},
+                "json": {"data":[image,prompt,seed,0.95,0.1],"event_data":None,"fn_index":2,"trigger_id":10,"session_hash":conversation.session_hash},
             })
         elif method == "image":
             return session.post(f"{cls.api_url}/gradio_api/queue/join?__theme=light", **{
-                "headers": {
-                    "content-type": "application/json",
-                    "x-zerogpu-token": conversation.zerogpu_token,
-                    "x-zerogpu-uuid": conversation.zerogpu_uuid,
-                    "referer": cls.referer,
-                },
-                "json": {"data":[prompt,1234,5,1],"event_data":None,"fn_index":3,"trigger_id":20,"session_hash":conversation.session_hash},
+                "headers": {k: v for k, v in headers.items() if v is not None},
+                "json": {"data":[prompt,seed,5,1],"event_data":None,"fn_index":3,"trigger_id":20,"session_hash":conversation.session_hash},
             })
         return session.get(f"{cls.api_url}/gradio_api/queue/data?session_hash={conversation.session_hash}", **{
             "headers": {
@@ -66,11 +66,13 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
         cls,
         model: str,
         messages: Messages,
+        images: ImagesType = None,
         prompt: str = None,
         proxy: str = None,
         cookies: Cookies = None,
         return_conversation: bool = False,
         conversation: JsonConversation = None,
+        seed: int = None,
         **kwargs
     ) -> AsyncResult:
         def generate_session_hash():
@@ -84,20 +86,43 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
         prompt = format_prompt(messages) if prompt is None and conversation is None else prompt
         prompt = format_image_prompt(messages, prompt)

+        if seed is None:
+            seed = int(time.time())
+
-        session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
         async with StreamSession(proxy=proxy, impersonate="chrome") as session:
+            session_hash = generate_session_hash() if conversation is None else getattr(conversation, "session_hash")
-            zerogpu_uuid, zerogpu_token = await get_zerogpu_token(session, conversation, cookies)
+            zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, conversation, cookies)
             if conversation is None or not hasattr(conversation, "session_hash"):
                 conversation = JsonConversation(session_hash=session_hash, zerogpu_token=zerogpu_token, zerogpu_uuid=zerogpu_uuid)
+            conversation.zerogpu_token = zerogpu_token
             if return_conversation:
                 yield conversation

-            async with cls.run(method, session, prompt, conversation) as response:
+            if images is not None:
+                data = FormData()
+                for i in range(len(images)):
+                    images[i] = (to_bytes(images[i][0]), images[i][1])
+                for image, image_name in images:
+                    data.add_field("files", image, filename=image_name)
+                async with session.post(f"{cls.api_url}/gradio_api/upload", params={"upload_id": session_hash}, data=data) as response:
+                    await raise_for_status(response)
+                    image_files = await response.json()
+                images = [{
+                    "path": image_file,
+                    "url": f"{cls.api_url}/gradio_api/file={image_file}",
+                    "orig_name": images[i][1],
+                    "size": len(images[i][0]),
+                    "mime_type": is_accepted_format(images[i][0]),
+                    "meta": {
+                        "_type": "gradio.FileData"
+                    }
+                } for i, image_file in enumerate(image_files)]
+
+            async with cls.run(method, session, prompt, conversation, None if images is None else images.pop(), seed) as response:
                 await raise_for_status(response)

-                async with cls.run("get", session, prompt, conversation) as response:
+                async with cls.run("get", session, prompt, conversation, None, seed) as response:
                     response: StreamResponse = response
                     async for line in response.iter_lines():
                         decoded_line = line.decode(errors="replace")
@@ -105,32 +130,34 @@ class Janus_Pro_7B(AsyncGeneratorProvider, ProviderModelMixin):
                            try:
                                json_data = json.loads(decoded_line[6:])
                                if json_data.get('msg') == 'log':
-                                    yield DebugResponse(log=json_data["log"])
+                                    yield Reasoning(status=json_data["log"])

-                                if json_data.get('msg') == 'process_generating':
-                                    if 'output' in json_data and 'data' in json_data['output']:
-                                        yield f"data: {json.dumps(json_data['output']['data'])}"
+                                if json_data.get('msg') == 'progress':
+                                    if 'progress_data' in json_data and json_data['progress_data']:
+                                        progress = json_data['progress_data'][0]
+                                        yield Reasoning(status=f"{progress['desc']} {progress['index']}/{progress['length']}")

                                if json_data.get('msg') == 'process_completed':
                                    if 'output' in json_data and 'error' in json_data['output']:
-                                        raise ResponseError("Text model is not working. Try out image model" if "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
+                                        raise ResponseError("Missing image input" if json_data['output']['error'] and "AttributeError" in json_data['output']['error'] else json_data['output']['error'])
                                    if 'output' in json_data and 'data' in json_data['output']:
+                                        yield Reasoning(status="Finished")
                                        if "image" in json_data['output']['data'][0][0]:
                                            yield ImageResponse([image["image"]["url"] for image in json_data['output']['data'][0]], prompt)
                                        else:
-                                            yield f"data: {json.dumps(json_data['output']['data'])}"
+                                            yield json_data['output']['data'][0]
                                        break

                            except json.JSONDecodeError:
                                debug.log("Could not parse JSON:", decoded_line)

-async def get_zerogpu_token(session: StreamSession, conversation: JsonConversation, cookies: Cookies = None):
+async def get_zerogpu_token(space: str, session: StreamSession, conversation: JsonConversation, cookies: Cookies = None):
     zerogpu_uuid = None if conversation is None else getattr(conversation, "zerogpu_uuid", None)
     zerogpu_token = "[object Object]"

     cookies = get_cookies("huggingface.co", raise_requirements_error=False) if cookies is None else cookies
     if zerogpu_uuid is None:
-        async with session.get(Janus_Pro_7B.url, cookies=cookies) as response:
+        async with session.get(f"https://huggingface.co/spaces/{space}", cookies=cookies) as response:
             match = re.search(r'"token":"([^&]+?)"', await response.text())
             if match:
                 zerogpu_token = match.group(1)
@@ -141,8 +168,9 @@ async def get_zerogpu_token(session: StreamSession, conversation: JsonConversati
     # Get current UTC time + 10 minutes
     dt = (datetime.now(timezone.utc) + timedelta(minutes=10)).isoformat(timespec='milliseconds')
     encoded_dt = urllib.parse.quote(dt)
-    async with session.get(f"https://huggingface.co/api/spaces/deepseek-ai/Janus-Pro-7B/jwt?expiration={encoded_dt}&include_pro_status=true", cookies=cookies) as response:
-        zerogpu_token = (await response.json())
-        zerogpu_token = zerogpu_token["token"]
+    async with session.get(f"https://huggingface.co/api/spaces/{space}/jwt?expiration={encoded_dt}&include_pro_status=true", cookies=cookies) as response:
+        response_data = (await response.json())
+        if "token" in response_data:
+            zerogpu_token = response_data["token"]

     return zerogpu_uuid, zerogpu_token
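For reference, the expiration query parameter sent to the space's /jwt endpoint is just a URL-encoded ISO-8601 timestamp ten minutes in the future. A standalone sketch of the same construction (the space id here is illustrative):

    # Standalone sketch of the expiration parameter built above.
    from datetime import datetime, timezone, timedelta
    import urllib.parse

    dt = (datetime.now(timezone.utc) + timedelta(minutes=10)).isoformat(timespec='milliseconds')
    encoded_dt = urllib.parse.quote(dt)  # e.g. "2025-02-01T12%3A10%3A00.123%2B00%3A00"
    print(f"https://huggingface.co/api/spaces/deepseek-ai/Janus-Pro-7B/jwt"
          f"?expiration={encoded_dt}&include_pro_status=true")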
g4f/Provider/hf_space/__init__.py
@@ -15,6 +15,7 @@ from .Qwen_QVQ_72B import Qwen_QVQ_72B
 from .Qwen_Qwen_2_5M_Demo import Qwen_Qwen_2_5M_Demo
 from .Qwen_Qwen_2_72B_Instruct import Qwen_Qwen_2_72B_Instruct
 from .StableDiffusion35Large import StableDiffusion35Large
+from .G4F import G4F

 class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/spaces"
@@ -30,7 +31,8 @@ class HuggingSpace(AsyncGeneratorProvider, ProviderModelMixin):
         VoodoohopFlux1Schnell,
         CohereForAI, Janus_Pro_7B,
         Qwen_QVQ_72B, Qwen_Qwen_2_5M_Demo, Qwen_Qwen_2_72B_Instruct,
-        StableDiffusion35Large
+        StableDiffusion35Large,
+        G4F
     ]

     @classmethod
@@ -2,8 +2,9 @@ from __future__ import annotations
 import json

-from ...typing import Messages, AsyncResult
+from ...typing import Messages, AsyncResult, ImagesType
 from ...requests import StreamSession
+from ...image import to_data_uri
 from ...providers.base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...providers.response import RawResponse
 from ... import debug
@@ -17,16 +18,22 @@ class BackendApi(AsyncGeneratorProvider, ProviderModelMixin):
         cls,
         model: str,
         messages: Messages,
+        images: ImagesType = None,
         api_key: str = None,
         **kwargs
     ) -> AsyncResult:
         debug.log(f"{cls.__name__}: {api_key}")
+        if images is not None:
+            for i in range(len(images)):
+                images[i] = (to_data_uri(images[i][0]), images[i][1])
         async with StreamSession(
             headers={"Accept": "text/event-stream", **cls.headers},
         ) as session:
             async with session.post(f"{cls.url}/backend-api/v2/conversation", json={
                 "model": model,
                 "messages": messages,
+                "images": images,
                 "api_key": api_key,
                 **kwargs
             }, ssl=cls.ssl) as response:
                 async for line in response.iter_lines():
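The images BackendApi now forwards travel as (data URI, filename) pairs. A minimal sketch of the shape that to_data_uri produces, built here with plain base64 and placeholder bytes (both illustrative, not g4f's helper itself):

    import base64

    image_bytes = b"\x89PNG\r\n\x1a\n"  # truncated PNG signature, illustrative only
    data_uri = "data:image/png;base64," + base64.b64encode(image_bytes).decode()
    images = [(data_uri, "example.png")]
    print(images[0][1], data_uri[:40])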
@@ -1,7 +1,6 @@
 from __future__ import annotations

 import json
 import time
 import requests

 from ..helper import filter_none, format_image_prompt
@@ -2425,6 +2425,7 @@ async function api(ressource, args=null, files=null, message_id=null, scroll=tru
             const message = body.match(/<p>([^<]+?)<\/p>/)[1];
             error_storage[message_id] = `**${title}**\n${message}`;
             await finish_message();
+            return;
         } else {
             await read_response(response, message_id, args.provider || null, scroll, finish_message);
             await finish_message();
g4f/models.py
@@ -741,10 +741,11 @@ class ModelUtils:
 demo_models = {
     gpt_4o.name: [gpt_4o, [PollinationsAI, Blackbox]],
     gpt_4o_mini.name: [gpt_4o_mini, [PollinationsAI, CablyAI, DDG]],
+    deepseek_r1.name: [deepseek_r1, [PollinationsAI, HuggingFace]],
     "default": [llama_3_2_11b, [HuggingFace]],
     qwen_2_vl_7b.name: [qwen_2_vl_7b, [HuggingFaceAPI]],
     qvq_72b.name: [qvq_72b, [HuggingSpace]],
-    deepseek_r1.name: [deepseek_r1, [HuggingFace]],
     command_r.name: [command_r, [HuggingSpace]],
     command_r_plus.name: [command_r_plus, [HuggingSpace]],
     command_r7b.name: [command_r7b, [HuggingSpace]],
@@ -753,8 +754,8 @@ demo_models = {
     qwq_32b.name: [qwq_32b, [HuggingFace]],
     llama_3_3_70b.name: [llama_3_3_70b, [HuggingFace]],
     sd_3_5.name: [sd_3_5, [HuggingSpace, HuggingFace]],
-    flux_dev.name: [flux_dev, [HuggingSpace, HuggingFace]],
-    flux_schnell.name: [flux_schnell, [HuggingFace, HuggingSpace, PollinationsAI]],
+    flux_dev.name: [flux_dev, [PollinationsAI, HuggingSpace, HuggingFace]],
+    flux_schnell.name: [flux_schnell, [PollinationsAI, HuggingFace, HuggingSpace]],
 }

 # Create a list of all models and their providers
31 projects/deepseek4free/setup.sh Normal file
@@ -0,0 +1,31 @@
git clone https://github.com/xtekky/deepseek4free.git

echo "recursive-include dsk/wasm *" >> ./deepseek4free/MANIFEST.in

echo "from setuptools import find_packages, setup

INSTALL_REQUIRE = [
    'curl-cffi',
    'wasmtime',
    'numpy',
]

DESCRIPTION = (
    'The official deepseek4free repository | various collection of powerful language models'
)

# Setting up
setup(
    name='dsk',
    version='0.0.1.0',
    author='Tekky',
    author_email='<support@g4f.ai>',
    description=DESCRIPTION,
    long_description_content_type='text/markdown',
    long_description='',
    packages=find_packages(),
    include_package_data=True,
    install_requires=INSTALL_REQUIRE
)" >> ./deepseek4free/setup.py

pip install ./deepseek4free --break-system-packages
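Running the script clones deepseek4free into the current working directory, appends a minimal setup.py for the dsk package, and installs it with pip; note that --break-system-packages lets pip install into an externally managed (PEP 668) Python environment.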