Fix image generation in OpenaiChat
Add HarProvider, disable LMArenaProvider
@@ -2,4 +2,5 @@ recursive-include g4f/gui/server *
 recursive-include g4f/gui/client *
 recursive-include g4f/Provider/npm *
 recursive-include g4f/Provider/gigachat_crt *
 recursive-include g4f/Provider/you *
+recursive-include g4f/Provider/har *
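The new recursive-include line ships the bundled HAR captures with the package. A minimal sketch for confirming they are present in an installed copy, reusing the same directory the provider walks at runtime (assumes the package is installed):

# sketch: list the .har captures bundled next to the new provider module
import os
import g4f.Provider.har as har_pkg

har_dir = os.path.dirname(har_pkg.__file__)
print([name for name in os.listdir(har_dir) if name.endswith(".har")])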
@@ -17,7 +17,7 @@ class LMArenaProvider(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin)
     label = "LM Arena"
     url = "https://lmarena.ai"
     api_endpoint = "/queue/join?"
-    working = True
+    working = False

     default_model = "gpt-4o"
     model_aliases = {default_model: "chatgpt-4o-latest-20250326"}
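Setting working = False retires a provider without deleting it; callers that enumerate providers filter on this flag. A minimal sketch of such a filter, assuming only the boolean working attribute shown above:

# sketch: collect the names of provider classes currently flagged as working
import g4f.Provider as providers

active = []
for name in dir(providers):
    obj = getattr(providers, name)
    if isinstance(obj, type) and getattr(obj, "working", False):
        active.append(name)

print("LMArenaProvider" in active)  # False after this commit
print("HarProvider" in active)      # True, via the new module below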
@@ -13,6 +13,7 @@ API_URL = "https://www.perplexity.ai/socket.io/"
 WS_URL = "wss://www.perplexity.ai/socket.io/"

 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Perplexity Labs"
     url = "https://labs.perplexity.ai"
     working = True

@@ -12,6 +12,7 @@ except ImportError as e:
 from .needs_auth import *
 from .template import OpenaiTemplate, BackendApi
 from .hf import HuggingFace, HuggingChat, HuggingFaceAPI, HuggingFaceInference, HuggingFaceMedia
+from .har import HarProvider
 try:
     from .not_working import *
 except ImportError as e:
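With the re-export in place, the new provider can be pinned explicitly. A usage sketch, assuming g4f's standard client surface (model and prompt here are illustrative):

from g4f.client import Client
from g4f.Provider import HarProvider

client = Client(provider=HarProvider)
response = client.chat.completions.create(
    model="gpt-4o",  # resolved through the provider's model aliases
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response.choices[0].message.content)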
g4f/Provider/har/__init__.py (Normal file, 151 lines)
@@ -0,0 +1,151 @@
from __future__ import annotations

import os
import json
import uuid
from urllib.parse import urlparse

from ...typing import AsyncResult, Messages
from ...requests import StreamSession, raise_for_status
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..helper import get_last_user_message
from ..openai.har_file import get_headers

class HarProvider(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://lmarena.ai"
    working = True

    @classmethod
    def get_models(cls):
        for harFile in read_har_files():
            for v in harFile['log']['entries']:
                request_url = v['request']['url']
                if not request_url.startswith(cls.url) or "." in urlparse(request_url).path or "heartbeat" in request_url:
                    continue
                if "\n\ndata: " not in v['response']['content']['text']:
                    continue
                chunk = v['response']['content']['text'].split("\n\ndata: ")[2]
                cls.models = list(dict.fromkeys(get_str_list(find_list(json.loads(chunk), 'choices'))).keys())
                if cls.models:
                    break
        return cls.models

    @classmethod
    async def create_async_generator(
        cls, model: str, messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        if model in cls.model_aliases:
            model = cls.model_aliases[model]
        session_hash = str(uuid.uuid4()).replace("-", "")
        prompt = get_last_user_message(messages)

        for harFile in read_har_files():
            async with StreamSession(impersonate="chrome") as session:
                for v in harFile['log']['entries']:
                    request_url = v['request']['url']
                    if not request_url.startswith(cls.url) or "." in urlparse(request_url).path or "heartbeat" in request_url:
                        continue
                    postData = None
                    if "postData" in v['request']:
                        postData = v['request']['postData']['text']
                        postData = postData.replace('"hello"', json.dumps(prompt))
                        postData = postData.replace("__SESSION__", session_hash)
                        if model:
                            postData = postData.replace("__MODEL__", model)
                    request_url = request_url.replace("__SESSION__", session_hash)
                    method = v['request']['method'].lower()

                    async with getattr(session, method)(request_url, data=postData, headers=get_headers(v), proxy=proxy) as response:
                        await raise_for_status(response)
                        if "heartbeat" in request_url:
                            continue
                        returned_data = ""
                        async for line in response.iter_lines():
                            if not line.startswith(b"data: "):
                                continue
                            for content in find_str(json.loads(line[6:]), 3):
                                if content == '<span class="cursor"></span> ' or content == 'update':
                                    continue
                                if content.endswith("▌"):
                                    content = content[:-2]
                                new_content = content
                                if content.startswith(returned_data):
                                    new_content = content[len(returned_data):]
                                if not new_content:
                                    continue
                                returned_data += new_content
                                yield new_content

def read_har_files():
    for root, _, files in os.walk(os.path.dirname(__file__)):
        for file in files:
            if not file.endswith(".har"):
                continue
            with open(os.path.join(root, file), 'rb') as file:
                try:
                    yield json.loads(file.read())
                except json.JSONDecodeError:
                    raise RuntimeError(f"Failed to read HAR file: {file}")

def read_str_recusive(data):
    if isinstance(data, dict):
        data = data.values()
    for item in data:
        if isinstance(item, (list, dict)):
            yield from read_str_recusive(item)
        elif isinstance(item, str):
            yield item

def find_str(data, skip=0):
    for item in read_str_recusive(data):
        if skip > 0:
            skip -= 1
            continue
        yield item
        break

def read_list_recusive(data, key):
    if isinstance(data, dict):
        for k, v in data.items():
            if k == key:
                print(k, v)
                yield v
            else:
                yield from read_list_recusive(v, key)
    elif isinstance(data, list):
        for item in data:
            yield from read_list_recusive(item, key)

def find_list(data, key):
    for item in read_list_recusive(data, key):
        if isinstance(item, str):
            yield item
        elif isinstance(item, list):
            for sub_item in item:
                yield sub_item

def get_str_list(data):
    for item in data:
        if isinstance(item, list):
            yield from get_str_list(item)
        else:
            yield item

# with open("g4f/Provider/har/lmarena.ai.har", "r") as f:
#     try:
#         harFile = json.loads(f.read())
#     except json.JSONDecodeError:
#         raise RuntimeError(f"Failed to read HAR file")

# new_entries = []
# for v in harFile['log']['entries']:
#     request_url = v['request']['url']
#     if not request_url.startswith("https://lmarena.ai") or "." in urlparse(request_url).path or "heartbeat" in request_url:
#         continue
#     v['request']['cookies'] = []
#     v['request']['headers'] = [header for header in v['request']['headers'] if header['name'].lower() != "cookie"]
#     v['response']['headers'] = []
#     new_entries.append(v)
#     print(f"Request URL: {request_url}")
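A worked example of the recursive helpers above, on fabricated data (note that read_list_recusive still carries a debug print as committed):

from g4f.Provider.har import find_list, find_str, get_str_list

data = {"result": {"choices": ["gpt-4o", "claude-3"], "status": "ok"}}
# find_list yields the value under the "choices" key, flattened one level
print(list(get_str_list(find_list(data, "choices"))))  # ['gpt-4o', 'claude-3']
# find_str skips `skip` strings depth-first, yields one, then stops
print(list(find_str(data, skip=1)))                    # ['claude-3']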
g4f/Provider/har/lmarena.ai.har (Normal file, 1483 lines)
File diff suppressed because one or more lines are too long
@@ -11,7 +11,7 @@ from ...providers.response import JsonConversation, TitleGeneration

 class CohereForAI_C4AI_Command(AsyncGeneratorProvider, ProviderModelMixin):
     label = "CohereForAI C4AI Command"
-    url = "https://cohereforai-c4ai-command.hf.space"
+    url = "https://coherelabs-c4ai-command.hf.space"
     conversation_url = f"{url}/conversation"

     working = True
@@ -24,10 +24,10 @@ from ...requests import StreamSession
 from ...requests import get_nodriver
 from ...image import ImageRequest, to_image, to_bytes, is_accepted_format
 from ...errors import MissingAuthError, NoValidHarFileError
-from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult, ImageResponse
+from ...providers.response import JsonConversation, FinishReason, SynthesizeData, AuthResult, ImageResponse, ImagePreview
 from ...providers.response import Sources, TitleGeneration, RequestLogin, Reasoning
 from ...tools.media import merge_media
-from ..helper import format_cookies, get_last_user_message
+from ..helper import format_cookies, format_image_prompt
 from ..openai.models import default_model, default_image_model, models, image_models, text_models
 from ..openai.har_file import get_request_config
 from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url
@@ -254,31 +254,26 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         return messages

     @classmethod
-    async def get_generated_image(cls, session: StreamSession, auth_result: AuthResult, element: dict, prompt: str, conversation_id: str) -> ImageResponse:
-        try:
-            prompt = element["metadata"]["dalle"]["prompt"]
-        except IndexError:
-            pass
-        try:
-            file_id = element["asset_pointer"]
-            if "file-service://" in file_id:
-                file_id = file_id.split("file-service://", 1)[-1]
-                url = f"{cls.url}/backend-api/files/{file_id}/download"
-            else:
-                file_id = file_id.split("sediment://")[-1]
-                url = f"{cls.url}/backend-api/conversation/{conversation_id}/attachment/{file_id}/download"
-        except TypeError:
-            return
-        except Exception as e:
-            raise RuntimeError(f"No Image: {element} - {e}")
-        try:
-            async with session.get(url, headers=auth_result.headers) as response:
-                cls._update_request_args(auth_result, session)
-                await raise_for_status(response)
-                download_url = (await response.json())["download_url"]
-                return ImageResponse(download_url, prompt)
-        except Exception as e:
-            raise RuntimeError(f"Error in downloading image: {e}")
+    async def get_generated_images(cls, session: StreamSession, auth_result: AuthResult, parts: list, prompt: str, conversation_id: str) -> AsyncIterator:
+        download_urls = []
+        for element in [parts] if isinstance(parts, str) else parts:
+            if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
+                if not prompt:
+                    prompt = element["metadata"]["dalle"]["prompt"]
+                element = element["asset_pointer"]
+            element = element.split("sediment://")[-1]
+            url = f"{cls.url}/backend-api/conversation/{conversation_id}/attachment/{element}/download"
+            debug.log(f"OpenaiChat: Downloading image: {url}")
+            async with session.get(url, headers=auth_result.headers) as response:
+                cls._update_request_args(auth_result, session)
+                await raise_for_status(response)
+                data = await response.json()
+                download_url = data.get("download_url")
+                if download_url is None:
+                    print(data)
+                else:
+                    download_urls.append(download_url)
+        return ImagePreview(download_urls, prompt)

     @classmethod
     async def create_authed(
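The new method accepts either a bare asset-pointer string or a list of multimodal parts; a pure-Python sketch of just that normalization step, with fabricated pointers:

def normalize(parts):
    # mirror the input handling of get_generated_images above
    file_ids = []
    for element in [parts] if isinstance(parts, str) else parts:
        if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
            element = element["asset_pointer"]
        file_ids.append(element.split("sediment://")[-1])
    return file_ids

print(normalize("sediment://file_abc"))  # ['file_abc']
print(normalize([{"content_type": "image_asset_pointer",
                  "asset_pointer": "sediment://file_xyz"}]))  # ['file_xyz']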
@@ -394,10 +389,8 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             #f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
             #f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
         )]
-        if action is None or action == "variant" or action == "continue" and conversation.message_id is None:
-            action = "next"
         data = {
-            "action": action,
+            "action": "next",
             "parent_message_id": conversation.message_id,
             "model": model,
             "timezone_offset_min":-60,
@@ -413,7 +406,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         if conversation.conversation_id is not None:
             data["conversation_id"] = conversation.conversation_id
             debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
-        prompt = get_last_user_message(messages) if prompt is None else prompt
+        conversation.prompt = format_image_prompt(messages, prompt)
         if action != "continue":
             data["parent_message_id"] = getattr(conversation, "parent_message_id", conversation.message_id)
             conversation.parent_message_id = None
@@ -444,7 +437,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 await raise_for_status(response)
                 buffer = u""
                 async for line in response.iter_lines():
-                    async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources, prompt):
+                    async for chunk in cls.iter_messages_line(session, auth_result, line, conversation, sources):
                         if isinstance(chunk, str):
                             chunk = chunk.replace("\ue203", "").replace("\ue204", "").replace("\ue206", "")
                             buffer += chunk
@@ -469,6 +462,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                     break
         if sources.list:
             yield sources
+        if conversation.generated_images:
+            yield ImageResponse(conversation.generated_images.urls, conversation.prompt)
+            conversation.generated_images = None
+            conversation.prompt = None
         if return_conversation:
             yield conversation
         if auth_result.api_key is not None:
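Downstream, image chunks now arrive interleaved with text in the same stream; a consumer-side sketch that separates them, assuming only the response types used in the hunks above:

from g4f.providers.response import ImageResponse

async def collect(stream):
    # split an authed response stream into plain text and image responses
    text, images = "", []
    async for chunk in stream:
        if isinstance(chunk, ImageResponse):
            images.append(chunk)  # carries the download URLs and the prompt
        elif isinstance(chunk, str):
            text += chunk
    return text, images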
@@ -486,7 +483,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             yield FinishReason(conversation.finish_reason)

     @classmethod
-    async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes, fields: Conversation, sources: Sources, prompt: str) -> AsyncIterator:
+    async def iter_messages_line(cls, session: StreamSession, auth_result: AuthResult, line: bytes, fields: Conversation, sources: Sources) -> AsyncIterator:
         if not line.startswith(b"data: "):
             return
         elif line.startswith(b"data: [DONE]"):
@@ -519,6 +516,10 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
             for m in v:
                 if m.get("p") == "/message/content/parts/0" and fields.recipient == "all":
                     yield m.get("v")
+                elif m.get("p") == "/message/metadata/image_gen_title":
+                    fields.prompt = m.get("v")
+                elif m.get("p") == "/message/content/parts/0/asset_pointer":
+                    fields.generated_images = await cls.get_generated_images(session, auth_result, m.get("v"), fields.prompt, fields.conversation_id)
                 elif m.get("p") == "/message/metadata/search_result_groups":
                     for entry in [p.get("entries") for p in m.get("v")]:
                         for link in entry:
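The stream is a sequence of JSON-patch-like operations keyed by the path field p; a standalone sketch of the dispatch above, on fabricated events:

events = [
    {"p": "/message/metadata/image_gen_title", "v": "A watercolor fox"},
    {"p": "/message/content/parts/0", "v": "Here is your image."},
]
prompt, text = None, ""
for m in events:
    if m.get("p") == "/message/metadata/image_gen_title":
        prompt = m.get("v")   # remembered for the later image download
    elif m.get("p") == "/message/content/parts/0":
        text += m.get("v")
print(prompt, "|", text)  # A watercolor fox | Here is your image.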
@@ -547,14 +548,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                     fields.is_thinking = True
                     yield Reasoning(status=m.get("metadata", {}).get("initial_text"))
                 if c.get("content_type") == "multimodal_text":
-                    generated_images = []
-                    for element in c.get("parts"):
-                        if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
-                            image = cls.get_generated_image(session, auth_result, element, prompt, fields.conversation_id)
-                            generated_images.append(image)
-                    for image_response in await asyncio.gather(*generated_images):
-                        if image_response is not None:
-                            yield image_response
+                    yield await cls.get_generated_images(session, auth_result, c.get("parts"), fields.prompt, fields.conversation_id)
                 if m.get("author", {}).get("role") == "assistant":
                     if fields.parent_message_id is None:
                         fields.parent_message_id = v.get("message", {}).get("id")
@@ -738,6 +732,8 @@ class Conversation(JsonConversation):
         self.is_thinking = is_thinking
         self.p = None
         self.thoughts_summary = ""
+        self.prompt = None
+        self.generated_images: ImagePreview = None

 def get_cookies(
     urls: Optional[Iterator[str]] = None
@@ -396,7 +396,7 @@ class Backend_Api(Api):
             updated = chat_data.get("updated", 0)
             cache_value = self.chat_cache.get(share_id, 0)
             if updated == cache_value:
-                return jsonify({"error": {"message": "invalid date"}}), 400
+                return {"share_id": share_id}
             share_id = secure_filename(share_id)
             bucket_dir = get_bucket_dir(share_id)
             os.makedirs(bucket_dir, exist_ok=True)
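The changed branch now treats an unchanged timestamp as already shared and returns the existing id instead of a 400 error; a behavior sketch of just that comparison, with a fabricated cache:

chat_cache = {"abc123": 1700000000}  # share_id -> last seen "updated" timestamp

def share_response(share_id: str, updated: int):
    if updated == chat_cache.get(share_id, 0):
        return {"share_id": share_id}  # nothing changed since the last upload
    chat_cache[share_id] = updated
    return None                        # fall through: re-upload the bucket

print(share_response("abc123", 1700000000))  # {'share_id': 'abc123'}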