Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-10-21 07:19:44 +08:00
Fix process_image in Bing

Add ImageResponse to Bing
Fix cursor styling in gui
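After this change the Bing provider yields ImageResponse objects in addition to plain text chunks: once right after an input image is uploaded, and once when Bing returns generated images. Below is a minimal sketch of how a caller might consume such a mixed stream; the helper name print_stream and the way the image chunk is rendered are illustrative only, while the import path follows the `from ..image import ImageResponse` line added in the first hunk.

from g4f.image import ImageResponse

async def print_stream(chunks) -> None:
    # `chunks` is an async generator such as Bing's stream_generate(...);
    # after this commit it can yield ImageResponse objects as well as str.
    async for chunk in chunks:
        if isinstance(chunk, ImageResponse):
            # image results now arrive as objects, no longer as inlined markdown text
            print(f"\n[image] {chunk}")
        else:
            print(chunk, end="")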
@@ -9,9 +9,10 @@ from urllib import parse
 from aiohttp import ClientSession, ClientTimeout
 
 from ..typing import AsyncResult, Messages, ImageType
+from ..image import ImageResponse
 from .base_provider import AsyncGeneratorProvider
 from .bing.upload_image import upload_image
-from .bing.create_images import create_images, format_images_markdown
+from .bing.create_images import create_images
 from .bing.conversation import Conversation, create_conversation, delete_conversation
 
 class Tones():
@@ -172,7 +173,7 @@ def create_message(
     prompt: str,
     tone: str,
     context: str = None,
-    image_info: dict = None,
+    image_response: ImageResponse = None,
     web_search: bool = False,
     gpt4_turbo: bool = False
 ) -> str:
@@ -228,9 +229,9 @@ def create_message(
         'target': 'chat',
         'type': 4
     }
-    if image_info and "imageUrl" in image_info and "originalImageUrl" in image_info:
-        struct['arguments'][0]['message']['originalImageUrl'] = image_info['originalImageUrl']
-        struct['arguments'][0]['message']['imageUrl'] = image_info['imageUrl']
+    if image_response.get('imageUrl') and image_response.get('originalImageUrl'):
+        struct['arguments'][0]['message']['originalImageUrl'] = image_response.get('originalImageUrl')
+        struct['arguments'][0]['message']['imageUrl'] = image_response.get('imageUrl')
         struct['arguments'][0]['experienceType'] = None
         struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
     if context:
@@ -262,9 +263,9 @@ async def stream_generate(
         headers=headers
     ) as session:
         conversation = await create_conversation(session, proxy)
-        image_info = None
-        if image:
-            image_info = await upload_image(session, image, tone, proxy)
+        image_response = await upload_image(session, image, tone, proxy) if image else None
+        if image_response:
+            yield image_response
         try:
             async with session.ws_connect(
                 'wss://sydney.bing.com/sydney/ChatHub',
@@ -274,7 +275,7 @@ async def stream_generate(
             ) as wss:
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                 await wss.receive(timeout=timeout)
-                await wss.send_str(create_message(conversation, prompt, tone, context, image_info, web_search, gpt4_turbo))
+                await wss.send_str(create_message(conversation, prompt, tone, context, image_response, web_search, gpt4_turbo))
 
                 response_txt = ''
                 returned_text = ''
@@ -290,6 +291,7 @@ async def stream_generate(
                        response = json.loads(obj)
                        if response.get('type') == 1 and response['arguments'][0].get('messages'):
                            message = response['arguments'][0]['messages'][0]
+                           image_response = None
                            if (message['contentOrigin'] != 'Apology'):
                                if 'adaptiveCards' in message:
                                    card = message['adaptiveCards'][0]['body'][0]
@@ -301,7 +303,7 @@ async def stream_generate(
                                elif message.get('contentType') == "IMAGE":
                                    prompt = message.get('text')
                                    try:
-                                       response_txt += format_images_markdown(await create_images(session, prompt, proxy), prompt)
+                                       image_response = ImageResponse(await create_images(session, prompt, proxy), prompt)
                                    except:
                                        response_txt += f"\nhttps://www.bing.com/images/create?q={parse.quote(prompt)}"
                                    final = True
@@ -310,6 +312,8 @@ async def stream_generate(
                                if new != "\n":
                                    yield new
                                returned_text = response_txt
+                           if image_response:
+                               yield image_response
                        elif response.get('type') == 2:
                            result = response['item']['result']
                            if result.get('error'):
@@ -6,7 +6,7 @@ import json
 import math
 from ...typing import ImageType
 from aiohttp import ClientSession
-from ...image import to_image, process_image, to_base64
+from ...image import to_image, process_image, to_base64, ImageResponse
 
 image_config = {
     "maxImagePixels": 360000,
@@ -19,7 +19,7 @@ async def upload_image(
     image: ImageType,
     tone: str,
     proxy: str = None
-) -> dict:
+) -> ImageResponse:
     image = to_image(image)
     width, height = image.size
     max_image_pixels = image_config['maxImagePixels']
@@ -55,7 +55,7 @@ async def upload_image(
         else "https://www.bing.com/images/blob?bcid="
         + result['bcid']
     )
-    return result
+    return ImageResponse(result["imageUrl"], "", result)
 
 def build_image_upload_api_payload(image_bin: str, tone: str):
     payload = {
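For orientation only: the hunks above construct the new return value as ImageResponse(result["imageUrl"], "", result) and, in create_message, read it back with .get('imageUrl') / .get('originalImageUrl'). The rough stand-in below mirrors that shape; it is not the actual g4f.image.ImageResponse implementation, just an illustration of the interface the diff relies on.

class ImageResponseSketch:
    """Illustrative stand-in; mirrors how this commit constructs and queries ImageResponse."""

    def __init__(self, images, alt: str, options: dict = None):
        self.images = images            # a single URL here, a list of URLs for created images
        self.alt = alt                  # alt text / prompt ("" for uploads)
        self.options = options or {}    # e.g. the raw Bing upload result

    def get(self, key: str):
        # create_message() calls image_response.get('imageUrl') and
        # .get('originalImageUrl'); here that is a lookup in the options dict
        return self.options.get(key)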
@@ -13,7 +13,6 @@ from ...webdriver import get_browser, get_driver_cookies
 from ...typing import AsyncResult, Messages
 from ...requests import StreamSession
 from ...image import to_image, to_bytes, ImageType, ImageResponse
-from ... import debug
 
 models = {
     "gpt-3.5": "text-davinci-002-render-sha",
@@ -242,9 +241,7 @@ class OpenaiChat(AsyncGeneratorProvider):
                json=data,
                headers={"Accept": "text/event-stream", **headers}
            ) as response:
-               try:
-                   response.raise_for_status()
-               except:
+               if not response.ok:
                    raise RuntimeError(f"Response {response.status_code}: {await response.text()}")
                try:
                    last_message: int = 0
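The OpenaiChat hunk replaces a bare try / raise_for_status() / except around the status check with an explicit `if not response.ok`, so the RuntimeError always carries both the status code and the response body. A sketch of the same pattern against a stub response object; the stub class and its fields are hypothetical, the real code uses the response returned by the project's StreamSession.

import asyncio

class StubResponse:
    """Hypothetical stand-in for a streaming HTTP response."""
    def __init__(self, status_code: int, body: str):
        self.status_code = status_code
        self.ok = 200 <= status_code < 300
        self._body = body

    async def text(self) -> str:
        return self._body

async def ensure_ok(response: StubResponse) -> None:
    # Same shape as the new code: one explicit check that surfaces
    # both the status and the body, instead of a swallowed raise_for_status().
    if not response.ok:
        raise RuntimeError(f"Response {response.status_code}: {await response.text()}")

# Usage: asyncio.run(ensure_ok(StubResponse(403, "blocked"))) raises
# RuntimeError: Response 403: blocked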
@@ -566,6 +566,7 @@ select {
     animation: blink 0.8s infinite;
     width: 7px;
     height: 15px;
+    display: inline-block;
 }
 
 @keyframes blink {
@@ -104,7 +104,7 @@ const ask_gpt = async () => {
                </div>
                <div class="content" id="gpt_${window.token}">
                    <div class="provider"></div>
-                   <div class="content_inner"><div id="cursor"></div></div>
+                   <div class="content_inner"><span id="cursor"></span></div>
                </div>
            </div>
        `;
@@ -168,7 +168,7 @@ const ask_gpt = async () => {
        }
        if (error) {
            console.error(error);
-           content_inner.innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
+           content_inner.innerHTML += "<p>An error occured, please try again, if the problem persists, please use a other model or provider.</p>";
        } else {
            html = markdown_render(text);
            html = html.substring(0, html.lastIndexOf('</p>')) + '<span id="cursor"></span></p>';
@@ -64,7 +64,6 @@ def get_orientation(image: Image.Image) -> int:
 
 def process_image(img: Image.Image, new_width: int, new_height: int) -> Image.Image:
     orientation = get_orientation(img)
-    new_img = Image.new("RGB", (new_width, new_height), color="#FFFFFF")
     if orientation:
         if orientation > 4:
             img = img.transpose(Image.FLIP_LEFT_RIGHT)
@@ -74,8 +73,8 @@ def process_image(img: Image.Image, new_width: int, new_height: int) -> Image.Im
             img = img.transpose(Image.ROTATE_270)
         if orientation in [7, 8]:
             img = img.transpose(Image.ROTATE_90)
-    new_img.paste(img, (0, 0))
-    return new_img
+    img.thumbnail((new_width, new_height))
+    return img
 
 def to_base64(image: Image.Image, compression_rate: float) -> str:
     output_buffer = BytesIO()
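The process_image fix drops the fixed-size white canvas (Image.new plus paste, which cropped any source larger than new_width x new_height and left white padding around smaller ones) in favour of Image.thumbnail, which downscales the image in place, keeps the aspect ratio and never enlarges. A standalone sketch of the difference with a synthetic image; the sizes are arbitrary.

from PIL import Image

# synthetic 1200x800 source image
src = Image.new("RGB", (1200, 800), color="#3366CC")

# old behaviour (simplified): paste onto a fixed 600x400 white canvas,
# so only the top-left 600x400 region of the source survives
old = Image.new("RGB", (600, 400), color="#FFFFFF")
old.paste(src, (0, 0))
print(old.size)    # (600, 400), content cropped

# new behaviour: thumbnail() resizes the image itself, preserving aspect ratio
new = src.copy()
new.thumbnail((600, 400))
print(new.size)    # (600, 400) for this source; smaller images are left untouched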