Fix missing websocket_request_id in OpenaiChat

Add RateLimitError to Bing
Add android support to gui api
Add annotations import to gui api
This commit is contained in:
Heiner Lohaus
2024-03-19 18:48:32 +01:00
parent b5b56f35ca
commit 486e43dabd
12 changed files with 243 additions and 53 deletions

3
.gitignore vendored
View File

@@ -52,4 +52,5 @@ x.py
info.txt info.txt
local.py local.py
*.gguf *.gguf
image.py image.py
.buildozer

View File

@@ -11,7 +11,7 @@ from aiohttp import ClientSession, ClientTimeout, BaseConnector, WSMsgType
from ..typing import AsyncResult, Messages, ImageType, Cookies from ..typing import AsyncResult, Messages, ImageType, Cookies
from ..image import ImageRequest from ..image import ImageRequest
from ..errors import ResponseStatusError from ..errors import ResponseStatusError, RateLimitError
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_connector, get_random_hex from .helper import get_connector, get_random_hex
from .bing.upload_image import upload_image from .bing.upload_image import upload_image
@@ -26,7 +26,7 @@ class Tones:
creative = "Creative" creative = "Creative"
balanced = "Balanced" balanced = "Balanced"
precise = "Precise" precise = "Precise"
copilot = "Balanced" copilot = "Copilot"
class Bing(AsyncGeneratorProvider, ProviderModelMixin): class Bing(AsyncGeneratorProvider, ProviderModelMixin):
""" """
@@ -36,8 +36,8 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
working = True working = True
supports_message_history = True supports_message_history = True
supports_gpt_4 = True supports_gpt_4 = True
default_model = "balanced" default_model = "Balanced"
models = [key for key in Tones.__dict__ if not key.startswith("__")] models = [getattr(Tones, key) for key in Tones.__dict__ if not key.startswith("__")]
@classmethod @classmethod
def create_async_generator( def create_async_generator(
@@ -72,7 +72,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
context = create_context(messages[:-1]) if len(messages) > 1 else None context = create_context(messages[:-1]) if len(messages) > 1 else None
if tone is None: if tone is None:
tone = tone if model.startswith("gpt-4") else model tone = tone if model.startswith("gpt-4") else model
tone = cls.get_model("" if tone is None else tone.lower()) tone = cls.get_model("" if tone is None else tone)
gpt4_turbo = True if model.startswith("gpt-4-turbo") else False gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
return stream_generate( return stream_generate(
@@ -258,7 +258,6 @@ class Defaults:
'sec-fetch-mode': 'cors', 'sec-fetch-mode': 'cors',
'sec-fetch-dest': 'empty', 'sec-fetch-dest': 'empty',
'referer': home, 'referer': home,
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9', 'accept-language': 'en-US,en;q=0.9',
} }
@@ -311,7 +310,7 @@ def create_message(
"allowedMessageTypes": Defaults.allowedMessageTypes, "allowedMessageTypes": Defaults.allowedMessageTypes,
"sliceIds": Defaults.sliceIds[tone], "sliceIds": Defaults.sliceIds[tone],
"verbosity": "verbose", "verbosity": "verbose",
"scenario": "CopilotMicrosoftCom" if tone == "copilot" else "SERP", "scenario": "CopilotMicrosoftCom" if tone == Tones.copilot else "SERP",
"plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [], "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
"traceId": get_random_hex(40), "traceId": get_random_hex(40),
"conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"], "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
@@ -329,7 +328,7 @@ def create_message(
"requestId": request_id, "requestId": request_id,
"messageId": request_id "messageId": request_id
}, },
"tone": getattr(Tones, tone), "tone": "Balanced" if tone == Tones.copilot else tone,
"spokenTextMode": "None", "spokenTextMode": "None",
"conversationId": conversation.conversationId, "conversationId": conversation.conversationId,
"participant": {"id": conversation.clientId} "participant": {"id": conversation.clientId}
@@ -412,10 +411,15 @@ async def stream_generate(
await asyncio.sleep(sleep_retry) await asyncio.sleep(sleep_retry)
continue continue
image_request = await upload_image(session, image, getattr(Tones, tone), headers) if image else None image_request = await upload_image(
session,
image,
"Balanced" if Tones.copilot == "Copilot" else tone,
headers
) if image else None
async with session.ws_connect( async with session.ws_connect(
'wss://s.copilot.microsoft.com/sydney/ChatHub' 'wss://s.copilot.microsoft.com/sydney/ChatHub'
if tone == "copilot" else if tone == "Copilot" else
'wss://sydney.bing.com/sydney/ChatHub', 'wss://sydney.bing.com/sydney/ChatHub',
autoping=False, autoping=False,
params={'sec_access_token': conversation.conversationSignature}, params={'sec_access_token': conversation.conversationSignature},
@@ -481,7 +485,7 @@ async def stream_generate(
max_retries -= 1 max_retries -= 1
if max_retries < 1: if max_retries < 1:
if result["value"] == "CaptchaChallenge": if result["value"] == "CaptchaChallenge":
raise RuntimeError(f"{result['value']}: Use other cookies or/and ip address") raise RateLimitError(f"{result['value']}: Use other cookies or/and ip address")
else: else:
raise RuntimeError(f"{result['value']}: {result['message']}") raise RuntimeError(f"{result['value']}: {result['message']}")
if debug.logging: if debug.logging:

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
from aiohttp import ClientSession from aiohttp import ClientSession
from ...requests import raise_for_status from ...requests import raise_for_status
from ...errors import RateLimitError
class Conversation: class Conversation:
""" """
@@ -36,6 +37,8 @@ async def create_conversation(session: ClientSession, headers: dict, tone: str)
else: else:
url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1" url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1"
async with session.get(url, headers=headers) as response: async with session.get(url, headers=headers) as response:
if response.status == 404:
raise RateLimitError("Response 404: Do less requests and reuse conversations")
await raise_for_status(response, "Failed to create conversation") await raise_for_status(response, "Failed to create conversation")
data = await response.json() data = await response.json()
conversationId = data.get('conversationId') conversationId = data.get('conversationId')

View File

@@ -450,7 +450,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
) as response: ) as response:
cls._update_request_args(session) cls._update_request_args(session)
await raise_for_status(response) await raise_for_status(response)
async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields, websocket_request_id): async for chunk in cls.iter_messages_chunk(response.iter_lines(), session, fields):
if response_fields: if response_fields:
response_fields = False response_fields = False
yield fields yield fields

View File

@@ -133,15 +133,15 @@
<div class="box input-box"> <div class="box input-box">
<textarea id="message-input" placeholder="Ask a question" cols="30" rows="10" <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
style="white-space: pre-wrap;resize: none;"></textarea> style="white-space: pre-wrap;resize: none;"></textarea>
<label for="image" title="Works with Bing, Gemini, OpenaiChat and You"> <label class="file-label" for="image" title="Works with Bing, Gemini, OpenaiChat and You">
<input type="file" id="image" name="image" accept="image/*" required/> <input type="file" id="image" name="image" accept="image/*" required/>
<i class="fa-regular fa-image"></i> <i class="fa-regular fa-image"></i>
</label> </label>
<label for="camera"> <label class="file-label" for="camera">
<input type="file" id="camera" name="camera" accept="image/*" capture="camera" required/> <input type="file" id="camera" name="camera" accept="image/*" capture="camera" required/>
<i class="fa-solid fa-camera"></i> <i class="fa-solid fa-camera"></i>
</label> </label>
<label for="file"> <label class="file-label" for="file">
<input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/> <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
<i class="fa-solid fa-paperclip"></i> <i class="fa-solid fa-paperclip"></i>
</label> </label>

View File

@@ -482,25 +482,18 @@ body {
display: none; display: none;
} }
label[for="image"]:has(> input:valid){ .file-label {
color: var(--accent);
}
label[for="camera"]:has(> input:valid){
color: var(--accent);
}
label[for="file"]:has(> input:valid){
color: var(--accent);
}
label[for="image"], label[for="file"], label[for="camera"] {
cursor: pointer; cursor: pointer;
position: absolute; position: absolute;
top: 10px; top: 10px;
left: 10px; left: 10px;
} }
.file-label:has(> input:valid),
.file-label.selected {
color: var(--accent);
}
label[for="image"] { label[for="image"] {
top: 32px; top: 32px;
} }

View File

@@ -211,7 +211,7 @@ async function add_message_chunk(message) {
${message.provider.model ? ' with ' + message.provider.model : ''} ${message.provider.model ? ' with ' + message.provider.model : ''}
` `
} else if (message.type == "message") { } else if (message.type == "message") {
console.error(messag.message) console.error(message.message)
} else if (message.type == "error") { } else if (message.type == "error") {
window.error = message.error window.error = message.error
console.error(message.error); console.error(message.error);
@@ -240,6 +240,27 @@ async function add_message_chunk(message) {
} }
} }
cameraInput?.addEventListener("click", (e) => {
if (window?.pywebview) {
e.preventDefault();
pywebview.api.choose_file();
}
})
cameraInput?.addEventListener("click", (e) => {
if (window?.pywebview) {
e.preventDefault();
pywebview.api.take_picture();
}
})
imageInput?.addEventListener("click", (e) => {
if (window?.pywebview) {
e.preventDefault();
pywebview.api.choose_image();
}
})
const ask_gpt = async () => { const ask_gpt = async () => {
regenerate.classList.add(`regenerate-hidden`); regenerate.classList.add(`regenerate-hidden`);
messages = await get_messages(window.conversation_id); messages = await get_messages(window.conversation_id);
@@ -307,8 +328,7 @@ const ask_gpt = async () => {
console.error(e); console.error(e);
if (e.name != "AbortError") { if (e.name != "AbortError") {
error = true; error = true;
text = "oops ! something went wrong, please try again / reload. [stacktrace in console]"; content_inner.innerHTML += `<p><strong>An error occured:</strong> ${e}</p>`;
content_inner.innerHTML = text;
} }
} }
if (!error && text) { if (!error && text) {
@@ -592,7 +612,7 @@ document.getElementById("cancelButton").addEventListener("click", async () => {
console.log(`aborted ${window.conversation_id}`); console.log(`aborted ${window.conversation_id}`);
}); });
document.getElementById(`regenerateButton`).addEventListener(`click`, async () => { document.getElementById("regenerateButton").addEventListener("click", async () => {
prompt_lock = true; prompt_lock = true;
await hide_last_message(window.conversation_id); await hide_last_message(window.conversation_id);
window.token = message_id(); window.token = message_id();
@@ -622,14 +642,20 @@ const message_id = () => {
async function hide_sidebar() { async function hide_sidebar() {
sidebar.classList.remove("shown"); sidebar.classList.remove("shown");
sidebar_button.classList.remove("rotated"); sidebar_button.classList.remove("rotated");
if (window.location.pathname == "/menu/") {
history.back();
}
} }
window.addEventListener('popstate', hide_sidebar, false);
sidebar_button.addEventListener("click", (event) => { sidebar_button.addEventListener("click", (event) => {
if (sidebar.classList.contains("shown")) { if (sidebar.classList.contains("shown")) {
hide_sidebar(); hide_sidebar();
} else { } else {
sidebar.classList.add("shown"); sidebar.classList.add("shown");
sidebar_button.classList.add("rotated"); sidebar_button.classList.add("rotated");
history.pushState({}, null, "/menu/");
} }
window.scrollTo(0, 0); window.scrollTo(0, 0);
}); });
@@ -817,19 +843,6 @@ async function on_api() {
register_settings_storage(); register_settings_storage();
versions = await api("version");
document.title = 'g4f - ' + versions["version"];
let text = "version ~ "
if (versions["version"] != versions["latest_version"]) {
let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
let title = `New version: ${versions["latest_version"]}`;
text += `<a href="${release_url}" target="_blank" title="${title}">${versions["version"]}</a> `;
text += `<i class="fa-solid fa-rotate"></i>`
} else {
text += versions["version"];
}
document.getElementById("version_text").innerHTML = text
models = await api("models"); models = await api("models");
models.forEach((model) => { models.forEach((model) => {
let option = document.createElement("option"); let option = document.createElement("option");
@@ -845,9 +858,25 @@ async function on_api() {
}) })
await load_provider_models(appStorage.getItem("provider")); await load_provider_models(appStorage.getItem("provider"));
load_settings_storage() await load_settings_storage()
} }
async function load_version() {
const versions = await api("version");
document.title = 'g4f - ' + versions["version"];
let text = "version ~ "
if (versions["version"] != versions["latest_version"]) {
let release_url = 'https://github.com/xtekky/gpt4free/releases/tag/' + versions["latest_version"];
let title = `New version: ${versions["latest_version"]}`;
text += `<a href="${release_url}" target="_blank" title="${title}">${versions["version"]}</a> `;
text += `<i class="fa-solid fa-rotate"></i>`
} else {
text += versions["version"];
}
document.getElementById("version_text").innerHTML = text
}
setTimeout(load_version, 5000);
for (const el of [imageInput, cameraInput]) { for (const el of [imageInput, cameraInput]) {
el.addEventListener('click', async () => { el.addEventListener('click', async () => {
el.value = ''; el.value = '';
@@ -913,13 +942,13 @@ function get_selected_model() {
async function api(ressource, args=null, file=null) { async function api(ressource, args=null, file=null) {
if (window?.pywebview) { if (window?.pywebview) {
if (args) { if (args !== null) {
if (ressource == "models") { if (ressource == "models") {
ressource = "provider_models"; ressource = "provider_models";
} }
return pywebview.api["get_" + ressource](args); return pywebview.api[`get_${ressource}`](args);
} }
return pywebview.api["get_" + ressource](); return pywebview.api[`get_${ressource}`]();
} }
if (ressource == "models" && args) { if (ressource == "models" && args) {
ressource = `${ressource}/${args}`; ressource = `${ressource}/${args}`;
@@ -930,7 +959,7 @@ async function api(ressource, args=null, file=null) {
const headers = { const headers = {
accept: 'text/event-stream' accept: 'text/event-stream'
} }
if (file) { if (file !== null) {
const formData = new FormData(); const formData = new FormData();
formData.append('file', file); formData.append('file', file);
formData.append('json', body); formData.append('json', body);

View File

@@ -0,0 +1,67 @@
from kivy.logger import Logger
from kivy.clock import Clock
from jnius import autoclass
from jnius import cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
Intent = autoclass('android.content.Intent')
Uri = autoclass('android.net.Uri')
MEDIA_DATA = "_data"
RESULT_LOAD_IMAGE = 1
Activity = autoclass('android.app.Activity')
def user_select_image(on_selection):
"""Open Gallery Activity and call callback with absolute image filepath of image user selected.
None if user canceled.
"""
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
# Forum discussion: https://groups.google.com/forum/#!msg/kivy-users/bjsG2j9bptI/-Oe_aGo0newJ
def on_activity_result(request_code, result_code, intent):
if request_code != RESULT_LOAD_IMAGE:
Logger.warning('user_select_image: ignoring activity result that was not RESULT_LOAD_IMAGE')
return
if result_code == Activity.RESULT_CANCELED:
Clock.schedule_once(lambda dt: on_selection(None), 0)
return
if result_code != Activity.RESULT_OK:
# This may just go into the void...
raise NotImplementedError('Unknown result_code "{}"'.format(result_code))
selectedImage = intent.getData(); # Uri
filePathColumn = [MEDIA_DATA]; # String[]
# Cursor
cursor = currentActivity.getContentResolver().query(selectedImage,
filePathColumn, None, None, None);
cursor.moveToFirst();
# int
columnIndex = cursor.getColumnIndex(filePathColumn[0]);
# String
picturePath = cursor.getString(columnIndex);
cursor.close();
Logger.info('android_ui: user_select_image() selected %s', picturePath)
# This is possibly in a different thread?
Clock.schedule_once(lambda dt: on_selection(picturePath), 0)
# See: http://pyjnius.readthedocs.org/en/latest/android.html
activity.bind(on_activity_result=on_activity_result)
intent = Intent()
# http://programmerguru.com/android-tutorial/how-to-pick-image-from-gallery/
# http://stackoverflow.com/questions/18416122/open-gallery-app-in-android
intent.setAction(Intent.ACTION_PICK)
# TODO internal vs external?
intent.setData(Uri.parse('content://media/internal/images/media'))
# TODO setType(Image)?
currentActivity.startActivityForResult(intent, RESULT_LOAD_IMAGE)

View File

@@ -1,11 +1,40 @@
from __future__ import annotations
import logging import logging
import json import json
import os.path
from typing import Iterator from typing import Iterator
from uuid import uuid4
from functools import partial
try: try:
import webview import webview
import platformdirs
except ImportError: except ImportError:
... ...
try:
from plyer import camera
from plyer import filechooser
has_plyer = True
except ImportError:
has_plyer = False
try:
from android.runnable import run_on_ui_thread
from android.storage import app_storage_path
from android.permissions import request_permissions, Permission
from android.permissions import _RequestPermissionsManager
_RequestPermissionsManager.register_callback()
from .android_gallery import user_select_image
has_android = True
except ImportError:
run_on_ui_thread = lambda a : a
app_storage_path = platformdirs.user_pictures_dir
user_select_image = partial(
filechooser.open_file,
path=platformdirs.user_pictures_dir(),
filters=[["Image", "*.jpg", "*.jpeg", "*.png", "*.webp", "*.svg"]],
)
has_android = False
from g4f import version, models from g4f import version, models
from g4f import get_last_provider, ChatCompletion from g4f import get_last_provider, ChatCompletion
@@ -75,13 +104,71 @@ class Api():
return {'title': ''} return {'title': ''}
def get_conversation(self, options: dict, **kwargs) -> Iterator: def get_conversation(self, options: dict, **kwargs) -> Iterator:
window = webview.active_window() window = webview.windows[0]
if hasattr(self, "image") and self.image is not None:
kwargs["image"] = open(self.image, "rb")
for message in self._create_response_stream( for message in self._create_response_stream(
self._prepare_conversation_kwargs(options, kwargs), self._prepare_conversation_kwargs(options, kwargs),
options.get("conversation_id") options.get("conversation_id")
): ):
if not window.evaluate_js(f"if (!this.abort) this.add_message_chunk({json.dumps(message)}); !this.abort && !this.error;"): if not window.evaluate_js(f"if (!this.abort) this.add_message_chunk({json.dumps(message)}); !this.abort && !this.error;"):
break break
self.image = None
self.set_selected(None)
@run_on_ui_thread
def choose_file(self):
self.request_permissions()
filechooser.open_file(
path=platformdirs.user_pictures_dir(),
on_selection=print
)
@run_on_ui_thread
def choose_image(self):
self.request_permissions()
user_select_image(
on_selection=self.on_image_selection
)
@run_on_ui_thread
def take_picture(self):
self.request_permissions()
filename = os.path.join(app_storage_path(), f"chat-{uuid4()}.png")
camera.take_picture(filename=filename, on_complete=self.on_camera)
def on_image_selection(self, filename):
if filename is not None and os.path.exists(filename):
self.image = filename
else:
self.image = None
self.set_selected(None if self.image is None else "image")
def on_camera(self, filename):
if filename is not None and os.path.exists(filename):
self.image = filename
else:
self.image = None
self.set_selected(None if self.image is None else "camera")
def set_selected(self, input_id: str = None):
window = webview.windows[0]
if window is not None:
window.evaluate_js(
f"document.querySelector(`.file-label.selected`)?.classList.remove(`selected`);"
)
if input_id is not None and input_id in ("image", "camera"):
window.evaluate_js(
f'document.querySelector(`label[for="{input_id}"]`)?.classList.add(`selected`);'
)
def request_permissions(self):
if has_android:
request_permissions([
Permission.CAMERA,
Permission.READ_EXTERNAL_STORAGE,
Permission.WRITE_EXTERNAL_STORAGE
])
def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict): def _prepare_conversation_kwargs(self, json_data: dict, kwargs: dict):
""" """

View File

@@ -1,3 +1,5 @@
from __future__ import annotations
import sys import sys
import os.path import os.path
import webview import webview
@@ -20,6 +22,8 @@ def run_webview(
dirname = sys._MEIPASS dirname = sys._MEIPASS
else: else:
dirname = os.path.dirname(__file__) dirname = os.path.dirname(__file__)
webview.settings['OPEN_EXTERNAL_LINKS_IN_BROWSER'] = False
webview.settings['ALLOW_DOWNLOADS'] = True
webview.create_window( webview.create_window(
f"g4f - {g4f.version.utils.current_version}", f"g4f - {g4f.version.utils.current_version}",
os.path.join(dirname, "client/index.html"), os.path.join(dirname, "client/index.html"),

View File

@@ -1,2 +1,3 @@
requests requests
aiohttp aiohttp
brotli

View File

@@ -11,6 +11,7 @@ with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
INSTALL_REQUIRE = [ INSTALL_REQUIRE = [
"requests", "requests",
"aiohttp", "aiohttp",
"brotli"
] ]
EXTRA_REQUIRE = { EXTRA_REQUIRE = {