New minimum requirements (#1515)

* New minimum requirements
* Add ConversationStyleOptionSets to Bing
* Add image.ImageRequest
* Improve python version support
* Improve unittests
Author: H Lohaus
Date: 2024-01-26 07:54:13 +01:00
Committed by: GitHub
Parent: 71d71b6512
Commit: feb83c168b
35 changed files with 471 additions and 284 deletions

View File

@@ -15,10 +15,19 @@ jobs:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v4
-    - name: Set up Python
+    - name: Set up Python 3.8
       uses: actions/setup-python@v4
       with:
-        python-version: "3.x"
+        python-version: "3.8"
+        cache: 'pip'
+    - name: Install min requirements
+      run: pip install -r requirements-min.txt
+    - name: Run tests
+      run: python -m etc.unittest
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.11"
         cache: 'pip'
     - name: Install requirements
       run: pip install -r requirements.txt

View File

@@ -100,7 +100,7 @@ or set the api base in your client to: [http://localhost:1337/v1](http://localhost:1337/v1)
 ##### Install using pypi:
 ```
-pip install -U g4f
+pip install -U "g4f[all]"
 ```
 ##### or:
@@ -134,13 +134,19 @@ python3 -m venv venv
 ```
 source venv/bin/activate
 ```
-5. Install the required Python packages from `requirements.txt`:
+5. Install minimum requirements:
+```
+pip install -r requirements-min.txt
+```
+6. Or install all used Python packages from `requirements.txt`:
 ```
 pip install -r requirements.txt
 ```
-6. Create a `test.py` file in the root folder and start using the repo, further Instructions are below
+7. Create a `test.py` file in the root folder and start using the repo, further Instructions are below
 ```py
 import g4f
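
For context, a minimal `test.py` along the lines of step 7 could look like the sketch below. The model name and prompt are placeholders; `ChatCompletion.create` is the API exercised by the unit tests in this commit.

```python
import g4f

# Minimal sketch: ask for a chat completion with the default provider selection.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```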

View File

@@ -1,6 +1,10 @@
 from .include import DEFAULT_MESSAGES
 import asyncio
-import nest_asyncio
+try:
+    import nest_asyncio
+    has_nest_asyncio = True
+except:
+    has_nest_asyncio = False
 import unittest
 import g4f
 from g4f import ChatCompletion
@@ -39,6 +43,8 @@ class TestChatCompletionAsync(unittest.IsolatedAsyncioTestCase):
 class TestChatCompletionNestAsync(unittest.IsolatedAsyncioTestCase):
     def setUp(self) -> None:
+        if not has_nest_asyncio:
+            self.skipTest('"nest_asyncio" not installed')
         nest_asyncio.apply()
     async def test_create(self):

View File

@@ -3,11 +3,17 @@ import unittest
 from unittest.mock import MagicMock
 from .mocks import ProviderMock
 import g4f
-from g4f.gui.server.backend import Backend_Api, get_error_message
+try:
+    from g4f.gui.server.backend import Backend_Api, get_error_message
+    has_requirements = True
+except:
+    has_requirements = False
 class TestBackendApi(unittest.TestCase):
     def setUp(self):
+        if not has_requirements:
+            self.skipTest('"flask" not installed')
         self.app = MagicMock()
         self.api = Backend_Api(self.app)
@@ -28,6 +34,10 @@ class TestBackendApi(unittest.TestCase):
 class TestUtilityFunctions(unittest.TestCase):
+    def setUp(self):
+        if not has_requirements:
+            self.skipTest('"flask" not installed')
     def test_get_error_message(self):
         g4f.debug.last_provider = ProviderMock
         exception = Exception("Message")

View File

@@ -9,7 +9,7 @@ from urllib import parse
 from aiohttp import ClientSession, ClientTimeout, BaseConnector
 from ..typing import AsyncResult, Messages, ImageType
-from ..image import ImageResponse
+from ..image import ImageResponse, ImageRequest
 from .base_provider import AsyncGeneratorProvider
 from .helper import get_connector
 from .bing.upload_image import upload_image
@@ -154,6 +154,11 @@ class Defaults:
         'SRCHHPGUSR'    : f'HV={int(time.time())}',
     }
+class ConversationStyleOptionSets():
+    CREATIVE = ["h3imaginative", "clgalileo", "gencontentv3"]
+    BALANCED = ["galileo"]
+    PRECISE = ["h3precise", "clgalileo"]
 def format_message(msg: dict) -> str:
     """
     Formats a message dictionary into a JSON string with a delimiter.
@@ -168,7 +173,7 @@ def create_message(
     prompt: str,
     tone: str,
     context: str = None,
-    image_response: ImageResponse = None,
+    image_request: ImageRequest = None,
     web_search: bool = False,
     gpt4_turbo: bool = False
 ) -> str:
@@ -179,7 +184,7 @@ def create_message(
     :param prompt: The user's input prompt.
     :param tone: The desired tone for the response.
     :param context: Additional context for the prompt.
-    :param image_response: The response if an image is involved.
+    :param image_request: The image request with the url.
     :param web_search: Flag to enable web search.
     :param gpt4_turbo: Flag to enable GPT-4 Turbo.
     :return: A formatted string message for the Bing API.
@@ -187,11 +192,11 @@ def create_message(
     options_sets = Defaults.optionsSets
     # Append tone-specific options
     if tone == Tones.creative:
-        options_sets.append("h3imaginative")
+        options_sets.extend(ConversationStyleOptionSets.CREATIVE)
     elif tone == Tones.precise:
-        options_sets.append("h3precise")
+        options_sets.extend(ConversationStyleOptionSets.PRECISE)
     elif tone == Tones.balanced:
-        options_sets.append("galileo")
+        options_sets.extend(ConversationStyleOptionSets.BALANCED)
     else:
         options_sets.append("harmonyv3")
@@ -233,9 +238,9 @@ def create_message(
         'type': 4
     }
-    if image_response and image_response.get('imageUrl') and image_response.get('originalImageUrl'):
-        struct['arguments'][0]['message']['originalImageUrl'] = image_response.get('originalImageUrl')
-        struct['arguments'][0]['message']['imageUrl'] = image_response.get('imageUrl')
+    if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'):
+        struct['arguments'][0]['message']['originalImageUrl'] = image_request.get('originalImageUrl')
+        struct['arguments'][0]['message']['imageUrl'] = image_request.get('imageUrl')
         struct['arguments'][0]['experienceType'] = None
         struct['arguments'][0]['attachedFileInfo'] = {"fileName": None, "fileType": None}
@@ -282,9 +287,9 @@ async def stream_generate(
         timeout=ClientTimeout(total=timeout), headers=headers, connector=connector
     ) as session:
         conversation = await create_conversation(session)
-        image_response = await upload_image(session, image, tone) if image else None
-        if image_response:
-            yield image_response
+        image_request = await upload_image(session, image, tone) if image else None
+        if image_request:
+            yield image_request
         try:
             async with session.ws_connect(
@@ -294,7 +299,7 @@ async def stream_generate(
             ) as wss:
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
                 await wss.receive(timeout=timeout)
-                await wss.send_str(create_message(conversation, prompt, tone, context, image_response, web_search, gpt4_turbo))
+                await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
                 response_txt = ''
                 returned_text = ''
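
For reference, the new style sets simply group the Bing `optionsSets` flags per tone. The sketch below is illustrative only: `select_style` is a hypothetical helper, and the plain tone strings stand in for the provider's `Tones` constants.

```python
# Illustrative sketch: the option-set classes as added above, plus a
# hypothetical select_style() helper mirroring create_message()'s branching.
class ConversationStyleOptionSets():
    CREATIVE = ["h3imaginative", "clgalileo", "gencontentv3"]
    BALANCED = ["galileo"]
    PRECISE = ["h3precise", "clgalileo"]

def select_style(tone: str, base_options: list) -> list:
    options = list(base_options)
    if tone == "Creative":
        options.extend(ConversationStyleOptionSets.CREATIVE)
    elif tone == "Precise":
        options.extend(ConversationStyleOptionSets.PRECISE)
    elif tone == "Balanced":
        options.extend(ConversationStyleOptionSets.BALANCED)
    else:
        options.append("harmonyv3")  # fallback for unknown tones, as above
    return options

print(select_style("Creative", []))
```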

View File

@@ -13,11 +13,12 @@ class DeepInfra(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
     default_model = 'meta-llama/Llama-2-70b-chat-hf'
-    @staticmethod
-    def get_models():
-        url = 'https://api.deepinfra.com/models/featured'
-        models = requests.get(url).json()
-        return [model['model_name'] for model in models]
+    @classmethod
+    def get_models(cls):
+        if not cls.models:
+            url = 'https://api.deepinfra.com/models/featured'
+            cls.models = requests.get(url).json()
+        return cls.models
     @classmethod
     async def create_async_generator(
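
A brief usage sketch of the memoized `get_models`, assuming `DeepInfra` is importable from `g4f.Provider` as elsewhere in the package: the first call fetches the featured-models endpoint, later calls reuse the cached `cls.models`.

```python
from g4f.Provider import DeepInfra

# First call hits https://api.deepinfra.com/models/featured and stores the
# result on the class; the second call returns the same cached object.
models = DeepInfra.get_models()
assert DeepInfra.get_models() is models
```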

View File

@@ -1,11 +1,18 @@
 from __future__ import annotations
 from aiohttp import ClientSession
-import execjs, os, json
+import os
+import json
+try:
+    import execjs
+    has_requirements = True
+except ImportError:
+    has_requirements = False
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
+from ..errors import MissingRequirementsError
 class GptForLove(AsyncGeneratorProvider):
     url = "https://ai18.gptforlove.com"
@@ -20,6 +27,8 @@ class GptForLove(AsyncGeneratorProvider):
         proxy: str = None,
         **kwargs
     ) -> AsyncResult:
+        if not has_requirements:
+            raise MissingRequirementsError('Install "PyExecJS" package')
         if not model:
             model = "gpt-3.5-turbo"
         headers = {
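
The same guarded-import pattern recurs across the providers in this commit: import the optional package at module load, remember whether it succeeded, and raise `MissingRequirementsError` only when the provider is actually called. A generic sketch; `some_optional_package` is a placeholder name.

```python
# Generic sketch of the optional-dependency pattern used above.
try:
    import some_optional_package  # placeholder for execjs, bs4, etc.
    has_requirements = True
except ImportError:
    has_requirements = False

from g4f.errors import MissingRequirementsError

def create_completion(*args, **kwargs):
    if not has_requirements:
        raise MissingRequirementsError('Install "some_optional_package" package')
    ...  # normal provider logic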

View File

@@ -39,7 +39,7 @@ class HuggingChat(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs
     ) -> AsyncResult:
         if not cookies:
-            cookies = get_cookies(".huggingface.co")
+            cookies = get_cookies(".huggingface.co", False)
         headers = {
             'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',

View File

@@ -14,12 +14,12 @@ WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = 'pplx-70b-online'
     models = [
         'pplx-7b-online', 'pplx-70b-online', 'pplx-7b-chat', 'pplx-70b-chat', 'mistral-7b-instruct',
         'codellama-34b-instruct', 'llama-2-70b-chat', 'llava-7b-chat', 'mixtral-8x7b-instruct',
         'mistral-medium', 'related'
     ]
+    default_model = 'pplx-70b-online'
     model_aliases = {
         "mistralai/Mistral-7B-Instruct-v0.1": "mistral-7b-instruct",
         "meta-llama/Llama-2-70b-chat-hf": "llama-2-70b-chat",
@@ -52,8 +52,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
             t = format(random.getrandbits(32), '08x')
             async with session.get(
-                f"{API_URL}?EIO=4&transport=polling&t={t}",
-                proxy=proxy
+                f"{API_URL}?EIO=4&transport=polling&t={t}"
             ) as response:
                 text = await response.text()
@@ -61,8 +60,7 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
             post_data = '40{"jwt":"anonymous-ask-user"}'
             async with session.post(
                 f'{API_URL}?EIO=4&transport=polling&t={t}&sid={sid}',
-                data=post_data,
-                proxy=proxy
+                data=post_data
             ) as response:
                 assert await response.text() == 'OK'

View File

@@ -9,7 +9,6 @@ from ..requests import StreamSession
 class Phind(AsyncGeneratorProvider):
     url = "https://www.phind.com"
     working = True
-    supports_gpt_4 = True
     supports_stream = True
     supports_message_history = True

View File

@@ -1,10 +1,16 @@
 from __future__ import annotations
-import json, base64, requests, execjs, random, uuid
+import json, base64, requests, random, uuid
+try:
+    import execjs
+    has_requirements = True
+except ImportError:
+    has_requirements = False
 from ..typing import Messages, TypedDict, CreateResult, Any
 from .base_provider import AbstractProvider
-from ..debug import logging
+from ..errors import MissingRequirementsError
 class Vercel(AbstractProvider):
     url = 'https://sdk.vercel.ai'
@@ -21,10 +27,11 @@ class Vercel(AbstractProvider):
         proxy: str = None,
         **kwargs
     ) -> CreateResult:
+        if not has_requirements:
+            raise MissingRequirementsError('Install "PyExecJS" package')
         if not model:
             model = "gpt-3.5-turbo"
         elif model not in model_info:
             raise ValueError(f"Vercel does not support {model}")

View File

@@ -1,4 +1,5 @@
 from __future__ import annotations
+import sys
 import asyncio
 from asyncio import AbstractEventLoop

View File

@@ -1,3 +1,5 @@
+from __future__ import annotations
 from aiohttp import ClientSession
 class Conversation:

View File

@@ -2,21 +2,28 @@
 This module provides functionalities for creating and managing images using Bing's service.
 It includes functions for user login, session creation, image creation, and processing.
 """
+from __future__ import annotations
 import asyncio
 import time
 import json
 import os
 from aiohttp import ClientSession, BaseConnector
-from bs4 import BeautifulSoup
 from urllib.parse import quote
 from typing import Generator, List, Dict
+try:
+    from bs4 import BeautifulSoup
+    has_requirements = True
+except ImportError:
+    has_requirements = False
 from ..create_images import CreateImagesProvider
 from ..helper import get_cookies, get_connector
 from ...webdriver import WebDriver, get_driver_cookies, get_browser
 from ...base_provider import ProviderType
 from ...image import ImageResponse
+from ...errors import MissingRequirementsError, MissingAccessToken
 BING_URL = "https://www.bing.com"
 TIMEOUT_LOGIN = 1200
@@ -97,6 +104,8 @@ async def create_images(session: ClientSession, prompt: str, proxy: str = None,
     Raises:
         RuntimeError: If image creation fails or times out.
     """
+    if not has_requirements:
+        raise MissingRequirementsError('Install "beautifulsoup4" package')
     url_encoded_prompt = quote(prompt)
     payload = f"q={url_encoded_prompt}&rt=4&FORM=GENCRE"
     url = f"{BING_URL}/images/create?q={url_encoded_prompt}&rt=4&FORM=GENCRE"
@@ -193,7 +202,11 @@ class CreateImagesBing:
         Yields:
             Generator[str, None, None]: The final output as markdown formatted string with images.
         """
-        cookies = self.cookies or get_cookies(".bing.com")
+        try:
+            cookies = self.cookies or get_cookies(".bing.com")
+        except MissingRequirementsError as e:
+            raise MissingAccessToken(f'Missing "_U" cookie. {e}')
         if "_U" not in cookies:
            login_url = os.environ.get("G4F_LOGIN_URL")
            if login_url:
@@ -211,9 +224,12 @@ class CreateImagesBing:
         Returns:
             str: Markdown formatted string with images.
         """
-        cookies = self.cookies or get_cookies(".bing.com")
+        try:
+            cookies = self.cookies or get_cookies(".bing.com")
+        except MissingRequirementsError as e:
+            raise MissingAccessToken(f'Missing "_U" cookie. {e}')
         if "_U" not in cookies:
-            raise RuntimeError('"_U" cookie is missing')
+            raise MissingAccessToken('Missing "_U" cookie')
         proxy = os.environ.get("G4F_PROXY")
         async with create_session(cookies, proxy) as session:
             images = await create_images(session, prompt, self.proxy)

View File

@@ -1,17 +1,14 @@
 """
 Module to handle image uploading and processing for Bing AI integrations.
 """
 from __future__ import annotations
-import string
-import random
 import json
 import math
-from aiohttp import ClientSession
-from PIL import Image
+from aiohttp import ClientSession, FormData
 from ...typing import ImageType, Tuple
-from ...image import to_image, process_image, to_base64, ImageResponse
+from ...image import to_image, process_image, to_base64_jpg, ImageRequest, Image
 IMAGE_CONFIG = {
     "maxImagePixels": 360000,
@@ -24,7 +21,7 @@ async def upload_image(
     image_data: ImageType,
     tone: str,
     proxy: str = None
-) -> ImageResponse:
+) -> ImageRequest:
     """
     Uploads an image to Bing's AI service and returns the image response.
@@ -38,22 +35,22 @@ async def upload_image(
         RuntimeError: If the image upload fails.
     Returns:
-        ImageResponse: The response from the image upload.
+        ImageRequest: The response from the image upload.
     """
     image = to_image(image_data)
     new_width, new_height = calculate_new_dimensions(image)
-    processed_img = process_image(image, new_width, new_height)
-    img_binary_data = to_base64(processed_img, IMAGE_CONFIG['imageCompressionRate'])
-    data, boundary = build_image_upload_payload(img_binary_data, tone)
-    headers = prepare_headers(session, boundary)
+    image = process_image(image, new_width, new_height)
+    img_binary_data = to_base64_jpg(image, IMAGE_CONFIG['imageCompressionRate'])
+    data = build_image_upload_payload(img_binary_data, tone)
+    headers = prepare_headers(session)
     async with session.post("https://www.bing.com/images/kblob", data=data, headers=headers, proxy=proxy) as response:
         if response.status != 200:
             raise RuntimeError("Failed to upload image.")
         return parse_image_response(await response.json())
-def calculate_new_dimensions(image: Image.Image) -> Tuple[int, int]:
+def calculate_new_dimensions(image: Image) -> Tuple[int, int]:
     """
     Calculates the new dimensions for the image based on the maximum allowed pixels.
@@ -70,7 +67,7 @@ def calculate_new_dimensions(image: Image) -> Tuple[int, int]:
         return int(width * scale_factor), int(height * scale_factor)
     return width, height
-def build_image_upload_payload(image_bin: str, tone: str) -> Tuple[str, str]:
+def build_image_upload_payload(image_bin: str, tone: str) -> FormData:
     """
     Builds the payload for image uploading.
@@ -81,18 +78,11 @@ def build_image_upload_payload(image_bin: str, tone: str) -> FormData:
     Returns:
         Tuple[str, str]: The data and boundary for the payload.
     """
-    boundary = "----WebKitFormBoundary" + ''.join(random.choices(string.ascii_letters + string.digits, k=16))
-    data = f"""--{boundary}
-Content-Disposition: form-data; name="knowledgeRequest"
-
-{json.dumps(build_knowledge_request(tone), ensure_ascii=False)}
---{boundary}
-Content-Disposition: form-data; name="imageBase64"
-
-{image_bin}
---{boundary}--
-"""
-    return data, boundary
+    data = FormData()
+    knowledge_request = json.dumps(build_knowledge_request(tone), ensure_ascii=False)
+    data.add_field('knowledgeRequest', knowledge_request, content_type="application/json")
+    data.add_field('imageBase64', image_bin)
+    return data
 def build_knowledge_request(tone: str) -> dict:
     """
@@ -119,7 +109,7 @@ def build_knowledge_request(tone: str) -> dict:
         }
     }
-def prepare_headers(session: ClientSession, boundary: str) -> dict:
+def prepare_headers(session: ClientSession) -> dict:
     """
     Prepares the headers for the image upload request.
@@ -131,12 +121,11 @@ def prepare_headers(session: ClientSession) -> dict:
         dict: The headers for the request.
     """
     headers = session.headers.copy()
-    headers["Content-Type"] = f'multipart/form-data; boundary={boundary}'
     headers["Referer"] = 'https://www.bing.com/search?q=Bing+AI&showconv=1&FORM=hpcodx'
     headers["Origin"] = 'https://www.bing.com'
     return headers
-def parse_image_response(response: dict) -> ImageResponse:
+def parse_image_response(response: dict) -> ImageRequest:
     """
     Parses the response from the image upload.
@@ -147,7 +136,7 @@ def parse_image_response(response: dict) -> ImageRequest:
         RuntimeError: If parsing the image info fails.
     Returns:
-        ImageResponse: The parsed image response.
+        ImageRequest: The parsed image response.
     """
     if not response.get('blobId'):
         raise RuntimeError("Failed to parse image info.")
@@ -160,4 +149,4 @@ def parse_image_response(response: dict) -> ImageRequest:
         if IMAGE_CONFIG["enableFaceBlurDebug"] else
         f"https://www.bing.com/images/blob?bcid={result['bcid']}"
     )
-    return ImageResponse(result["imageUrl"], "", result)
+    return ImageRequest(result["imageUrl"], "", result)
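
A note on the switch to `aiohttp.FormData`: aiohttp generates the multipart boundary and `Content-Type` header itself, which is why `prepare_headers` no longer builds them by hand. A minimal sketch of the same request, with authentication cookies omitted and the payload values as placeholders:

```python
import asyncio
import json
from aiohttp import ClientSession, FormData

async def upload_sketch(image_base64: str, knowledge_request: dict):
    data = FormData()
    data.add_field('knowledgeRequest',
                   json.dumps(knowledge_request, ensure_ascii=False),
                   content_type="application/json")
    data.add_field('imageBase64', image_base64)
    async with ClientSession() as session:
        # aiohttp fills in "Content-Type: multipart/form-data; boundary=..."
        async with session.post("https://www.bing.com/images/kblob", data=data) as response:
            return await response.json()
```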

View File

@@ -1,7 +1,7 @@
 from __future__ import annotations
 from ...typing import Messages
-from curl_cffi.requests import AsyncSession
+from ...requests import StreamSession
 from ..base_provider import AsyncProvider, format_prompt
@@ -19,7 +19,7 @@ class ChatgptDuo(AsyncProvider):
         timeout: int = 120,
         **kwargs
     ) -> str:
-        async with AsyncSession(
+        async with StreamSession(
             impersonate="chrome107",
             proxies={"https": proxy},
             timeout=timeout

View File

@@ -5,10 +5,10 @@ import os
 import uuid
 import requests
-try:
-    from Crypto.Cipher import AES
-except ImportError:
-    from Cryptodome.Cipher import AES
+# try:
+#     from Crypto.Cipher import AES
+# except ImportError:
+#     from Cryptodome.Cipher import AES
 from ...typing import Any, CreateResult
 from ..base_provider import AbstractProvider
@@ -57,19 +57,21 @@ class GetGpt(AbstractProvider):
 def _encrypt(e: str):
-    t = os.urandom(8).hex().encode('utf-8')
-    n = os.urandom(8).hex().encode('utf-8')
-    r = e.encode('utf-8')
-    cipher = AES.new(t, AES.MODE_CBC, n)
-    ciphertext = cipher.encrypt(_pad_data(r))
-    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
+    # t = os.urandom(8).hex().encode('utf-8')
+    # n = os.urandom(8).hex().encode('utf-8')
+    # r = e.encode('utf-8')
+    # cipher = AES.new(t, AES.MODE_CBC, n)
+    # ciphertext = cipher.encrypt(_pad_data(r))
+    # return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
+    return
 def _pad_data(data: bytes) -> bytes:
-    block_size = AES.block_size
-    padding_size = block_size - len(data) % block_size
-    padding = bytes([padding_size] * padding_size)
-    return data + padding
+    # block_size = AES.block_size
+    # padding_size = block_size - len(data) % block_size
+    # padding = bytes([padding_size] * padding_size)
+    # return data + padding
+    return

View File

@@ -1,57 +1,37 @@
 from __future__ import annotations
-import asyncio
 import os
 import random
 import secrets
 import string
-from asyncio import AbstractEventLoop, BaseEventLoop
 from aiohttp import BaseConnector
-from platformdirs import user_config_dir
-from browser_cookie3 import (
-    chrome, chromium, opera, opera_gx,
-    brave, edge, vivaldi, firefox,
-    _LinuxPasswordManager, BrowserCookieError
-)
+try:
+    from platformdirs import user_config_dir
+    has_platformdirs = True
+except ImportError:
+    has_platformdirs = False
+try:
+    from browser_cookie3 import (
+        chrome, chromium, opera, opera_gx,
+        brave, edge, vivaldi, firefox,
+        _LinuxPasswordManager, BrowserCookieError
+    )
+    has_browser_cookie3 = True
+except ImportError:
+    has_browser_cookie3 = False
 from ..typing import Dict, Messages, Optional
-from ..errors import AiohttpSocksError
+from ..errors import AiohttpSocksError, MissingRequirementsError
 from .. import debug
 # Global variable to store cookies
 _cookies: Dict[str, Dict[str, str]] = {}
-def get_event_loop() -> AbstractEventLoop:
-    """
-    Get the current asyncio event loop. If the loop is closed or not set, create a new event loop.
-    If a loop is running, handle nested event loops. Patch the loop if 'nest_asyncio' is installed.
-    Returns:
-        AbstractEventLoop: The current or new event loop.
-    """
-    try:
-        loop = asyncio.get_event_loop()
-        if isinstance(loop, BaseEventLoop):
-            loop._check_closed()
-    except RuntimeError:
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-    try:
-        asyncio.get_running_loop()
-        if not hasattr(loop.__class__, "_nest_patched"):
-            import nest_asyncio
-            nest_asyncio.apply(loop)
-    except RuntimeError:
-        pass
-    except ImportError:
-        raise RuntimeError(
-            'Use "create_async" instead of "create" function in a running event loop. Or install "nest_asyncio" package.'
-        )
-    return loop
-if os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
+if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
     _LinuxPasswordManager.get_password = lambda a, b: b"secret"
-def get_cookies(domain_name: str = '') -> Dict[str, str]:
+def get_cookies(domain_name: str = '', raise_requirements_error: bool = True) -> Dict[str, str]:
     """
     Load cookies for a given domain from all supported browsers and cache the results.
@@ -64,11 +44,11 @@ def get_cookies(domain_name: str = '') -> Dict[str, str]:
     if domain_name in _cookies:
         return _cookies[domain_name]
-    cookies = _load_cookies_from_browsers(domain_name)
+    cookies = load_cookies_from_browsers(domain_name, raise_requirements_error)
     _cookies[domain_name] = cookies
     return cookies
-def _load_cookies_from_browsers(domain_name: str) -> Dict[str, str]:
+def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True) -> Dict[str, str]:
     """
     Helper function to load cookies from various browsers.
@@ -78,6 +58,10 @@ def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True) -> Dict[str, str]:
     Returns:
         Dict[str, str]: A dictionary of cookie names and values.
     """
+    if not has_browser_cookie3:
+        if raise_requirements_error:
+            raise MissingRequirementsError('Install "browser_cookie3" package')
+        return {}
     cookies = {}
     for cookie_fn in [_g4f, chrome, chromium, opera, opera_gx, brave, edge, vivaldi, firefox]:
         try:
@@ -104,6 +88,8 @@ def _g4f(domain_name: str) -> list:
     Returns:
         list: List of cookies.
     """
+    if not has_platformdirs:
+        return []
     user_data_dir = user_config_dir("g4f")
     cookie_file = os.path.join(user_data_dir, "Default", "Cookies")
     return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name)
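
A short sketch of the new `raise_requirements_error` switch. The import path for the helper module is an assumption based on the relative imports above; the domain is a placeholder.

```python
from g4f.errors import MissingRequirementsError
from g4f.Provider.helper import get_cookies  # assumed import path

try:
    cookies = get_cookies(".bing.com")      # raises if browser_cookie3 is missing
except MissingRequirementsError as error:
    print(error)

cookies = get_cookies(".bing.com", False)   # returns {} instead of raising
```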

View File

@@ -2,10 +2,14 @@ from __future__ import annotations
 import time
 import os
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.common.keys import Keys
+try:
+    from selenium.webdriver.common.by import By
+    from selenium.webdriver.support.ui import WebDriverWait
+    from selenium.webdriver.support import expected_conditions as EC
+    from selenium.webdriver.common.keys import Keys
+except ImportError:
+    pass
 from ...typing import CreateResult, Messages
 from ..base_provider import AbstractProvider

View File

@@ -1,21 +1,32 @@
 from __future__ import annotations
 import asyncio
 import uuid
 import json
 import os
-from py_arkose_generator.arkose import get_values_for_request
-from async_property import async_cached_property
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
+try:
+    from py_arkose_generator.arkose import get_values_for_request
+    from async_property import async_cached_property
+    has_requirements = True
+except ImportError:
+    async_cached_property = property
+    has_requirements = False
+try:
+    from selenium.webdriver.common.by import By
+    from selenium.webdriver.support.ui import WebDriverWait
+    from selenium.webdriver.support import expected_conditions as EC
+    has_webdriver = True
+except ImportError:
+    has_webdriver = False
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..helper import format_prompt, get_cookies
 from ...webdriver import get_browser, get_driver_cookies
-from ...typing import AsyncResult, Messages
+from ...typing import AsyncResult, Messages, Cookies, ImageType
 from ...requests import StreamSession
-from ...image import to_image, to_bytes, ImageType, ImageResponse
+from ...image import to_image, to_bytes, ImageResponse, ImageRequest
+from ...errors import MissingRequirementsError, MissingAccessToken
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -27,12 +38,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     default_model = None
-    models = ["text-davinci-002-render-sha", "gpt-4", "gpt-4-gizmo"]
-    model_aliases = {
-        "gpt-3.5-turbo": "text-davinci-002-render-sha",
-    }
+    models = ["gpt-3.5-turbo", "gpt-4", "gpt-4-gizmo"]
     _cookies: dict = {}
-    _default_model: str = None
     @classmethod
     async def create(
@@ -94,7 +101,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         session: StreamSession,
         headers: dict,
         image: ImageType
-    ) -> ImageResponse:
+    ) -> ImageRequest:
         """
         Upload an image to the service and get the download URL
@@ -104,7 +111,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             image: The image to upload, either a PIL Image object or a bytes object
         Returns:
-            An ImageResponse object that contains the download URL, file name, and other data
+            An ImageRequest object that contains the download URL, file name, and other data
         """
         # Convert the image to a PIL Image object and get the extension
         image = to_image(image)
@@ -145,7 +152,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         ) as response:
             response.raise_for_status()
             download_url = (await response.json())["download_url"]
-            return ImageResponse(download_url, image_data["file_name"], image_data)
+            return ImageRequest(download_url, image_data["file_name"], image_data)
     @classmethod
     async def get_default_model(cls, session: StreamSession, headers: dict):
@@ -169,7 +176,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         return cls.default_model
     @classmethod
-    def create_messages(cls, prompt: str, image_response: ImageResponse = None):
+    def create_messages(cls, prompt: str, image_response: ImageRequest = None):
         """
         Create a list of messages for the user input
@@ -282,7 +289,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         timeout: int = 120,
         access_token: str = None,
-        cookies: dict = None,
+        cookies: Cookies = None,
         auto_continue: bool = False,
         history_disabled: bool = True,
         action: str = "next",
@@ -317,12 +324,16 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         Raises:
             RuntimeError: If an error occurs during processing.
         """
+        if not has_requirements:
+            raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
         if not parent_id:
             parent_id = str(uuid.uuid4())
         if not cookies:
-            cookies = cls._cookies or get_cookies("chat.openai.com")
+            cookies = cls._cookies or get_cookies("chat.openai.com", False)
         if not access_token and "access_token" in cookies:
             access_token = cookies["access_token"]
+        if not access_token and not has_webdriver:
+            raise MissingAccessToken(f'Missing "access_token"')
         if not access_token:
             login_url = os.environ.get("G4F_LOGIN_URL")
             if login_url:
@@ -331,7 +342,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             cls._cookies = cookies
         headers = {"Authorization": f"Bearer {access_token}"}
         async with StreamSession(
             proxies={"https": proxy},
             impersonate="chrome110",
@@ -346,13 +356,15 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             except Exception as e:
                 yield e
         end_turn = EndTurn()
+        model = cls.get_model(model or await cls.get_default_model(session, headers))
+        model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
         while not end_turn.is_end:
             data = {
                 "action": action,
                 "arkose_token": await cls.get_arkose_token(session),
                 "conversation_id": conversation_id,
                 "parent_message_id": parent_id,
-                "model": cls.get_model(model or await cls.get_default_model(session, headers)),
+                "model": model,
                 "history_and_training_disabled": history_disabled and not auto_continue,
             }
             if action != "continue":

View File

@@ -1,10 +1,14 @@
 from __future__ import annotations
 import time
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from selenium.webdriver.common.keys import Keys
+try:
+    from selenium.webdriver.common.by import By
+    from selenium.webdriver.support.ui import WebDriverWait
+    from selenium.webdriver.support import expected_conditions as EC
+    from selenium.webdriver.common.keys import Keys
+except ImportError:
+    pass
 from ...typing import CreateResult, Messages
 from ..base_provider import AbstractProvider

View File

@@ -1,3 +1,5 @@
+from __future__ import annotations
 from abc import ABC, abstractmethod
 from typing import Union, List, Dict, Type
 from .typing import Messages, CreateResult

View File

@@ -4,7 +4,6 @@ from enum import Enum
 import g4f
 from g4f import Provider
-from g4f.api import Api
 from g4f.gui.run import gui_parser, run_gui_args
 def run_gui(args):
@@ -23,6 +22,7 @@ def main():
     args = parser.parse_args()
     if args.mode == "api":
+        from g4f.api import Api
         controller=Api(engine=g4f, debug=args.debug, list_ignored_providers=args.ignored_providers)
         controller.run(args.bind)
     elif args.mode == "gui":

View File

@@ -31,5 +31,11 @@ class NestAsyncioError(Exception):
 class ModelNotSupportedError(Exception):
     pass
-class AiohttpSocksError(Exception):
+class MissingRequirementsError(Exception):
+    pass
+class AiohttpSocksError(MissingRequirementsError):
+    pass
+class MissingAccessToken(Exception):
     pass
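
Because `AiohttpSocksError` now subclasses `MissingRequirementsError`, one handler covers both, while `MissingAccessToken` remains a plain `Exception`. A small sketch with an illustrative error message:

```python
from g4f.errors import MissingRequirementsError, AiohttpSocksError, MissingAccessToken

try:
    raise AiohttpSocksError('Install "aiohttp_socks" package for proxy support')
except MissingRequirementsError as error:
    # Also catches AiohttpSocksError, thanks to the new hierarchy.
    print(f"Missing requirement: {error}")

assert issubclass(AiohttpSocksError, MissingRequirementsError)
assert not issubclass(MissingAccessToken, MissingRequirementsError)
```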

View File

@@ -1,6 +1,10 @@
-from .server.app import app
-from .server.website import Website
-from .server.backend import Backend_Api
+try:
+    from .server.app import app
+    from .server.website import Website
+    from .server.backend import Backend_Api
+except ImportError:
+    from g4f.errors import MissingRequirementsError
+    raise MissingRequirementsError('Install "flask" and "werkzeug" package for gui')
 def run_gui(host: str = '0.0.0.0', port: int = 80, debug: bool = False) -> None:
     config = {

View File

@@ -1,8 +1,5 @@
 from argparse import ArgumentParser
-from g4f.gui import run_gui
 def gui_parser():
     parser = ArgumentParser(description="Run the GUI")
     parser.add_argument("-host", type=str, default="0.0.0.0", help="hostname")
@@ -10,15 +7,14 @@ def gui_parser():
     parser.add_argument("-debug", action="store_true", help="debug mode")
     return parser
 def run_gui_args(args):
+    from g4f.gui import run_gui
     host = args.host
     port = args.port
     debug = args.debug
     run_gui(host, port, debug)
 if __name__ == "__main__":
     parser = gui_parser()
     args = parser.parse_args()
     run_gui_args(args)

View File

@@ -1,8 +1,14 @@
 from __future__ import annotations
-from bs4 import BeautifulSoup
 from aiohttp import ClientSession, ClientTimeout
-from duckduckgo_search import DDGS
+try:
+    from duckduckgo_search import DDGS
+    from bs4 import BeautifulSoup
+    has_requirements = True
+except ImportError:
+    has_requirements = False
+from ...errors import MissingRequirementsError
 import asyncio
 class SearchResults():
@@ -88,6 +94,8 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = None
     return
 async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text: bool = True) -> SearchResults:
+    if not has_requirements:
+        raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
     with DDGS() as ddgs:
         results = []
         for result in ddgs.text(

View File

@@ -1,39 +1,52 @@
+from __future__ import annotations
 import re
 from io import BytesIO
 import base64
 from .typing import ImageType, Union
-from PIL import Image
+try:
+    from PIL.Image import open as open_image, new as new_image, Image
+    from PIL.Image import FLIP_LEFT_RIGHT, ROTATE_180, ROTATE_270, ROTATE_90
+    has_requirements = True
+except ImportError:
+    Image = type
+    has_requirements = False
+from .errors import MissingRequirementsError
 ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'webp', 'svg'}
-def to_image(image: ImageType, is_svg: bool = False) -> Image.Image:
+def to_image(image: ImageType, is_svg: bool = False) -> Image:
     """
     Converts the input image to a PIL Image object.
     Args:
-        image (Union[str, bytes, Image.Image]): The input image.
+        image (Union[str, bytes, Image]): The input image.
     Returns:
-        Image.Image: The converted PIL Image object.
+        Image: The converted PIL Image object.
     """
+    if not has_requirements:
+        raise MissingRequirementsError('Install "pillow" package for images')
     if is_svg:
         try:
             import cairosvg
         except ImportError:
-            raise RuntimeError('Install "cairosvg" package for svg images')
+            raise MissingRequirementsError('Install "cairosvg" package for svg images')
         if not isinstance(image, bytes):
             image = image.read()
         buffer = BytesIO()
         cairosvg.svg2png(image, write_to=buffer)
-        return Image.open(buffer)
+        return open_image(buffer)
     if isinstance(image, str):
         is_data_uri_an_image(image)
         image = extract_data_uri(image)
     if isinstance(image, bytes):
         is_accepted_format(image)
-        return Image.open(BytesIO(image))
-    elif not isinstance(image, Image.Image):
-        image = Image.open(image)
+        return open_image(BytesIO(image))
+    elif not isinstance(image, Image):
+        image = open_image(image)
         copy = image.copy()
         copy.format = image.format
         return copy
@@ -110,12 +123,12 @@ def extract_data_uri(data_uri: str) -> bytes:
     data = base64.b64decode(data)
     return data
-def get_orientation(image: Image.Image) -> int:
+def get_orientation(image: Image) -> int:
     """
     Gets the orientation of the given image.
     Args:
-        image (Image.Image): The image.
+        image (Image): The image.
     Returns:
         int: The orientation value.
@@ -126,40 +139,40 @@ def get_orientation(image: Image) -> int:
     if orientation is not None:
         return orientation
-def process_image(img: Image.Image, new_width: int, new_height: int) -> Image.Image:
+def process_image(img: Image, new_width: int, new_height: int) -> Image:
     """
     Processes the given image by adjusting its orientation and resizing it.
     Args:
-        img (Image.Image): The image to process.
+        img (Image): The image to process.
         new_width (int): The new width of the image.
         new_height (int): The new height of the image.
     Returns:
-        Image.Image: The processed image.
+        Image: The processed image.
     """
     # Fix orientation
     orientation = get_orientation(img)
     if orientation:
         if orientation > 4:
-            img = img.transpose(Image.FLIP_LEFT_RIGHT)
+            img = img.transpose(FLIP_LEFT_RIGHT)
         if orientation in [3, 4]:
-            img = img.transpose(Image.ROTATE_180)
+            img = img.transpose(ROTATE_180)
         if orientation in [5, 6]:
-            img = img.transpose(Image.ROTATE_270)
+            img = img.transpose(ROTATE_270)
        if orientation in [7, 8]:
-            img = img.transpose(Image.ROTATE_90)
+            img = img.transpose(ROTATE_90)
     # Resize image
     img.thumbnail((new_width, new_height))
     # Remove transparency
     if img.mode != "RGB":
         img.load()
-        white = Image.new('RGB', img.size, (255, 255, 255))
+        white = new_image('RGB', img.size, (255, 255, 255))
         white.paste(img, mask=img.split()[3])
         return white
     return img
-def to_base64(image: Image.Image, compression_rate: float) -> str:
+def to_base64_jpg(image: Image, compression_rate: float) -> str:
     """
     Converts the given image to a base64-encoded string.
@@ -195,7 +208,7 @@ def format_images_markdown(images, alt: str, preview: str="{image}?w=200&h=200")
     end_flag = "<!-- generated images end -->\n"
     return f"\n{start_flag}{images}\n{end_flag}\n"
-def to_bytes(image: Image.Image) -> bytes:
+def to_bytes(image: Image) -> bytes:
     """
     Converts the given image to bytes.
@@ -225,4 +238,7 @@ class ImageResponse():
         return format_images_markdown(self.images, self.alt)
     def get(self, key: str):
         return self.options.get(key)
+class ImageRequest(ImageResponse):
+    pass
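
`ImageRequest` is a thin subclass of `ImageResponse`, so it keeps the `(images, alt, options)` constructor and the `.get()` accessor that the Bing upload path relies on. A small sketch with placeholder URLs:

```python
from g4f.image import ImageResponse, ImageRequest

options = {
    "imageUrl": "https://www.bing.com/images/blob?bcid=example",
    "originalImageUrl": "https://www.bing.com/images/blob?bcid=example",
}
request = ImageRequest(options["imageUrl"], "", options)

assert isinstance(request, ImageResponse)
print(request.get("originalImageUrl"))
```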

View File

@@ -4,80 +4,124 @@ import json
 from functools import partialmethod
 from typing import AsyncGenerator
 from urllib.parse import urlparse
-from curl_cffi.requests import AsyncSession, Session, Response
+try:
+    from curl_cffi.requests import AsyncSession, Session, Response
+    has_curl_cffi = True
+except ImportError:
+    Session = type
+    has_curl_cffi = False
 from .webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
+from .errors import MissingRequirementsError
-class StreamResponse:
-    """
-    A wrapper class for handling asynchronous streaming responses.
-    Attributes:
-        inner (Response): The original Response object.
-    """
-    def __init__(self, inner: Response) -> None:
-        """Initialize the StreamResponse with the provided Response object."""
-        self.inner: Response = inner
-    async def text(self) -> str:
-        """Asynchronously get the response text."""
-        return await self.inner.atext()
-    def raise_for_status(self) -> None:
-        """Raise an HTTPError if one occurred."""
-        self.inner.raise_for_status()
-    async def json(self, **kwargs) -> dict:
-        """Asynchronously parse the JSON response content."""
-        return json.loads(await self.inner.acontent(), **kwargs)
-    async def iter_lines(self) -> AsyncGenerator[bytes, None]:
-        """Asynchronously iterate over the lines of the response."""
-        async for line in self.inner.aiter_lines():
-            yield line
-    async def iter_content(self) -> AsyncGenerator[bytes, None]:
-        """Asynchronously iterate over the response content."""
-        async for chunk in self.inner.aiter_content():
-            yield chunk
-    async def __aenter__(self):
-        """Asynchronously enter the runtime context for the response object."""
-        inner: Response = await self.inner
-        self.inner = inner
-        self.request = inner.request
-        self.status_code: int = inner.status_code
-        self.reason: str = inner.reason
-        self.ok: bool = inner.ok
-        self.headers = inner.headers
-        self.cookies = inner.cookies
-        return self
-    async def __aexit__(self, *args):
-        """Asynchronously exit the runtime context for the response object."""
-        await self.inner.aclose()
-class StreamSession(AsyncSession):
-    """
-    An asynchronous session class for handling HTTP requests with streaming.
-    Inherits from AsyncSession.
-    """
-    def request(
-        self, method: str, url: str, **kwargs
-    ) -> StreamResponse:
-        """Create and return a StreamResponse object for the given HTTP request."""
-        return StreamResponse(super().request(method, url, stream=True, **kwargs))
-    # Defining HTTP methods as partial methods of the request method.
-    head = partialmethod(request, "HEAD")
-    get = partialmethod(request, "GET")
-    post = partialmethod(request, "POST")
-    put = partialmethod(request, "PUT")
-    patch = partialmethod(request, "PATCH")
-    delete = partialmethod(request, "DELETE")
+if not has_curl_cffi:
+    from aiohttp import ClientSession, ClientResponse, ClientTimeout
+    from .Provider.helper import get_connector
+    class StreamResponse(ClientResponse):
+        async def iter_lines(self) -> iter[bytes, None]:
+            async for line in self.content:
+                yield line.rstrip(b"\r\n")
+        async def json(self):
+            return await super().json(content_type=None)
+    class StreamSession(ClientSession):
+        def __init__(self, headers: dict = {}, timeout: int = None, proxies: dict = {}, impersonate = None, **kwargs):
+            if impersonate:
+                headers = {
+                    'Accept-Encoding': 'gzip, deflate, br',
+                    'Accept-Language': 'en-US',
+                    'Connection': 'keep-alive',
+                    'Sec-Fetch-Dest': 'empty',
+                    'Sec-Fetch-Mode': 'cors',
+                    'Sec-Fetch-Site': 'same-site',
+                    "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
+                    'Accept': '*/*',
+                    'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not?A_Brand";v="24"',
+                    'sec-ch-ua-mobile': '?0',
+                    'sec-ch-ua-platform': '"Windows"',
+                    **headers
+                }
+            super().__init__(
+                **kwargs,
+                timeout=ClientTimeout(timeout) if timeout else None,
+                response_class=StreamResponse,
+                connector=get_connector(kwargs.get("connector"), proxies.get("https")),
+                headers=headers
+            )
+else:
+    class StreamResponse:
+        """
+        A wrapper class for handling asynchronous streaming responses.
+        Attributes:
+            inner (Response): The original Response object.
+        """
+        def __init__(self, inner: Response) -> None:
+            """Initialize the StreamResponse with the provided Response object."""
+            self.inner: Response = inner
+        async def text(self) -> str:
+            """Asynchronously get the response text."""
+            return await self.inner.atext()
+        def raise_for_status(self) -> None:
+            """Raise an HTTPError if one occurred."""
+            self.inner.raise_for_status()
+        async def json(self, **kwargs) -> dict:
+            """Asynchronously parse the JSON response content."""
+            return json.loads(await self.inner.acontent(), **kwargs)
+        async def iter_lines(self) -> AsyncGenerator[bytes, None]:
+            """Asynchronously iterate over the lines of the response."""
+            async for line in self.inner.aiter_lines():
+                yield line
+        async def iter_content(self) -> AsyncGenerator[bytes, None]:
+            """Asynchronously iterate over the response content."""
+            async for chunk in self.inner.aiter_content():
+                yield chunk
+        async def __aenter__(self):
+            """Asynchronously enter the runtime context for the response object."""
+            inner: Response = await self.inner
+            self.inner = inner
+            self.request = inner.request
+            self.status_code: int = inner.status_code
+            self.reason: str = inner.reason
+            self.ok: bool = inner.ok
+            self.headers = inner.headers
+            self.cookies = inner.cookies
+            return self
+        async def __aexit__(self, *args):
+            """Asynchronously exit the runtime context for the response object."""
+            await self.inner.aclose()
+    class StreamSession(AsyncSession):
+        """
+        An asynchronous session class for handling HTTP requests with streaming.
+        Inherits from AsyncSession.
+        """
+        def request(
+            self, method: str, url: str, **kwargs
+        ) -> StreamResponse:
+            """Create and return a StreamResponse object for the given HTTP request."""
+            return StreamResponse(super().request(method, url, stream=True, **kwargs))
+        # Defining HTTP methods as partial methods of the request method.
+        head = partialmethod(request, "HEAD")
+        get = partialmethod(request, "GET")
+        post = partialmethod(request, "POST")
+        put = partialmethod(request, "PUT")
+        patch = partialmethod(request, "PATCH")
+        delete = partialmethod(request, "DELETE")
@@ -93,6 +137,8 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> Session:
     Returns:
         Session: A Session object configured with cookies and headers from the WebDriver.
     """
+    if not has_curl_cffi:
+        raise MissingRequirementsError('Install "curl_cffi" package')
     with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=True) as driver:
         bypass_cloudflare(driver, url, timeout)
         cookies = get_driver_cookies(driver)
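
With the fallback in place, `StreamSession` exposes the same surface on either backend. A usage sketch with a placeholder URL:

```python
import asyncio
from g4f.requests import StreamSession

async def fetch():
    # Uses curl_cffi when it is installed, otherwise the aiohttp-based fallback above.
    async with StreamSession(impersonate="chrome110", timeout=120) as session:
        async with session.get("https://example.com") as response:
            response.raise_for_status()
            async for line in response.iter_lines():
                print(line)

asyncio.run(fetch())
```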

View File

@@ -1,6 +1,9 @@
 import sys
 from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type, IO, Optional
-from PIL.Image import Image
+try:
+    from PIL.Image import Image
+except ImportError:
+    Image = type
 if sys.version_info >= (3, 8):
     from typing import TypedDict
@@ -11,6 +14,7 @@ SHA256 = NewType('sha_256_hash', str)
 CreateResult = Generator[str, None, None]
 AsyncResult = AsyncGenerator[str, None]
 Messages = List[Dict[str, str]]
+Cookies = List[Dict[str, str]]
 ImageType = Union[str, bytes, IO, Image, None]
 __all__ = [

View File

@@ -1,3 +1,5 @@
+from __future__ import annotations
 from os import environ
 import requests
 from functools import cached_property

View File

@@ -1,12 +1,20 @@
 from __future__ import annotations
-from platformdirs import user_config_dir
-from selenium.webdriver.remote.webdriver import WebDriver
-from undetected_chromedriver import Chrome, ChromeOptions
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.ui import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
+try:
+    from platformdirs import user_config_dir
+    from selenium.webdriver.remote.webdriver import WebDriver
+    from undetected_chromedriver import Chrome, ChromeOptions
+    from selenium.webdriver.common.by import By
+    from selenium.webdriver.support.ui import WebDriverWait
+    from selenium.webdriver.support import expected_conditions as EC
+    has_requirements = True
+except ImportError:
+    WebDriver = type
+    has_requirements = False
 from os import path
 from os import access, R_OK
+from .errors import MissingRequirementsError
 from . import debug
 try:
@@ -33,6 +41,8 @@ def get_browser(
     Returns:
         WebDriver: An instance of WebDriver configured with the specified options.
     """
+    if not has_requirements:
+        raise MissingRequirementsError('Install "undetected_chromedriver" and "platformdirs" package')
     if user_data_dir is None:
         user_data_dir = user_config_dir("g4f")
     if user_data_dir and debug.logging:
@@ -144,7 +154,7 @@ class WebDriverSession:
         Returns:
             WebDriver: The reopened WebDriver instance.
         """
-        user_data_dir = user_data_data_dir or self.user_data_dir
+        user_data_dir = user_data_dir or self.user_data_dir
         if self.default_driver:
             self.default_driver.quit()
         if not virtual_display and self.virtual_display:

requirements-min.txt (new file, 2 lines)
View File

@@ -0,0 +1,2 @@
+requests
+aiohttp

View File

@@ -4,7 +4,6 @@ curl_cffi>=0.5.10
 aiohttp
 certifi
 browser_cookie3
-typing-extensions
 PyExecJS
 duckduckgo-search
 nest_asyncio
@@ -16,7 +15,6 @@ fastapi
 uvicorn
 flask
 py-arkose-generator
-asyncstdlib
 async-property
 undetected-chromedriver
 brotli

View File

@@ -8,33 +8,59 @@ here = os.path.abspath(os.path.dirname(__file__))
 with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as fh:
     long_description = '\n' + fh.read()
-install_requires = [
+INSTALL_REQUIRE = [
     "requests",
-    "pycryptodome",
-    "curl_cffi>=0.5.10",
     "aiohttp",
-    "certifi",
-    "browser_cookie3",
-    "typing-extensions",
-    "PyExecJS",
-    "duckduckgo-search",
-    "nest_asyncio",
-    "werkzeug",
-    "loguru",
-    "pillow",
-    "platformdirs",
-    "fastapi",
-    "uvicorn",
-    "flask",
-    "py-arkose-generator",
-    "asyncstdlib",
-    "async-property",
-    "undetected-chromedriver",
-    "brotli",
-    "beautifulsoup4",
-    "setuptools",
 ]
+EXTRA_REQUIRE = {
+    'all': [
+        "curl_cffi>=0.5.10",
+        "certifi",
+        "async-property",           # openai
+        "py-arkose-generator",      # openai
+        "browser_cookie3",          # get_cookies
+        "PyExecJS",                 # GptForLove
+        "duckduckgo-search",        # internet.search
+        "beautifulsoup4",           # internet.search and bing.create_images
+        "brotli",                   # openai
+        "platformdirs",             # webdriver
+        "undetected-chromedriver",  # webdriver
+        "setuptools",               # webdriver
+        "aiohttp_socks"             # proxy
+        "pillow",                   # image
+        "cairosvg",                 # svg image
+        "werkzeug", "flask",        # gui
+        "loguru", "fastapi",
+        "uvicorn", "nest_asyncio",  # api
+    ],
+    "image": [
+        "pillow",
+        "cairosvg",
+        "beautifulsoup4"
+    ],
+    "webdriver": [
+        "platformdirs",
+        "undetected-chromedriver",
+        "setuptools"
+    ],
+    "openai": [
+        "async-property",
+        "py-arkose-generator",
+        "brotli"
+    ],
+    "api": [
+        "loguru", "fastapi",
+        "uvicorn", "nest_asyncio"
+    ],
+    "gui": [
+        "werkzeug", "flask",
+        "beautifulsoup4", "pillow",
+        "duckduckgo-search",
+        "browser_cookie3"
+    ]
+}
 DESCRIPTION = (
     'The official gpt4free repository | various collection of powerful language models'
 )
@@ -53,7 +79,8 @@ setup(
         'g4f': ['g4f/interference/*', 'g4f/gui/client/*', 'g4f/gui/server/*', 'g4f/Provider/npm/*']
     },
     include_package_data=True,
-    install_requires=install_requires,
+    install_requires=INSTALL_REQUIRE,
+    extras_require=EXTRA_REQUIRE,
     entry_points={
         'console_scripts': ['g4f=g4f.cli:main'],
     },