Add nodriver to Gemini provider, Add slim docker image with google-chrome usage, Add the new docker images to publish workflow, Update requirements.txt and pip requirements
.github/workflows/publish-workflow.yaml (vendored, 12 changes)

@@ -48,3 +48,15 @@ jobs:
           labels: ${{ steps.metadata.outputs.labels }}
           build-args: |
             G4F_VERSION=${{ github.ref_name }}
+      - name: Build and push slim image
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          file: docker/Dockerfile-slim
+          push: true
+          tags: |
+            hlohaus789/g4f:slim
+            hlohaus789/g4f:${{ github.ref_name }}-slim
+          labels: ${{ steps.metadata.outputs.labels }}
+          build-args: |
+            G4F_VERSION=${{ github.ref_name }}
docker-compose-slim.yml (new file, 25 lines)

@@ -0,0 +1,25 @@
+version: '3'
+
+services:
+  g4f-gui:
+    container_name: g4f-gui
+    image: hlohaus789/g4f:slim
+    build:
+      context: .
+      dockerfile: docker/Dockerfile-slim
+    command: python -m g4f.cli gui -debug
+    volumes:
+      - .:/app
+    ports:
+      - '8080:8080'
+  g4f-api:
+    container_name: g4f-api
+    image: hlohaus789/g4f:slim
+    build:
+      context: .
+      dockerfile: docker/Dockerfile-slim
+    command: python -m g4f.cli api
+    volumes:
+      - .:/app
+    ports:
+      - '1337:1337'
docker/Dockerfile

@@ -40,6 +40,7 @@ RUN apt-get -qqy update \
 
 # Update entrypoint
 COPY docker/supervisor.conf /etc/supervisor/conf.d/selenium.conf
+COPY docker/supervisor-api.conf /etc/supervisor/conf.d/api.conf
 COPY docker/supervisor-gui.conf /etc/supervisor/conf.d/gui.conf
 
 # If no gui
docker/Dockerfile-slim (new file, 68 lines)

@@ -0,0 +1,68 @@
+FROM python:bookworm
+
+ARG G4F_VERSION
+ARG G4F_USER=g4f
+ARG G4F_USER_ID=1000
+ARG PYDANTIC_VERSION=1.8.1
+
+ENV G4F_VERSION $G4F_VERSION
+ENV G4F_USER $G4F_USER
+ENV G4F_USER_ID $G4F_USER_ID
+ENV G4F_DIR /app
+
+RUN apt-get update && apt-get upgrade -y \
+  && apt-get install -y git \
+  && apt-get install --quiet --yes --no-install-recommends \
+    build-essential \
+# Add user and user group
+  && groupadd -g $G4F_USER_ID $G4F_USER \
+  && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
+  && mkdir -p /var/log/supervisor \
+  && chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \
+  && echo "${G4F_USER}:${G4F_USER}" | chpasswd
+
+USER $G4F_USER_ID
+WORKDIR $G4F_DIR
+
+ENV HOME /home/$G4F_USER
+ENV PATH "${HOME}/.local/bin:${HOME}/.cargo/bin:${PATH}"
+
+# Create app dir and copy the project's requirements file into it
+RUN mkdir -p $G4F_DIR
+COPY requirements-slim.txt $G4F_DIR
+
+# Install rust toolchain
+RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
+
+# Upgrade pip for the latest features and install the project's Python dependencies.
+RUN python -m pip install --upgrade pip \
+  && pip install --no-cache-dir \
+    Cython==0.29.22 \
+    setuptools \
+  # Install PyDantic
+  && pip install \
+    -vvv \
+    --no-cache-dir \
+    --no-binary pydantic \
+    --global-option=build_ext \
+    --global-option=-j8 \
+    pydantic==${PYDANTIC_VERSION} \
+  && pip install --no-cache-dir -r requirements-slim.txt \
+  # Remove build packages
+  && pip uninstall --yes \
+    Cython \
+    setuptools
+
+USER root
+
+# Clean up build deps
+RUN rustup self uninstall -y \
+  && apt-get purge --auto-remove --yes \
+    build-essential \
+  && apt-get clean \
+  && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+USER $G4F_USER_ID
+
+# Copy the entire package into the container.
+ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f
docker/supervisor-api.conf (new executable file, 12 lines)

@@ -0,0 +1,12 @@
+[program:g4f-api]
+priority=15
+command=python -m g4f.cli api
+directory=/app
+stopasgroup=true
+autostart=true
+autorestart=true
+
+;Logs (all Hub activity redirected to stdout so it can be seen through "docker logs")
+redirect_stderr=true
+stdout_logfile=/dev/stdout
+stdout_logfile_maxbytes=0
docker/supervisor-gui.conf

@@ -1,6 +1,6 @@
 [program:g4f-gui]
 priority=15
-command=python -m g4f.cli gui
+command=python -m g4f.cli gui -debug
 directory=/app
 stopasgroup=true
 autostart=true
docker/supervisor.conf

@@ -48,16 +48,3 @@ stdout_logfile_backups=5
 stderr_logfile_backups=5
 stdout_capture_maxbytes=50MB
 stderr_capture_maxbytes=50MB
-
-[program:g4f-api]
-priority=15
-command=python -m g4f.cli api
-directory=/app
-stopasgroup=true
-autostart=true
-autorestart=true
-
-;Logs (all Hub activity redirected to stdout so it can be seen through "docker logs")
-redirect_stderr=true
-stdout_logfile=/dev/stdout
-stdout_logfile_maxbytes=0
docs/docker.md

@@ -28,12 +28,22 @@
    ```
 
 2. **Build and Run with Docker Compose**
 
+   Pull the latest image and run a container with Google Chrome support:
    ```bash
-   docker-compose up --build
+   docker pull hlohaus789/g4f
+   docker-compose up -d
+   ```
+   Or run the small docker images without Google Chrome:
+   ```bash
+   docker-compose -f docker-compose-slim.yml up -d
    ```
 
-3. **Access the API**
-   The server will be accessible at `http://localhost:1337`
+3. **Access the API or the GUI**
+
+   The api server will be accessible at `http://localhost:1337`
+
+   And the gui at this url: `http://localhost:8080`
+
 ### Non-Docker Method
 If you encounter issues with Docker, you can run the project directly using Python:

@@ -54,8 +64,12 @@ If you encounter issues with Docker, you can run the project directly using Python:
    python -m g4f.api.run
    ```
 
-4. **Access the API**
-   The server will be accessible at `http://localhost:1337`
+4. **Access the API or the GUI**
+
+   The api server will be accessible at `http://localhost:1337`
+
+   And the gui at this url: `http://localhost:8080`
+
 ## Testing the API
 **You can test the API using curl or by creating a simple Python script:**
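As a quick illustration of that last point, a minimal Python test script against the API container started above might look like this (a hedged sketch: the model name is illustrative, and it assumes g4f's OpenAI-compatible endpoint on port 1337):

```python
import requests

# Send one chat completion request to the local g4f API server.
response = requests.post(
    "http://localhost:1337/v1/chat/completions",
    json={
        "model": "gpt-3.5-turbo",  # illustrative model name
        "messages": [{"role": "user", "content": "Say hello"}],
    },
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```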
g4f/Provider/Cloudflare.py

@@ -7,6 +7,7 @@ import uuid
 from ..typing import AsyncResult, Messages, Cookies
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin, get_running_loop
 from ..requests import Session, StreamSession, get_args_from_nodriver, raise_for_status, merge_cookies
+from ..errors import ResponseStatusError
 
 class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
     label = "Cloudflare AI"

@@ -42,10 +43,14 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
             cls._args = asyncio.run(args)
         with Session(**cls._args) as session:
             response = session.get(cls.models_url)
-            raise_for_status(response)
+            cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
+            try:
+                raise_for_status(response)
+            except ResponseStatusError as e:
+                cls._args = None
+                raise e
             json_data = response.json()
             cls.models = [model.get("name") for model in json_data.get("models")]
-            cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
         return cls.models
 
     @classmethod

@@ -74,8 +79,12 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
             cls.api_endpoint,
             json=data,
         ) as response:
-            await raise_for_status(response)
             cls._args["cookies"] = merge_cookies(cls._args["cookies"], response)
+            try:
+                await raise_for_status(response)
+            except ResponseStatusError as e:
+                cls._args = None
+                raise e
             async for line in response.iter_lines():
                 if line.startswith(b'data: '):
                     if line == b'data: [DONE]':
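The point of this reordering: cookies are now merged on every response, and a rejected response invalidates the cached request arguments so the next call re-acquires fresh ones (via the nodriver challenge) instead of retrying with stale cookies. A minimal sketch of the pattern, using plain `requests` as a stand-in for g4f's Session (names here are illustrative, not the provider's API):

```python
import requests

_args = None  # cached session arguments, as in Cloudflare._args

def fetch_models(url: str) -> list:
    global _args
    if _args is None:
        _args = {"cookies": {}}  # g4f would rebuild these via a nodriver login
    response = requests.get(url, cookies=_args["cookies"])
    # Merge cookies on every response so rotating tokens stay current.
    _args["cookies"].update(response.cookies.get_dict())
    try:
        response.raise_for_status()
    except requests.HTTPError:
        _args = None  # invalidate the cache so the next call starts clean
        raise
    return [model.get("name") for model in response.json().get("models", [])]
```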
g4f/Provider/HuggingChat.py

@@ -4,12 +4,13 @@ import json
 import requests
 
 try:
-    from curl_cffi import requests as cf_reqs
+    from curl_cffi import Session
     has_curl_cffi = True
 except ImportError:
     has_curl_cffi = False
 from ..typing import CreateResult, Messages
 from ..errors import MissingRequirementsError
+from ..requests.raise_for_status import raise_for_status
 from .base_provider import ProviderModelMixin, AbstractProvider
 from .helper import format_prompt

@@ -43,15 +44,6 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         "phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct",
     }
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
     @classmethod
     def create_completion(
         cls,

@@ -65,7 +57,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
         model = cls.get_model(model)
 
         if model in cls.models:
-            session = cf_reqs.Session()
+            session = Session()
             session.headers = {
                 'accept': '*/*',
                 'accept-language': 'en',

@@ -82,19 +74,17 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 'sec-fetch-site': 'same-origin',
                 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36',
             }
 
             json_data = {
                 'model': model,
             }
 
             response = session.post('https://huggingface.co/chat/conversation', json=json_data)
-            if response.status_code != 200:
-                raise RuntimeError(f"Request failed with status code: {response.status_code}, response: {response.text}")
-
+            raise_for_status(response)
 
             conversationId = response.json().get('conversationId')
 
             # Get the data response and parse it properly
             response = session.get(f'https://huggingface.co/chat/conversation/{conversationId}/__data.json?x-sveltekit-invalidated=11')
+            raise_for_status(response)
 
             # Split the response content by newlines and parse each line as JSON
             try:

@@ -156,6 +146,7 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
                 headers=headers,
                 files=files,
             )
+            raise_for_status(response)
 
             full_response = ""
             for line in response.iter_lines():

@@ -183,8 +174,3 @@ class HuggingChat(AbstractProvider, ProviderModelMixin):
 
         if not stream:
             yield full_response
-
-    @classmethod
-    def supports_model(cls, model: str) -> bool:
-        """Check if the model is supported by the provider."""
-        return model in cls.models or model in cls.model_aliases
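The deleted `get_model`/`supports_model` helpers duplicated alias resolution that the inherited `ProviderModelMixin` already provides. A sketch of that behavior (assumed to match the mixin, not copied from it; the alias is the one visible in the diff above):

```python
# Standalone model-alias resolution, as provider classes rely on it.
class AliasResolver:
    default_model = "microsoft/Phi-3.5-mini-instruct"  # illustrative default
    models = ["microsoft/Phi-3.5-mini-instruct"]
    model_aliases = {"phi-3.5-mini": "microsoft/Phi-3.5-mini-instruct"}

    @classmethod
    def get_model(cls, model: str) -> str:
        if model in cls.models:
            return model          # exact model id
        if model in cls.model_aliases:
            return cls.model_aliases[model]  # short alias -> full id
        return cls.default_model  # unknown names fall back to the default

assert AliasResolver.get_model("phi-3.5-mini") == "microsoft/Phi-3.5-mini-instruct"
```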
g4f/Provider/needs_auth/Gemini.py

@@ -6,24 +6,20 @@ import random
 import re
 
 from aiohttp import ClientSession, BaseConnector
 
-from ..helper import get_connector
-
 try:
-    from selenium.webdriver.common.by import By
-    from selenium.webdriver.support.ui import WebDriverWait
-    from selenium.webdriver.support import expected_conditions as EC
+    import nodriver
+    has_nodriver = True
 except ImportError:
-    pass
+    has_nodriver = False
 
 from ... import debug
 from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
 from ..base_provider import AsyncGeneratorProvider, BaseConversation
 from ..helper import format_prompt, get_cookies
 from ...requests.raise_for_status import raise_for_status
-from ...errors import MissingAuthError, MissingRequirementsError
+from ...requests.aiohttp import get_connector
+from ...errors import MissingAuthError
 from ...image import ImageResponse, to_bytes
-from ...webdriver import get_browser, get_driver_cookies
 
 REQUEST_HEADERS = {
     "authority": "gemini.google.com",

@@ -64,9 +60,9 @@ class Gemini(AsyncGeneratorProvider):
 
     @classmethod
     async def nodriver_login(cls, proxy: str = None) -> AsyncIterator[str]:
-        try:
-            import nodriver as uc
-        except ImportError:
+        if not has_nodriver:
+            if debug.logging:
+                print("Skip nodriver login in Gemini provider")
             return
         try:
             from platformdirs import user_config_dir

@@ -75,7 +71,7 @@ class Gemini(AsyncGeneratorProvider):
             user_data_dir = None
         if debug.logging:
             print(f"Open nodriver with user_dir: {user_data_dir}")
-        browser = await uc.start(
+        browser = await nodriver.start(
             user_data_dir=user_data_dir,
             browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
         )

@@ -91,30 +87,6 @@ class Gemini(AsyncGeneratorProvider):
         await page.close()
         cls._cookies = cookies
 
-    @classmethod
-    async def webdriver_login(cls, proxy: str) -> AsyncIterator[str]:
-        driver = None
-        try:
-            driver = get_browser(proxy=proxy)
-            try:
-                driver.get(f"{cls.url}/app")
-                WebDriverWait(driver, 5).until(
-                    EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))
-                )
-            except:
-                login_url = os.environ.get("G4F_LOGIN_URL")
-                if login_url:
-                    yield f"Please login: [Google Gemini]({login_url})\n\n"
-                WebDriverWait(driver, 240).until(
-                    EC.visibility_of_element_located((By.CSS_SELECTOR, "div.ql-editor.textarea"))
-                )
-            cls._cookies = get_driver_cookies(driver)
-        except MissingRequirementsError:
-            pass
-        finally:
-            if driver:
-                driver.close()
-
     @classmethod
     async def create_async_generator(
         cls,

@@ -143,9 +115,6 @@ class Gemini(AsyncGeneratorProvider):
         if not cls._snlm0e:
             async for chunk in cls.nodriver_login(proxy):
                 yield chunk
-            if cls._cookies is None:
-                async for chunk in cls.webdriver_login(proxy):
-                    yield chunk
         if not cls._snlm0e:
             if cls._cookies is None or "__Secure-1PSID" not in cls._cookies:
                 raise MissingAuthError('Missing "__Secure-1PSID" cookie')

@@ -211,20 +180,23 @@ class Gemini(AsyncGeneratorProvider):
                         yield content[last_content_len:]
                     last_content_len = len(content)
                 if image_prompt:
-                    images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
-                    if response_format == "b64_json":
-                        yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
-                    else:
-                        resolved_images = []
-                        preview = []
-                        for image in images:
-                            async with client.get(image, allow_redirects=False) as fetch:
-                                image = fetch.headers["location"]
-                            async with client.get(image, allow_redirects=False) as fetch:
-                                image = fetch.headers["location"]
-                            resolved_images.append(image)
-                            preview.append(image.replace('=s512', '=s200'))
-                        yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+                    try:
+                        images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
+                        if response_format == "b64_json":
+                            yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
+                        else:
+                            resolved_images = []
+                            preview = []
+                            for image in images:
+                                async with client.get(image, allow_redirects=False) as fetch:
+                                    image = fetch.headers["location"]
+                                async with client.get(image, allow_redirects=False) as fetch:
+                                    image = fetch.headers["location"]
+                                resolved_images.append(image)
+                                preview.append(image.replace('=s512', '=s200'))
+                            yield ImageResponse(resolved_images, image_prompt, {"orginal_links": images, "preview": preview})
+                    except TypeError:
+                        pass
 
 def build_request(
     prompt: str,
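The nodriver flow replaces the Selenium `webdriver_login` entirely: open a real Chrome profile, let the user log in, and poll until the session cookie appears. A condensed sketch under those assumptions (it assumes the `nodriver` package and its cookie API; the URL and cookie name are the ones the provider checks):

```python
import asyncio
import nodriver

async def fetch_gemini_cookies(proxy: str = None) -> dict:
    # Launch a visible browser; a proxy is passed through as a Chrome flag.
    browser = await nodriver.start(
        browser_args=None if proxy is None else [f"--proxy-server={proxy}"],
    )
    page = await browser.get("https://gemini.google.com/app")
    cookies = {}
    while "__Secure-1PSID" not in cookies:
        await asyncio.sleep(1)  # poll until the user has logged in
        cookies = {c.name: c.value for c in await browser.cookies.get_all()}
    await page.close()
    return cookies

# asyncio.run(fetch_gemini_cookies())
```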
g4f/Provider/needs_auth/GeminiPro.py

@@ -16,9 +16,9 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
     working = True
     supports_message_history = True
     needs_auth = True
-    default_model = "gemini-1.5-pro-latest"
+    default_model = "gemini-1.5-pro"
     default_vision_model = default_model
-    models = [default_model, "gemini-pro", "gemini-pro-vision", "gemini-1.5-flash"]
+    models = [default_model, "gemini-pro", "gemini-1.5-flash", "gemini-1.5-flash-8b"]
 
     @classmethod
     async def create_async_generator(
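A hedged usage example for the renamed default model, following g4f's public `ChatCompletion` interface (GeminiPro has `needs_auth = True`, so an API key is required; the key value is a placeholder):

```python
import g4f

response = g4f.ChatCompletion.create(
    model="gemini-1.5-pro",  # the new default_model
    messages=[{"role": "user", "content": "Hello"}],
    provider=g4f.Provider.GeminiPro,
    api_key="YOUR_API_KEY",  # placeholder, not a real credential
)
print(response)
```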
g4f/Provider/needs_auth/HuggingFace.py

@@ -1,13 +1,11 @@
 from __future__ import annotations
 
 import json
-from aiohttp import ClientSession, BaseConnector
 
 from ...typing import AsyncResult, Messages
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..helper import get_connector
-from ...errors import RateLimitError, ModelNotFoundError
-from ...requests.raise_for_status import raise_for_status
+from ...errors import ModelNotFoundError
+from ...requests import StreamSession, raise_for_status
 
 from ..HuggingChat import HuggingChat
 

@@ -20,15 +18,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     models = HuggingChat.models
     model_aliases = HuggingChat.model_aliases
 
-    @classmethod
-    def get_model(cls, model: str) -> str:
-        if model in cls.models:
-            return model
-        elif model in cls.model_aliases:
-            return cls.model_aliases[model]
-        else:
-            return cls.default_model
-
     @classmethod
     async def create_async_generator(
         cls,

@@ -36,7 +25,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         stream: bool = True,
         proxy: str = None,
-        connector: BaseConnector = None,
         api_base: str = "https://api-inference.huggingface.co",
         api_key: str = None,
         max_new_tokens: int = 1024,

@@ -62,7 +50,6 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         }
         if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
-
         params = {
             "return_full_text": False,
             "max_new_tokens": max_new_tokens,

@@ -70,10 +57,9 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
             **kwargs
         }
         payload = {"inputs": format_prompt(messages), "parameters": params, "stream": stream}
-        async with ClientSession(
+        async with StreamSession(
             headers=headers,
-            connector=get_connector(connector, proxy)
+            proxy=proxy
         ) as session:
             async with session.post(f"{api_base.rstrip('/')}/models/{model}", json=payload) as response:
                 if response.status == 404:

@@ -81,7 +67,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                 await raise_for_status(response)
                 if stream:
                     first = True
-                    async for line in response.content:
+                    async for line in response.iter_lines():
                         if line.startswith(b"data:"):
                             data = json.loads(line[5:])
                             if not data["token"]["special"]:

@@ -89,7 +75,8 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
                                 if first:
                                     first = False
                                     chunk = chunk.lstrip()
-                                yield chunk
+                                if chunk:
+                                    yield chunk
                 else:
                     yield (await response.json())[0]["generated_text"].strip()
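The streaming branch now reads server-sent `data:` lines via `iter_lines()` and skips empty chunks instead of yielding them. A self-contained sketch of that parsing (the token payload shape is assumed from HuggingFace's text-generation inference API, not shown in the diff):

```python
import json

def parse_stream(lines):
    """Yield text tokens from `data:`-prefixed inference-API lines."""
    first = True
    for line in lines:
        if line.startswith(b"data:"):
            data = json.loads(line[5:])
            if not data["token"]["special"]:
                chunk = data["token"]["text"]  # assumed payload shape
                if first:
                    first = False
                    chunk = chunk.lstrip()  # trim leading whitespace once
                if chunk:  # the new guard: never yield an empty chunk
                    yield chunk

assert list(parse_stream([b'data: {"token": {"special": false, "text": " Hi"}}'])) == ["Hi"]
```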
g4f/Provider/needs_auth/MetaAI.py

@@ -79,7 +79,6 @@ class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
             self.access_token = None
         if self.access_token is None and cookies is None:
             await self.update_access_token()
-
         if self.access_token is None:
             url = "https://www.meta.ai/api/graphql/"
             payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}

@@ -128,6 +127,8 @@ class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
                 json_line = json.loads(line)
             except json.JSONDecodeError:
                 continue
+            if json_line.get("errors"):
+                raise RuntimeError("\n".join([error.get("message") for error in json_line.get("errors")]))
             bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {})
             streaming_state = bot_response_message.get("streaming_state")
             fetch_id = bot_response_message.get("fetch_id") or fetch_id
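The added check surfaces GraphQL-level errors that previously streamed by silently. A minimal reproduction of the behavior (the sample payload is invented for illustration):

```python
import json

def check_line(line: bytes) -> dict:
    """Parse one streamed line and raise if the GraphQL response carries errors."""
    json_line = json.loads(line)
    if json_line.get("errors"):
        raise RuntimeError("\n".join(error.get("message") for error in json_line["errors"]))
    return json_line

try:
    check_line(b'{"errors": [{"message": "Rate limit exceeded"}]}')  # sample payload
except RuntimeError as e:
    print(e)  # -> Rate limit exceeded
```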
g4f/Provider/needs_auth/MetaAIAccount.py

@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from ...typing import AsyncResult, Messages, Cookies
 from ..helper import format_prompt, get_cookies
-from ..MetaAI import MetaAI
+from .MetaAI import MetaAI
 
 class MetaAIAccount(MetaAI):
     needs_auth = True
g4f/Provider/needs_auth/__init__.py

@@ -11,6 +11,7 @@ from .GeminiPro import GeminiPro
 from .Groq import Groq
 from .HuggingFace import HuggingFace
 from .MetaAI import MetaAI
+from .MetaAIAccount import MetaAIAccount
 from .OpenaiAPI import OpenaiAPI
 from .OpenaiChat import OpenaiChat
 from .PerplexityApi import PerplexityApi
g4f/gui/server/api.py

@@ -6,14 +6,14 @@ import uuid
 import asyncio
 import time
 from aiohttp import ClientSession
-from typing import Iterator, Optional, AsyncIterator, Union
+from typing import Iterator, Optional
 from flask import send_from_directory
 
 from g4f import version, models
 from g4f import get_last_provider, ChatCompletion
 from g4f.errors import VersionNotFoundError
 from g4f.typing import Cookies
-from g4f.image import ImagePreview, ImageResponse, is_accepted_format
+from g4f.image import ImagePreview, ImageResponse, is_accepted_format, extract_data_uri
 from g4f.requests.aiohttp import get_connector
 from g4f.Provider import ProviderType, __providers__, __map__
 from g4f.providers.base_provider import ProviderModelMixin, FinishReason

@@ -31,7 +31,6 @@ def ensure_images_dir():
 
 conversations: dict[dict[str, BaseConversation]] = {}
 
-
 class Api:
     @staticmethod
     def get_models() -> list[str]:

@@ -176,18 +175,22 @@ class Api:
             connector=get_connector(None, os.environ.get("G4F_PROXY")),
             cookies=cookies
         ) as session:
-            async def copy_image(image):
-                async with session.get(image) as response:
-                    target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
-                    with open(target, "wb") as f:
-                        async for chunk in response.content.iter_any():
-                            f.write(chunk)
-                    with open(target, "rb") as f:
-                        extension = is_accepted_format(f.read(12)).split("/")[-1]
-                        extension = "jpg" if extension == "jpeg" else extension
-                        new_target = f"{target}.{extension}"
-                        os.rename(target, new_target)
-                        return f"/images/{os.path.basename(new_target)}"
+            async def copy_image(image: str) -> str:
+                target = os.path.join(images_dir, f"{int(time.time())}_{str(uuid.uuid4())}")
+                if image.startswith("data:"):
+                    with open(target, "wb") as f:
+                        f.write(extract_data_uri(image))
+                else:
+                    async with session.get(image) as response:
+                        with open(target, "wb") as f:
+                            async for chunk in response.content.iter_any():
+                                f.write(chunk)
+                with open(target, "rb") as f:
+                    extension = is_accepted_format(f.read(12)).split("/")[-1]
+                    extension = "jpg" if extension == "jpeg" else extension
+                    new_target = f"{target}.{extension}"
+                    os.rename(target, new_target)
+                    return f"/images/{os.path.basename(new_target)}"
 
             return await asyncio.gather(*[copy_image(image) for image in images])

@@ -197,7 +200,6 @@ class Api:
         response_type: content
     }
 
-
 def get_error_message(exception: Exception) -> str:
     message = f"{type(exception).__name__}: {exception}"
     provider = get_last_provider()
g4f/image.py

@@ -133,7 +133,7 @@ def extract_data_uri(data_uri: str) -> bytes:
     Returns:
         bytes: The extracted binary data.
     """
-    data = data_uri.split(",")[1]
+    data = data_uri.split(",")[-1]
     data = base64.b64decode(data)
     return data
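Why `split(",")[-1]`: the base64 alphabet contains no commas, so the last segment is always the payload, and a bare base64 string without a `data:...,` header no longer raises `IndexError` the way `[1]` did. A short demonstration:

```python
import base64

def extract_data_uri(data_uri: str) -> bytes:
    data = data_uri.split(",")[-1]  # payload, with or without a data: header
    return base64.b64decode(data)

assert extract_data_uri("data:image/png;base64,aGk=") == b"hi"
assert extract_data_uri("aGk=") == b"hi"  # would crash with split(",")[1]
```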
requirements-slim.txt (new file, 16 lines)

@@ -0,0 +1,16 @@
+requests
+pycryptodome
+curl_cffi>=0.6.2
+aiohttp
+certifi
+duckduckgo-search>=5.0
+nest_asyncio
+werkzeug
+pillow
+fastapi
+uvicorn
+flask
+brotli
+beautifulsoup4
+aiohttp_socks
+cryptography
requirements.txt

@@ -4,7 +4,6 @@ curl_cffi>=0.6.2
 aiohttp
 certifi
 browser_cookie3
-PyExecJS
 duckduckgo-search>=5.0
 nest_asyncio
 werkzeug

@@ -20,4 +19,3 @@ pywebview
 plyer
 cryptography
 nodriver
-cloudscraper