Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-10-15 12:50:45 +08:00

Commit: Add support for all models

- Add AbstractProvider class
- Add ProviderType type
- Add get_last_provider function
- Add version module and VersionUtils
- Display used provider in gui
- Fix error response in api
@@ -4,12 +4,12 @@ import time
 import random

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from .helper import format_prompt, get_random_string
 from ..webdriver import WebDriver, WebDriverSession
 from .. import debug

-class AItianhuSpace(BaseProvider):
+class AItianhuSpace(AbstractProvider):
     url = "https://chat3.aiyunos.top/"
     working = True
     supports_stream = True
@@ -2,9 +2,9 @@ from __future__ import annotations

 import requests, json
 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider

-class DeepInfra(BaseProvider):
+class DeepInfra(AbstractProvider):
     url: str = "https://deepinfra.com"
     working: bool = True
     supports_stream: bool = True
@@ -14,8 +14,10 @@ class DeepInfra(BaseProvider):
     def create_completion(model: str,
                           messages: Messages,
                           stream: bool,
+                          auth: str = None,
                           **kwargs) -> CreateResult:
+        if not model:
+            model = 'meta-llama/Llama-2-70b-chat-hf'
         headers = {
             'Accept-Language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
             'Cache-Control': 'no-cache',
@@ -34,9 +36,11 @@ class DeepInfra(BaseProvider):
             'sec-ch-ua-mobile': '?0',
             'sec-ch-ua-platform': '"macOS"',
         }
+        if auth:
+            headers['Authorization'] = f"bearer {auth}"

         json_data = json.dumps({
-            'model'   : 'meta-llama/Llama-2-70b-chat-hf',
+            'model'   : model,
             'messages': messages,
             'stream'  : True}, separators=(',', ':'))

@@ -45,18 +49,17 @@ class DeepInfra(BaseProvider):

         response.raise_for_status()
         first = True
-        for line in response.iter_content(chunk_size=1024):
+        for line in response.content:
             if line.startswith(b"data: [DONE]"):
                 break

             elif line.startswith(b"data: "):
-                chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+                try:
+                    chunk = json.loads(line[6:])["choices"][0]["delta"].get("content")
+                except Exception:
+                    raise RuntimeError(f"Response: {line}")
                 if chunk:
                     if first:
                         chunk = chunk.lstrip()
                         if chunk:
                             first = False
-                    yield (chunk)
+                    yield chunk
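The new optional auth argument above becomes a bearer Authorization header, and an omitted model falls back to Llama-2-70b. A minimal usage sketch (the token value is a placeholder):

import g4f
from g4f.Provider import DeepInfra

# placeholder token; model defaults to 'meta-llama/Llama-2-70b-chat-hf' if empty
for chunk in g4f.ChatCompletion.create(
    model="meta-llama/Llama-2-70b-chat-hf",
    messages=[{"role": "user", "content": "Hello"}],
    provider=DeepInfra,
    stream=True,
    auth="YOUR_DEEPINFRA_TOKEN",
):
    print(chunk, end="")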
@@ -1,12 +1,12 @@
 from __future__ import annotations
 import requests, json

-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from ..typing import CreateResult, Messages
 from json import dumps


-class GeekGpt(BaseProvider):
+class GeekGpt(AbstractProvider):
     url = 'https://chat.geekgpt.org'
     working = True
     supports_message_history = True
@@ -3,11 +3,11 @@ from __future__ import annotations
 import time, json

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from .helper import format_prompt
 from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare

-class MyShell(BaseProvider):
+class MyShell(AbstractProvider):
     url = "https://app.myshell.ai/chat"
     working = True
     supports_gpt_35_turbo = True
@@ -7,11 +7,11 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.common.keys import Keys

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from .helper import format_prompt
 from ..webdriver import WebDriver, WebDriverSession

-class PerplexityAi(BaseProvider):
+class PerplexityAi(AbstractProvider):
     url = "https://www.perplexity.ai"
     working = True
     supports_gpt_35_turbo = True
@@ -1,12 +1,12 @@
 from __future__ import annotations

-from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider, format_prompt
 import json

+from ..typing import CreateResult, Messages
+from .base_provider import AbstractProvider, format_prompt
 from ..requests import Session, get_session_from_browser

-class Pi(BaseProvider):
+class Pi(AbstractProvider):
     url = "https://pi.ai/talk"
     working = True
     supports_stream = True
@@ -3,10 +3,10 @@ from __future__ import annotations
 import time, json, time

 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from ..webdriver import WebDriver, WebDriverSession

-class TalkAi(BaseProvider):
+class TalkAi(AbstractProvider):
     url = "https://talkai.info"
     working = True
     supports_gpt_35_turbo = True
@@ -3,10 +3,10 @@ from __future__ import annotations
 import json, base64, requests, execjs, random, uuid

 from ..typing import Messages, TypedDict, CreateResult, Any
-from .base_provider import BaseProvider
+from .base_provider import AbstractProvider
 from ..debug import logging

-class Vercel(BaseProvider):
+class Vercel(AbstractProvider):
     url = 'https://sdk.vercel.ai'
     working = False
     supports_message_history = True
@@ -1,11 +1,13 @@
 from __future__ import annotations

-from .base_provider import BaseProvider, AsyncProvider, AsyncGeneratorProvider
+from ..base_provider import BaseProvider, ProviderType
 from .retry_provider import RetryProvider
+from .base_provider import AsyncProvider, AsyncGeneratorProvider
 from .deprecated import *
 from .needs_auth import *
 from .unfinished import *
 from .selenium import *

 from .Aura import Aura
 from .AiAsk import AiAsk
 from .Aichat import Aichat
@@ -59,7 +61,7 @@ __modules__: list = [
     getattr(sys.modules[__name__], provider) for provider in dir()
     if not provider.startswith("__")
 ]
-__providers__: list[type[BaseProvider]] = [
+__providers__: list[ProviderType] = [
     provider for provider in __modules__
     if isinstance(provider, type)
     and issubclass(provider, BaseProvider)
@@ -67,9 +69,9 @@ __providers__: list[type[BaseProvider]] = [
 __all__: list[str] = [
     provider.__name__ for provider in __providers__
 ]
-__map__: dict[str, type[BaseProvider]] = dict([
+__map__: dict[str, ProviderType] = dict([
     (provider.__name__, provider) for provider in __providers__
 ])

 class ProviderUtils:
-    convert: dict[str, type[BaseProvider]] = __map__
+    convert: dict[str, ProviderType] = __map__
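These annotations rely on ProviderType from the new g4f/base_provider.py introduced later in this commit. A small sketch of what the alias admits:

from g4f.base_provider import ProviderType
from g4f.Provider import DeepInfra, RetryProvider, GeekGpt

def describe(provider: ProviderType) -> dict:
    # get_dict() is defined on BaseProvider in the new g4f/base_provider.py
    return provider.get_dict()

print(describe(DeepInfra))                   # provider classes are ProviderType
retry = RetryProvider([DeepInfra, GeekGpt])  # BaseRetryProvider instances are too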
@@ -1,12 +1,14 @@
 from __future__ import annotations

 import sys
+import asyncio
 from asyncio import AbstractEventLoop
 from concurrent.futures import ThreadPoolExecutor
-from abc import ABC, abstractmethod
+from abc import abstractmethod
 from inspect import signature, Parameter
 from .helper import get_event_loop, get_cookies, format_prompt
-from ..typing import CreateResult, AsyncResult, Messages
+from ..typing import CreateResult, AsyncResult, Messages, Union
+from ..base_provider import BaseProvider

 if sys.version_info < (3, 10):
     NoneType = type(None)
@@ -20,25 +22,7 @@ if sys.platform == 'win32':
     ):
         asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

-class BaseProvider(ABC):
-    url: str
-    working: bool = False
-    needs_auth: bool = False
-    supports_stream: bool = False
-    supports_gpt_35_turbo: bool = False
-    supports_gpt_4: bool = False
-    supports_message_history: bool = False
-
-    @staticmethod
-    @abstractmethod
-    def create_completion(
-        model: str,
-        messages: Messages,
-        stream: bool,
-        **kwargs
-    ) -> CreateResult:
-        raise NotImplementedError()
-
+class AbstractProvider(BaseProvider):
     @classmethod
     async def create_async(
         cls,
@@ -60,9 +44,12 @@ class BaseProvider(ABC):
             **kwargs
         ))

-        return await loop.run_in_executor(
-            executor,
-            create_func
+        return await asyncio.wait_for(
+            loop.run_in_executor(
+                executor,
+                create_func
+            ),
+            timeout=kwargs.get("timeout", 0)
         )

     @classmethod
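With the executor call wrapped in asyncio.wait_for, a timeout passed through **kwargs now bounds providers that only implement the synchronous interface. A sketch, assuming the call routes through AbstractProvider.create_async (note that wait_for raises TimeoutError almost immediately for a zero timeout, so callers are expected to supply a positive value):

import asyncio
import g4f

async def main() -> str:
    # "timeout" travels through **kwargs into asyncio.wait_for above
    return await g4f.ChatCompletion.create_async(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
        timeout=30,
    )

print(asyncio.run(main()))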
@@ -102,16 +89,19 @@ class BaseProvider(ABC):
         return f"g4f.Provider.{cls.__name__} supports: ({args}\n)"


-class AsyncProvider(BaseProvider):
+class AsyncProvider(AbstractProvider):
     @classmethod
     def create_completion(
         cls,
         model: str,
         messages: Messages,
         stream: bool = False,
+        *,
+        loop: AbstractEventLoop = None,
         **kwargs
     ) -> CreateResult:
-        loop = get_event_loop()
+        if not loop:
+            loop = get_event_loop()
         coro = cls.create_async(model, messages, **kwargs)
         yield loop.run_until_complete(coro)
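The keyword-only loop argument added here lets the synchronous generator interface reuse an event loop the caller already owns instead of calling get_event_loop(). A toy sketch with a made-up EchoProvider subclass (not a real provider):

import asyncio
from g4f.Provider import AsyncProvider

class EchoProvider(AsyncProvider):
    # made-up provider that just echoes the last user message
    @classmethod
    async def create_async(cls, model: str, messages, **kwargs) -> str:
        return messages[-1]["content"]

loop = asyncio.new_event_loop()
try:
    for text in EchoProvider.create_completion(
        "any-model",
        [{"role": "user", "content": "Hello"}],
        loop=loop,  # pass an existing loop via the new keyword-only argument
    ):
        print(text)  # -> Hello
finally:
    loop.close()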
@@ -134,9 +124,12 @@ class AsyncGeneratorProvider(AsyncProvider):
         model: str,
         messages: Messages,
         stream: bool = True,
+        *,
+        loop: AbstractEventLoop = None,
         **kwargs
     ) -> CreateResult:
-        loop = get_event_loop()
+        if not loop:
+            loop = get_event_loop()
         generator = cls.create_async_generator(
             model,
             messages,
@@ -171,6 +164,7 @@ class AsyncGeneratorProvider(AsyncProvider):
     def create_async_generator(
         model: str,
         messages: Messages,
+        stream: bool = True,
         **kwargs
     ) -> AsyncResult:
         raise NotImplementedError()
@@ -3,10 +3,10 @@ from __future__ import annotations
 import requests

 from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class AiService(BaseProvider):
+class AiService(AbstractProvider):
     url = "https://aiservice.vercel.app/"
     working = False
     supports_gpt_35_turbo = True
@@ -1,9 +1,10 @@
 from __future__ import annotations
-import requests

-from ..base_provider import BaseProvider
+import requests
+import json
+
+from ..base_provider import AbstractProvider
 from ...typing import CreateResult, Messages
-from json import dumps

 # to recreate this easily, send a post request to https://chat.aivvm.com/api/models
 models = {
@@ -17,7 +18,7 @@ models = {
     'gpt-4-32k-0613': {'id': 'gpt-4-32k-0613', 'name': 'GPT-4-32K-0613'},
 }

-class Aivvm(BaseProvider):
+class Aivvm(AbstractProvider):
     url = 'https://chat.aivvm.com'
     supports_stream = True
     working = False
@@ -44,7 +45,7 @@ class Aivvm(BaseProvider):
         "temperature" : kwargs.get("temperature", 0.7)
     }

-    data = dumps(json_data)
+    data = json.dumps(json_data)

     headers = {
         "accept" : "text/event-stream",
@@ -7,10 +7,10 @@ import time
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class DfeHub(BaseProvider):
+class DfeHub(AbstractProvider):
     url = "https://chat.dfehub.com/"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -2,14 +2,13 @@ from __future__ import annotations

 import json
 import random

 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider

-
-class EasyChat(BaseProvider):
+class EasyChat(AbstractProvider):
     url: str = "https://free.easychat.work"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -6,10 +6,10 @@ from abc import ABC, abstractmethod
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Equing(BaseProvider):
+class Equing(AbstractProvider):
     url: str = 'https://next.eqing.tech/'
     working = False
     supports_stream = True
@@ -2,15 +2,13 @@ from __future__ import annotations

 import json
 import random
-from abc import ABC, abstractmethod

 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class FastGpt(BaseProvider):
+class FastGpt(AbstractProvider):
     url: str = 'https://chat9.fastgpt.me/'
     working = False
     needs_auth = False
@@ -19,7 +17,6 @@ class FastGpt(BaseProvider):
     supports_gpt_4 = False

     @staticmethod
-    @abstractmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
@@ -5,10 +5,10 @@ import json
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Forefront(BaseProvider):
+class Forefront(AbstractProvider):
     url = "https://forefront.com"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -11,10 +11,10 @@ except ImportError:
     from Cryptodome.Cipher import AES

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class GetGpt(BaseProvider):
+class GetGpt(AbstractProvider):
     url = 'https://chat.getgpt.world/'
     supports_stream = True
     working = False
@@ -5,10 +5,10 @@ import json
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Lockchat(BaseProvider):
+class Lockchat(AbstractProvider):
     url: str = "http://supertest.lockchat.app"
     supports_stream = True
     supports_gpt_35_turbo = True
@@ -5,10 +5,10 @@ import uuid
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class V50(BaseProvider):
+class V50(AbstractProvider):
     url = 'https://p5.v50.ltd'
     supports_gpt_35_turbo = True
     supports_stream = False
@@ -2,13 +2,11 @@ from __future__ import annotations

 import json
 import requests
-from .base_provider import BaseProvider
-from ..typing import Messages, CreateResult
-from .helper import get_cookies
+from ..base_provider import AbstractProvider
+from ...typing import Messages, CreateResult


-class VoiGpt(BaseProvider):
+class VoiGpt(AbstractProvider):
     """
     VoiGpt - A provider for VoiGpt.com

@@ -5,10 +5,10 @@ import random
 import requests

 from ...typing import Any, CreateResult
-from ..base_provider import BaseProvider, format_prompt
+from ..base_provider import AbstractProvider, format_prompt


-class Wuguokai(BaseProvider):
+class Wuguokai(AbstractProvider):
     url = 'https://chat.wuguokai.xyz'
     supports_gpt_35_turbo = True
     working = False
@@ -1,6 +1,5 @@
 from __future__ import annotations

-import sys
 import asyncio
 import webbrowser
 import random
@@ -8,7 +7,7 @@ import string
 import secrets
 import os
 from os import path
-from asyncio import AbstractEventLoop
+from asyncio import AbstractEventLoop, BaseEventLoop
 from platformdirs import user_config_dir
 from browser_cookie3 import (
     chrome,
@@ -34,7 +33,8 @@ _cookies: Dict[str, Dict[str, str]] = {}
 def get_event_loop() -> AbstractEventLoop:
     try:
         loop = asyncio.get_event_loop()
-        loop._check_closed()
+        if isinstance(loop, BaseEventLoop):
+            loop._check_closed()
     except RuntimeError:
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
@@ -8,11 +8,11 @@ from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.common.keys import Keys

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

-class Bard(BaseProvider):
+class Bard(AbstractProvider):
     url = "https://bard.google.com"
     working = True
     needs_auth = True
@@ -8,6 +8,9 @@ from ...typing import AsyncResult, Messages
 from ..base_provider import AsyncGeneratorProvider
 from ..helper import format_prompt, get_cookies

+map = {
+    "openchat/openchat_3.5": "openchat/openchat-3.5-1210",
+}

 class HuggingChat(AsyncGeneratorProvider):
     url = "https://huggingface.co/chat"
@@ -25,7 +28,10 @@ class HuggingChat(AsyncGeneratorProvider):
         cookies: dict = None,
         **kwargs
     ) -> AsyncResult:
-        model = model if model else cls.model
+        if not model:
+            model = cls.model
+        elif model in map:
+            model = map[model]
         if not cookies:
             cookies = get_cookies(".huggingface.co")

@@ -3,7 +3,7 @@ from __future__ import annotations
 import time

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

@@ -20,7 +20,7 @@ models = {
     "palm": {"name": "Google-PaLM"},
 }

-class Poe(BaseProvider):
+class Poe(AbstractProvider):
     url = "https://poe.com"
     working = True
     needs_auth = True
@@ -5,10 +5,10 @@ import json
 import requests

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider


-class Raycast(BaseProvider):
+class Raycast(AbstractProvider):
     url = "https://raycast.com"
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
@@ -3,7 +3,7 @@ from __future__ import annotations
 import time

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

@@ -31,7 +31,7 @@ models = {
     "qwen-7b-chat": "Qwen 7B"
 }

-class Theb(BaseProvider):
+class Theb(AbstractProvider):
     url = "https://beta.theb.ai"
     working = True
     supports_gpt_35_turbo = True
@@ -3,7 +3,7 @@ from __future__ import annotations
 import requests

 from ...typing import Any, CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider

 models = {
     "theb-ai": "TheB.AI",
@@ -29,7 +29,7 @@ models = {
     "qwen-7b-chat": "Qwen 7B"
 }

-class ThebApi(BaseProvider):
+class ThebApi(AbstractProvider):
     url = "https://theb.ai"
     working = True
     needs_auth = True
@@ -2,26 +2,13 @@ from __future__ import annotations

 import asyncio
 import random
-from typing import List, Type, Dict
 from ..typing import CreateResult, Messages
-from .base_provider import BaseProvider, AsyncProvider
+from ..base_provider import BaseRetryProvider
 from .. import debug
 from ..errors import RetryProviderError, RetryNoProviderError


-class RetryProvider(AsyncProvider):
-    __name__: str = "RetryProvider"
-    supports_stream: bool = True
-
-    def __init__(
-        self,
-        providers: List[Type[BaseProvider]],
-        shuffle: bool = True
-    ) -> None:
-        self.providers: List[Type[BaseProvider]] = providers
-        self.shuffle: bool = shuffle
-        self.working = True
-
+class RetryProvider(BaseRetryProvider):
     def create_completion(
         self,
         model: str,
@@ -36,20 +23,18 @@ class RetryProvider(AsyncProvider):
         if self.shuffle:
             random.shuffle(providers)

-        self.exceptions: Dict[str, Exception] = {}
+        self.exceptions = {}
         started: bool = False
         for provider in providers:
+            self.last_provider = provider
             try:
                 if debug.logging:
                     print(f"Using {provider.__name__} provider")

                 for token in provider.create_completion(model, messages, stream, **kwargs):
                     yield token
                     started = True

                 if started:
                     return

             except Exception as e:
                 self.exceptions[provider.__name__] = e
                 if debug.logging:
@@ -69,8 +54,9 @@ class RetryProvider(AsyncProvider):
         if self.shuffle:
             random.shuffle(providers)

-        self.exceptions: Dict[str, Exception] = {}
+        self.exceptions = {}
         for provider in providers:
+            self.last_provider = provider
             try:
                 return await asyncio.wait_for(
                     provider.create_async(model, messages, **kwargs),
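Recording last_provider on each attempt makes the retry wrapper inspectable after the fact. A sketch (provider choice illustrative):

from g4f.Provider import RetryProvider, GeekGpt, DeepInfra

retry = RetryProvider([GeekGpt, DeepInfra], shuffle=False)
text = "".join(retry.create_completion(
    "gpt-3.5-turbo",
    [{"role": "user", "content": "Hi"}],
    stream=False,
))
print(retry.last_provider.__name__)  # the provider that actually answered
print(retry.exceptions)              # exceptions from providers that failed first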
@@ -4,11 +4,11 @@ import time
 from urllib.parse import quote

 from ...typing import CreateResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ..helper import format_prompt
 from ...webdriver import WebDriver, WebDriverSession

-class Phind(BaseProvider):
+class Phind(AbstractProvider):
     url = "https://www.phind.com"
     working = True
     supports_gpt_4 = True
@@ -3,11 +3,11 @@ from __future__ import annotations
 from urllib.parse import unquote

 from ...typing import AsyncResult, Messages
-from ..base_provider import BaseProvider
+from ..base_provider import AbstractProvider
 from ...webdriver import WebDriver
 from ...requests import Session, get_session_from_browser

-class AiChatting(BaseProvider):
+class AiChatting(AbstractProvider):
     url = "https://www.aichatting.net"
     supports_gpt_35_turbo = True
     _session: Session = None
@@ -4,19 +4,20 @@ import os

 from .errors import *
 from .models import Model, ModelUtils, _all_models
-from .Provider import BaseProvider, AsyncGeneratorProvider, RetryProvider, ProviderUtils
-from .typing import Messages, CreateResult, AsyncResult, Union, List
-from . import debug
+from .Provider import AsyncGeneratorProvider, ProviderUtils
+from .typing import Messages, CreateResult, AsyncResult, Union
+from . import debug, version
+from .base_provider import BaseRetryProvider, ProviderType

 def get_model_and_provider(model : Union[Model, str],
-                           provider : Union[type[BaseProvider], str, None],
+                           provider : Union[ProviderType, str, None],
                            stream : bool,
-                           ignored : List[str] = None,
+                           ignored : list[str] = None,
                            ignore_working: bool = False,
-                           ignore_stream: bool = False) -> tuple[Model, type[BaseProvider]]:
+                           ignore_stream: bool = False) -> tuple[str, ProviderType]:
     if debug.version_check:
         debug.version_check = False
-        debug.check_pypi_version()
+        version.utils.check_pypi_version()

     if isinstance(provider, str):
         if provider in ProviderUtils.convert:
@@ -24,29 +25,36 @@ def get_model_and_provider(model : Union[Model, str],
         else:
             raise ProviderNotFoundError(f'Provider not found: {provider}')

-    if isinstance(model, str):
-        if model in ModelUtils.convert:
-            model = ModelUtils.convert[model]
-        else:
-            raise ModelNotFoundError(f'The model: {model} does not exist')
-
     if not provider:
+        if isinstance(model, str):
+            if model in ModelUtils.convert:
+                model = ModelUtils.convert[model]
+            else:
+                raise ModelNotFoundError(f'Model not found: {model}')
         provider = model.best_provider

-    if isinstance(provider, RetryProvider) and ignored:
-        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
-
     if not provider:
         raise ProviderNotFoundError(f'No provider found for model: {model}')

-    if not provider.working and not ignore_working:
+    if isinstance(model, Model):
+        model = model.name
+
+    if ignored and isinstance(provider, BaseRetryProvider):
+        provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
+    if not ignore_working and not provider.working:
         raise ProviderNotWorkingError(f'{provider.__name__} is not working')

     if not ignore_stream and not provider.supports_stream and stream:
         raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')

     if debug.logging:
-        print(f'Using {provider.__name__} provider')
+        if model:
+            print(f'Using {provider.__name__} provider and {model} model')
+        else:
+            print(f'Using {provider.__name__} provider')
+
+    debug.last_provider = provider

     return model, provider

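The reworked resolution order — resolve a provider string first, map the model only when no provider was forced, then normalize Model instances to their name — can be exercised directly. A sketch, assuming the function stays importable from the package root:

from g4f import get_model_and_provider

model, provider = get_model_and_provider("gpt-3.5-turbo", None, stream=False)
print(model)              # now a plain string, per the new tuple[str, ProviderType]
print(provider.__name__)  # whichever provider the model maps to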
@@ -54,10 +62,10 @@ class ChatCompletion:
     @staticmethod
     def create(model : Union[Model, str],
                messages : Messages,
-               provider : Union[type[BaseProvider], str, None] = None,
+               provider : Union[ProviderType, str, None] = None,
                stream : bool = False,
                auth : Union[str, None] = None,
-               ignored : List[str] = None,
+               ignored : list[str] = None,
                ignore_working: bool = False,
                ignore_stream_and_auth: bool = False,
                **kwargs) -> Union[CreateResult, str]:
@@ -75,32 +83,33 @@ class ChatCompletion:
         if proxy:
             kwargs['proxy'] = proxy

-        result = provider.create_completion(model.name, messages, stream, **kwargs)
+        result = provider.create_completion(model, messages, stream, **kwargs)
         return result if stream else ''.join(result)

     @staticmethod
-    async def create_async(model : Union[Model, str],
+    def create_async(model : Union[Model, str],
                      messages : Messages,
-                     provider : Union[type[BaseProvider], str, None] = None,
+                     provider : Union[ProviderType, str, None] = None,
                      stream : bool = False,
-                     ignored : List[str] = None,
+                     ignored : list[str] = None,
                      **kwargs) -> Union[AsyncResult, str]:

         model, provider = get_model_and_provider(model, provider, False, ignored)

         if stream:
             if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
-                return await provider.create_async_generator(model.name, messages, **kwargs)
+                return provider.create_async_generator(model, messages, **kwargs)
             raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument in "create_async"')

-        return await provider.create_async(model.name, messages, **kwargs)
+        return provider.create_async(model, messages, **kwargs)

 class Completion:
     @staticmethod
     def create(model : Union[Model, str],
                prompt : str,
-               provider : Union[type[BaseProvider], None] = None,
+               provider : Union[ProviderType, None] = None,
                stream : bool = False,
-               ignored : List[str] = None, **kwargs) -> Union[CreateResult, str]:
+               ignored : list[str] = None, **kwargs) -> Union[CreateResult, str]:

         allowed_models = [
             'code-davinci-002',
@@ -111,10 +120,18 @@ class Completion:
             'text-davinci-003'
         ]
         if model not in allowed_models:
-            raise ModelNotAllowed(f'Can\'t use {model} with Completion.create()')
+            raise ModelNotAllowedError(f'Can\'t use {model} with Completion.create()')

         model, provider = get_model_and_provider(model, provider, stream, ignored)

-        result = provider.create_completion(model.name, [{"role": "user", "content": prompt}], stream, **kwargs)
+        result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream, **kwargs)

         return result if stream else ''.join(result)
+
+def get_last_provider(as_dict: bool = False) -> ProviderType:
+    last = debug.last_provider
+    if isinstance(last, BaseRetryProvider):
+        last = last.last_provider
+    if last and as_dict:
+        return {"name": last.__name__, "url": last.url}
+    return last
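get_last_provider is the public entry point for the provider recorded in debug.last_provider; with as_dict=True it returns the JSON-friendly form used by the api and gui changes below:

import g4f

g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hi"}],
)
print(g4f.get_last_provider())      # provider class used by the last call
print(g4f.get_last_provider(True))  # {"name": ..., "url": ...}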
@@ -1,23 +1,25 @@
 import ast
 import logging

-from fastapi import FastAPI, Response, Request
-from fastapi.responses import StreamingResponse
-from typing import List, Union, Any, Dict, AnyStr
-#from ._tokenizer import tokenize
-from .. import BaseProvider
-
 import time
 import json
 import random
 import string
 import uvicorn
 import nest_asyncio

+from fastapi import FastAPI, Response, Request
+from fastapi.responses import StreamingResponse
+from typing import List, Union, Any, Dict, AnyStr
+#from ._tokenizer import tokenize
+
 import g4f
+from .. import debug
+
+debug.logging = True

 class Api:
     def __init__(self, engine: g4f, debug: bool = True, sentry: bool = False,
-                 list_ignored_providers: List[Union[str, BaseProvider]] = None) -> None:
+                 list_ignored_providers: List[str] = None) -> None:
         self.engine = engine
         self.debug = debug
         self.sentry = sentry
@@ -75,7 +77,10 @@ class Api:
             }

             # item contains byte keys, and dict.get suppresses error
-            item_data.update({key.decode('utf-8') if isinstance(key, bytes) else key: str(value) for key, value in (item or {}).items()})
+            item_data.update({
+                key.decode('utf-8') if isinstance(key, bytes) else key: str(value)
+                for key, value in (item or {}).items()
+            })
             # messages is str, need dict
             if isinstance(item_data.get('messages'), str):
                 item_data['messages'] = ast.literal_eval(item_data.get('messages'))
@@ -96,7 +101,12 @@ class Api:
             )
         except Exception as e:
             logging.exception(e)
-            return Response(content=json.dumps({"error": "An error occurred while generating the response."}, indent=4), media_type="application/json")
+            content = json.dumps({
+                "error": {"message": f"An error occurred while generating the response:\n{e}"},
+                "model": model,
+                "provider": g4f.get_last_provider(True)
+            })
+            return Response(content=content, status_code=500, media_type="application/json")
         completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
         completion_timestamp = int(time.time())
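A failed request now comes back as HTTP 500 carrying the exception text, the requested model, and the provider that was used; the body has roughly this shape (values illustrative):

{
    "error": {"message": "An error occurred while generating the response:\n<exception text>"},
    "model": "gpt-3.5-turbo",
    "provider": {"name": "DeepInfra", "url": "https://deepinfra.com"}
}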
@@ -109,6 +119,7 @@ class Api:
                 'object': 'chat.completion',
                 'created': completion_timestamp,
                 'model': model,
+                'provider': g4f.get_last_provider(True),
                 'choices': [
                     {
                         'index': 0,
@@ -136,6 +147,7 @@ class Api:
                     'object': 'chat.completion.chunk',
                     'created': completion_timestamp,
                     'model': model,
+                    'provider': g4f.get_last_provider(True),
                     'choices': [
                         {
                             'index': 0,
@@ -147,16 +159,14 @@ class Api:
                             }
                         ],
                     }
-                    content = json.dumps(completion_data, separators=(',', ':'))
-                    yield f'data: {content}\n\n'
+                    yield f'data: {json.dumps(completion_data)}\n\n'
                     time.sleep(0.03)

                 end_completion_data = {
                     'id': f'chatcmpl-{completion_id}',
                     'object': 'chat.completion.chunk',
                     'created': completion_timestamp,
                     'model': model,
+                    'provider': g4f.get_last_provider(True),
                     'choices': [
                         {
                             'index': 0,
@@ -165,15 +175,17 @@ class Api:
                             }
                         ],
                     }
-                content = json.dumps(end_completion_data, separators=(',', ':'))
-                yield f'data: {content}\n\n'
+                yield f'data: {json.dumps(end_completion_data)}\n\n'

             except GeneratorExit:
                 pass
             except Exception as e:
                 logging.exception(e)
-                content=json.dumps({"error": "An error occurred while generating the response."}, indent=4)
-                yield f'data: {content}\n\n'
+                content = json.dumps({
+                    "error": {"message": f"An error occurred while generating the response:\n{e}"},
+                    "model": model,
+                    "provider": g4f.get_last_provider(True),
+                })
+                yield f'data: {content}'

         return StreamingResponse(streaming(), media_type="text/event-stream")
g4f/base_provider.py (new file, +54 lines)
@@ -0,0 +1,54 @@
+from abc import ABC, abstractmethod
+from .typing import Messages, CreateResult, Union
+
+class BaseProvider(ABC):
+    url: str
+    working: bool = False
+    needs_auth: bool = False
+    supports_stream: bool = False
+    supports_gpt_35_turbo: bool = False
+    supports_gpt_4: bool = False
+    supports_message_history: bool = False
+    params: str
+
+    @classmethod
+    @abstractmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: Messages,
+        stream: bool,
+        **kwargs
+    ) -> CreateResult:
+        raise NotImplementedError()
+
+    @classmethod
+    @abstractmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> str:
+        raise NotImplementedError()
+
+    @classmethod
+    def get_dict(cls):
+        return {'name': cls.__name__, 'url': cls.url}
+
+class BaseRetryProvider(BaseProvider):
+    __name__: str = "RetryProvider"
+    supports_stream: bool = True
+
+    def __init__(
+        self,
+        providers: list[type[BaseProvider]],
+        shuffle: bool = True
+    ) -> None:
+        self.providers: list[type[BaseProvider]] = providers
+        self.shuffle: bool = shuffle
+        self.working: bool = True
+        self.exceptions: dict[str, Exception] = {}
+        self.last_provider: type[BaseProvider] = None
+
+ProviderType = Union[type[BaseProvider], BaseRetryProvider]

g4f/debug.py
@@ -1,45 +1,5 @@
-from os import environ
-import requests
-from importlib.metadata import version as get_package_version, PackageNotFoundError
-from subprocess import check_output, CalledProcessError, PIPE
-from .errors import VersionNotFoundError
+from .base_provider import ProviderType

-logging = False
-version_check = True
+logging: bool = False
+version_check: bool = True
+last_provider: ProviderType = None

-def get_version() -> str:
-    # Read from package manager
-    try:
-        return get_package_version("g4f")
-    except PackageNotFoundError:
-        pass
-    # Read from docker environment
-    current_version = environ.get("G4F_VERSION")
-    if current_version:
-        return current_version
-    # Read from git repository
-    try:
-        command = ["git", "describe", "--tags", "--abbrev=0"]
-        return check_output(command, text=True, stderr=PIPE).strip()
-    except CalledProcessError:
-        pass
-    raise VersionNotFoundError("Version not found")
-
-def get_latest_version() -> str:
-    if environ.get("G4F_VERSION"):
-        url = "https://registry.hub.docker.com/v2/repositories/"
-        url += "hlohaus789/g4f"
-        url += "/tags?page_size=2&ordering=last_updated"
-        response = requests.get(url).json()
-        return response["results"][1]["name"]
-    response = requests.get("https://pypi.org/pypi/g4f/json").json()
-    return response["info"]["version"]
-
-def check_pypi_version() -> None:
-    try:
-        version = get_version()
-        latest_version = get_latest_version()
-        if version != latest_version:
-            print(f'New pypi version: {latest_version} (current: {version}) | pip install -U g4f')
-    except Exception as e:
-        print(f'Failed to check g4f pypi version: {e}')
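The version logic removed here moves into the new version module with a VersionUtils helper (per the commit message). Judging from the call sites elsewhere in this commit, it is used as:

from g4f import version

print(version.utils.current_version)  # replaces debug.get_version()
version.utils.check_pypi_version()    # replaces debug.check_pypi_version()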
@@ -13,7 +13,7 @@ class AuthenticationRequiredError(Exception):
 class ModelNotFoundError(Exception):
     pass

-class ModelNotAllowed(Exception):
+class ModelNotAllowedError(Exception):
     pass

 class RetryProviderError(Exception):
@@ -295,11 +295,12 @@ body {
     gap: 18px;
 }

-.message .content p,
-.message .content li,
-.message .content code {
+.message .content,
+.message .content a:link,
+.message .content a:visited{
     font-size: 15px;
     line-height: 1.3;
+    color: var(--colour-3);
 }
 .message .content pre {
     white-space: pre-wrap;
@@ -73,7 +73,7 @@ const ask_gpt = async () => {
     provider = document.getElementById("provider");
     model = document.getElementById("model");
     prompt_lock = true;
-    window.text = ``;
+    window.text = '';

     stop_generating.classList.remove(`stop_generating-hidden`);

@@ -88,10 +88,13 @@ const ask_gpt = async () => {
                 ${gpt_image} <i class="fa-regular fa-phone-arrow-down-left"></i>
             </div>
             <div class="content" id="gpt_${window.token}">
-                <div id="cursor"></div>
+                <div class="provider"></div>
+                <div class="content_inner"><div id="cursor"></div></div>
             </div>
         </div>
     `;
+    content = document.getElementById(`gpt_${window.token}`);
+    content_inner = content.querySelector('.content_inner');

     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
@@ -123,28 +126,38 @@ const ask_gpt = async () => {
         await new Promise((r) => setTimeout(r, 1000));
         window.scrollTo(0, 0);

-        const reader = response.body.getReader();
+        const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();

+        error = provider = null;
         while (true) {
             const { value, done } = await reader.read();
             if (done) break;
-            chunk = new TextDecoder().decode(value);
-            text += chunk;
-            document.getElementById(`gpt_${window.token}`).innerHTML = markdown_render(text);
-            document.querySelectorAll(`code`).forEach((el) => {
-                hljs.highlightElement(el);
-            });
+            for (const line of value.split("\n")) {
+                if (!line) continue;
+                const message = JSON.parse(line);
+                if (message["type"] == "content") {
+                    text += message["content"];
+                } else if (message["type"] == "provider") {
+                    provider = message["provider"];
+                    content.querySelector('.provider').innerHTML =
+                        '<a href="' + provider.url + '" target="_blank">' + provider.name + "</a>"
+                } else if (message["type"] == "error") {
+                    error = message["error"];
+                }
+            }
+            if (error) {
+                console.error(error);
+                content_inner.innerHTML = "An error occurred, please try again; if the problem persists, please use another model or provider";
+            } else {
+                content_inner.innerHTML = markdown_render(text);
+                document.querySelectorAll('code').forEach((el) => {
+                    hljs.highlightElement(el);
+                });
+            }

             window.scrollTo(0, 0);
             message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
         }

-        if (text.includes(`G4F_ERROR`)) {
-            console.log("response", text);
-            document.getElementById(`gpt_${window.token}`).innerHTML = "An error occured, please try again, if the problem persists, please use a other model or provider";
-        }
     } catch (e) {
         console.log(e);

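The loop above consumes the new wire format: one JSON object per line, discriminated by "type". A minimal Python sketch of an equivalent consumer, assuming a hypothetical endpoint URL and payload; only the message shapes come from the diff:

import json
import requests

def stream_chat(url: str, payload: dict) -> str:
    """Consume the line-delimited JSON stream emitted by the backend."""
    text = ""
    with requests.post(url, json=payload, stream=True) as response:
        response.raise_for_status()
        for line in response.iter_lines():
            if not line:
                continue  # skip blank lines, as the JS loop does
            message = json.loads(line)
            if message["type"] == "content":
                text += message["content"]  # incremental completion text
            elif message["type"] == "provider":
                print("provider:", message["provider"]["name"])
            elif message["type"] == "error":
                raise RuntimeError(message["error"])
    return text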
@@ -153,13 +166,13 @@ const ask_gpt = async () => {

         if (e.name != `AbortError`) {
             text = `oops ! something went wrong, please try again / reload. [stacktrace in console]`;
-            document.getElementById(`gpt_${window.token}`).innerHTML = text;
+            content_inner.innerHTML = text;
         } else {
-            document.getElementById(`gpt_${window.token}`).innerHTML += ` [aborted]`;
+            content_inner.innerHTML += ` [aborted]`;
             text += ` [aborted]`
         }
     }
-    add_message(window.conversation_id, "assistant", text);
+    add_message(window.conversation_id, "assistant", text, provider);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     prompt_lock = false;
@@ -259,10 +272,11 @@ const load_conversation = async (conversation_id) => {
                     }
                 </div>
                 <div class="content">
-                    ${item.role == "assistant"
-                        ? markdown_render(item.content)
-                        : item.content
+                    ${item.provider
+                        ? '<div class="provider"><a href="' + item.provider.url + '" target="_blank">' + item.provider.name + '</a></div>'
+                        : ''
                     }
+                    <div class="content_inner">${markdown_render(item.content)}</div>
                 </div>
             </div>
         `;
@@ -323,12 +337,13 @@ const remove_last_message = async (conversation_id) => {
     );
 };

-const add_message = async (conversation_id, role, content) => {
+const add_message = async (conversation_id, role, content, provider) => {
     const conversation = await get_conversation(conversation_id);

     conversation.items.push({
         role: role,
         content: content,
+        provider: provider
     });

     localStorage.setItem(
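add_message() now persists the provider alongside role and content. An illustrative item as stored in localStorage, with field names from the diff and invented values; the name/url keys match what the rendering code above reads:

# Illustrative conversation item; the values are made up.
item = {
    "role": "assistant",
    "content": "Hello! How can I help you today?",
    "provider": {                        # undefined for user messages
        "name": "DeepInfra",             # shown as the link text in the GUI
        "url": "https://deepinfra.com",  # used as the link target
    },
}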
g4f/gui/server/backend.py
@@ -4,7 +4,7 @@ from g4f.Provider import __providers__
 import json
 from flask import request, Flask
 from .internet import get_search_message
-from g4f import debug
+from g4f import debug, version

 debug.logging = True

@@ -53,8 +53,8 @@ class Backend_Api:

     def version(self):
         return {
-            "version": debug.get_version(),
-            "lastet_version": debug.get_latest_version(),
+            "version": version.utils.current_version,
+            "lastet_version": version.utils.latest_version,
         }

     def _gen_title(self):
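The version endpoint now delegates to the new version module. An illustrative response with made-up version strings; note the commit keeps the pre-existing misspelled "lastet_version" key rather than renaming it:

# Illustrative payload returned by Backend_Api.version()
example_response = {
    "version": "0.1.9.0",         # version.utils.current_version
    "lastet_version": "0.1.9.5",  # version.utils.latest_version (key spelling as in the code)
}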
@@ -65,7 +65,7 @@ class Backend_Api:
     def _conversation(self):
         #jailbreak = request.json['jailbreak']
         messages = request.json['meta']['content']['parts']
-        if request.json['internet_access']:
+        if request.json.get('internet_access'):
             messages[-1]["content"] = get_search_message(messages[-1]["content"])
         model = request.json.get('model')
         model = model if model else g4f.models.default
@@ -74,20 +74,30 @@ class Backend_Api:

         def try_response():
             try:
-                yield from g4f.ChatCompletion.create(
+                first = True
+                for chunk in g4f.ChatCompletion.create(
                     model=model,
                     provider=provider,
                     messages=messages,
                     stream=True,
                     ignore_stream_and_auth=True
-                )
+                ):
+                    if first:
+                        first = False
+                        yield json.dumps({
+                            'type' : 'provider',
+                            'provider': g4f.get_last_provider(True)
+                        }) + "\n"
+                    yield json.dumps({
+                        'type' : 'content',
+                        'content': chunk,
+                    }) + "\n"

             except Exception as e:
-                print(e)
                 yield json.dumps({
-                    'code'   : 'G4F_ERROR',
-                    '_action': '_ask',
-                    'success': False,
-                    'error'  : f'{e.__class__.__name__}: {e}'
+                    'type' : 'error',
+                    'error': f'{e.__class__.__name__}: {e}'
                 })
+                raise e

         return self.app.response_class(try_response(), mimetype='text/event-stream')
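A toy stand-in for try_response() can be useful for exercising the frontend parser without a live provider. A sketch, assuming g4f.get_last_provider(True) serializes to the name/url mapping the GUI reads; the chunks are invented:

import json

def fake_try_response():
    # Provider announcement first, exactly once, then content chunks.
    yield json.dumps({'type': 'provider',
                      'provider': {'name': 'DeepInfra', 'url': 'https://deepinfra.com'}}) + "\n"
    for chunk in ("Hello", " world", "!"):
        yield json.dumps({'type': 'content', 'content': chunk}) + "\n"

for line in fake_try_response():
    print(line, end="")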
g4f/models.py
@@ -1,7 +1,6 @@
 from __future__ import annotations
 from dataclasses import dataclass
-from .typing import Union
-from .Provider import BaseProvider, RetryProvider
+from .Provider import RetryProvider, ProviderType
 from .Provider import (
     Chatgpt4Online,
     ChatgptDemoAi,
@@ -36,7 +35,7 @@ from .Provider import (
 class Model:
     name: str
     base_provider: str
-    best_provider: Union[type[BaseProvider], RetryProvider] = None
+    best_provider: ProviderType = None

     @staticmethod
     def __all__() -> list[str]:
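ProviderType itself is defined in g4f/Provider and is not part of this excerpt. Given the Type import added to g4f/typing.py below and the annotation it replaces here, a plausible reading is the following sketch; the import paths and the exact alias are assumptions:

from typing import Type, Union
from .base_provider import BaseProvider    # import path is illustrative
from .retry_provider import RetryProvider  # import path is illustrative

# Assumed definition: one alias covering both a provider class and a
# RetryProvider instance, replacing the old inline Union annotation.
ProviderType = Union[Type[BaseProvider], RetryProvider]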
@@ -101,28 +100,39 @@ gpt_4_turbo = Model(
 llama2_7b = Model(
     name = "meta-llama/Llama-2-7b-chat-hf",
     base_provider = 'huggingface',
-    best_provider = RetryProvider([Llama2, DeepInfra]))
+    best_provider = RetryProvider([Llama2, DeepInfra])
+)

 llama2_13b = Model(
     name = "meta-llama/Llama-2-13b-chat-hf",
     base_provider = 'huggingface',
-    best_provider = RetryProvider([Llama2, DeepInfra]))
+    best_provider = RetryProvider([Llama2, DeepInfra])
+)

 llama2_70b = Model(
     name = "meta-llama/Llama-2-70b-chat-hf",
     base_provider = "huggingface",
-    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat]))
+    best_provider = RetryProvider([Llama2, DeepInfra, HuggingChat])
+)

 # Mistral
 mixtral_8x7b = Model(
     name = "mistralai/Mixtral-8x7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = HuggingChat)
+    best_provider = RetryProvider([DeepInfra, HuggingChat])
+)

 mistral_7b = Model(
     name = "mistralai/Mistral-7B-Instruct-v0.1",
     base_provider = "huggingface",
-    best_provider = HuggingChat)
+    best_provider = RetryProvider([DeepInfra, HuggingChat])
+)
+
+openchat_35 = Model(
+    name = "openchat/openchat_3.5",
+    base_provider = "huggingface",
+    best_provider = RetryProvider([DeepInfra, HuggingChat])
+)

 # Bard
 palm = Model(
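With RetryProvider as best_provider, a failing provider is skipped and the next candidate is tried, so 'mixtral-8x7b' no longer depends on HuggingChat alone. A short usage sketch; the model key comes from the ModelUtils mapping below, and get_last_provider is the helper this commit introduces:

import g4f

response = g4f.ChatCompletion.create(
    model="mixtral-8x7b",  # resolved via ModelUtils to the Model defined above
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response)
print("answered by:", g4f.get_last_provider())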
@@ -313,6 +323,7 @@ class ModelUtils:
         # Mistral
         'mixtral-8x7b': mixtral_8x7b,
         'mistral-7b': mistral_7b,
+        'openchat_3.5': openchat_35,

         # Bard
         'palm2' : palm,
g4f/typing.py
@@ -1,5 +1,5 @@
 import sys
-from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict
+from typing import Any, AsyncGenerator, Generator, NewType, Tuple, Union, List, Dict, Type

 if sys.version_info >= (3, 8):
     from typing import TypedDict
g4f/version.py (new file, 47 lines)
@@ -0,0 +1,47 @@
+from os import environ
+import requests
+from functools import cached_property
+from importlib.metadata import version as get_package_version, PackageNotFoundError
+from subprocess import check_output, CalledProcessError, PIPE
+from .errors import VersionNotFoundError
+
+
+class VersionUtils():
+    @cached_property
+    def current_version(self) -> str:
+        # Read from package manager
+        try:
+            return get_package_version("g4f")
+        except PackageNotFoundError:
+            pass
+        # Read from docker environment
+        version = environ.get("G4F_VERSION")
+        if version:
+            return version
+        # Read from git repository
+        try:
+            command = ["git", "describe", "--tags", "--abbrev=0"]
+            return check_output(command, text=True, stderr=PIPE).strip()
+        except CalledProcessError:
+            pass
+        raise VersionNotFoundError("Version not found")
+
+    @cached_property
+    def latest_version(self) -> str:
+        try:
+            get_package_version("g4f")
+            response = requests.get("https://pypi.org/pypi/g4f/json").json()
+            return response["info"]["version"]
+        except PackageNotFoundError:
+            url = "https://api.github.com/repos/xtekky/gpt4free/releases/latest"
+            response = requests.get(url).json()
+            return response["tag_name"]
+
+    def check_pypi_version(self) -> None:
+        try:
+            if self.current_version != self.latest_version:
+                print(f'New pypi version: {self.latest_version} (current: {self.current_version}) | pip install -U g4f')
+        except Exception as e:
+            print(f'Failed to check g4f pypi version: {e}')
+
+utils = VersionUtils()
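Everything in the following usage sketch comes from the new file itself: current_version resolves in order from installed package metadata, the G4F_VERSION environment variable, then git describe, and latest_version falls back from PyPI to the GitHub releases API:

from g4f import version

print("current:", version.utils.current_version)
print("latest :", version.utils.latest_version)
version.utils.check_pypi_version()  # prints an update hint when the versions differ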