Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-10-31 03:26:22 +08:00)

Commit: ~ | code styling
@@ -15,9 +15,8 @@ class AItianhu(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         base = ""
         for message in messages:
             base += "%s: %s\n" % (message["role"], message["content"])

@@ -7,8 +7,8 @@ from .base_provider import BaseProvider
 
 
 class Acytoo(BaseProvider):
-    url = "https://chat.acytoo.com/"
-    working = True
+    url = 'https://chat.acytoo.com/'
+    working = True
     supports_gpt_35_turbo = True
 
     @classmethod
@@ -16,33 +16,33 @@ class Acytoo(BaseProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        headers = _create_header()
-        payload = _create_payload(messages, kwargs.get('temperature', 0.5))
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        response = requests.post(f'{cls.url}api/completions',
+            headers=_create_header(), json=_create_payload(messages, kwargs.get('temperature', 0.5)))
 
-        response = requests.post("{cls.url}api/completions", headers=headers, json=payload)
         response.raise_for_status()
-        response.encoding = "utf-8"
+        response.encoding = 'utf-8'
 
         yield response.text
 
 
 def _create_header():
     return {
-        "accept": "*/*",
-        "content-type": "application/json",
+        'accept': '*/*',
+        'content-type': 'application/json',
     }
 
 
 def _create_payload(messages: list[dict[str, str]], temperature):
     payload_messages = [
-        message | {"createdAt": int(time.time()) * 1000} for message in messages
+        message | {'createdAt': int(time.time()) * 1000} for message in messages
     ]
 
     return {
-        "key": "",
-        "model": "gpt-3.5-turbo",
-        "messages": payload_messages,
-        "temperature": temperature,
-        "password": "",
+        'key' : '',
+        'model' : 'gpt-3.5-turbo',
+        'messages' : payload_messages,
+        'temperature' : temperature,
+        'password' : ''
     }

@@ -5,19 +5,17 @@ from .base_provider import BaseProvider
 
 
 class Aichat(BaseProvider):
-    url = "https://chat-gpt.org/chat"
-    working = True
+    url = "https://chat-gpt.org/chat"
+    working = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        base = ""
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        base = ""
         for message in messages:
             base += "%s: %s\n" % (message["role"], message["content"])
         base += "assistant:"

@@ -9,20 +9,18 @@ import requests
 from ..typing import SHA256, Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Ails(BaseProvider):
-    url: str = "https://ai.ls"
-    working = True
-    supports_stream = True
+    url: str = "https://ai.ls"
+    working = True
+    supports_stream = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
             "authority": "api.caipacity.com",
             "accept": "*/*",

@@ -19,9 +19,7 @@ class Bard(AsyncProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        cookies: dict = get_cookies(".google.com"),
-        **kwargs: Any,
-    ) -> str:
+        cookies: dict = get_cookies(".google.com"), **kwargs: Any,) -> str:
+
         formatted = "\n".join(
             ["%s: %s" % (message["role"], message["content"]) for message in messages]

@@ -1,29 +1,22 @@
-import asyncio
-import json
-import os
-import random
+import asyncio, aiohttp, json, os, random
 
-import aiohttp
-import asyncio
-from aiohttp import ClientSession
-
-from ..typing import Any, AsyncGenerator, CreateResult, Union
+from aiohttp import ClientSession
+from ..typing import Any, AsyncGenerator, CreateResult, Union
 from .base_provider import AsyncGeneratorProvider, get_cookies
 
 class Bing(AsyncGeneratorProvider):
-    url = "https://bing.com/chat"
-    needs_auth = True
-    working = True
-    supports_gpt_4 = True
-    supports_stream=True
+    url = "https://bing.com/chat"
+    needs_auth = True
+    working = True
+    supports_gpt_4 = True
+    supports_stream = True
 
     @staticmethod
     def create_async_generator(
         model: str,
         messages: list[dict[str, str]],
-        cookies: dict = get_cookies(".bing.com"),
-        **kwargs
-    ) -> AsyncGenerator:
+        cookies: dict = get_cookies(".bing.com"), **kwargs) -> AsyncGenerator:
+
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None

@@ -1,23 +1,20 @@
-import re
+import re, requests
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class ChatgptAi(BaseProvider):
-    url = "https://chatgpt.ai/gpt-4/"
-    working = True
-    supports_gpt_4 = True
+    url: str = "https://chatgpt.ai/gpt-4/"
+    working = True
+    supports_gpt_4 = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         chat = ""
         for message in messages:
             chat += "%s: %s\n" % (message["role"], message["content"])
@@ -26,36 +23,35 @@ class ChatgptAi(BaseProvider):
         response = requests.get("https://chatgpt.ai/")
         nonce, post_id, _, bot_id = re.findall(
             r'data-nonce="(.*)"\n     data-post-id="(.*)"\n     data-url="(.*)"\n     data-bot-id="(.*)"\n     data-width',
-            response.text,
-        )[0]
+            response.text)[0]
 
         headers = {
-            "authority": "chatgpt.ai",
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "cache-control": "no-cache",
-            "origin": "https://chatgpt.ai",
-            "pragma": "no-cache",
-            "referer": "https://chatgpt.ai/gpt-4/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "authority" : "chatgpt.ai",
+            "accept" : "*/*",
+            "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "cache-control" : "no-cache",
+            "origin" : "https://chatgpt.ai",
+            "pragma" : "no-cache",
+            "referer" : "https://chatgpt.ai/gpt-4/",
+            "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "sec-ch-ua-mobile" : "?0",
+            "sec-ch-ua-platform" : '"Windows"',
+            "sec-fetch-dest" : "empty",
+            "sec-fetch-mode" : "cors",
+            "sec-fetch-site" : "same-origin",
+            "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
         }
         data = {
-            "_wpnonce": nonce,
-            "post_id": post_id,
-            "url": "https://chatgpt.ai/gpt-4",
-            "action": "wpaicg_chat_shortcode_message",
-            "message": chat,
-            "bot_id": bot_id,
+            "_wpnonce" : nonce,
+            "post_id" : post_id,
+            "url" : "https://chatgpt.ai/gpt-4",
+            "action" : "wpaicg_chat_shortcode_message",
+            "message" : chat,
+            "bot_id" : bot_id,
         }
 
         response = requests.post(
-            "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data
-        )
+            "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data)
+
         response.raise_for_status()
         yield response.json()["data"]
@@ -1,69 +1,62 @@
-import base64
-import os
-import re
+import base64, os, re, requests
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class ChatgptLogin(BaseProvider):
-    url = "https://opchatgpts.net"
+    url = "https://opchatgpts.net"
     supports_gpt_35_turbo = True
-    working = True
+    working = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "authority": "chatgptlogin.ac",
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "content-type": "application/json",
-            "origin": "https://opchatgpts.net",
-            "referer": "https://opchatgpts.net/chatgpt-free-use/",
-            "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-            "x-wp-nonce": _get_nonce(),
+            "authority" : "chatgptlogin.ac",
+            "accept" : "*/*",
+            "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "content-type" : "application/json",
+            "origin" : "https://opchatgpts.net",
+            "referer" : "https://opchatgpts.net/chatgpt-free-use/",
+            "sec-ch-ua" : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            "sec-ch-ua-mobile" : "?0",
+            "sec-ch-ua-platform" : '"Windows"',
+            "sec-fetch-dest" : "empty",
+            "sec-fetch-mode" : "cors",
+            "sec-fetch-site" : "same-origin",
+            "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "x-wp-nonce" : _get_nonce(),
         }
 
         conversation = _transform(messages)
 
         json_data = {
-            "env": "chatbot",
-            "session": "N/A",
-            "prompt": "Converse as if you were an AI assistant. Be friendly, creative.",
-            "context": "Converse as if you were an AI assistant. Be friendly, creative.",
-            "messages": conversation,
-            "newMessage": messages[-1]["content"],
-            "userName": '<div class="mwai-name-text">User:</div>',
-            "aiName": '<div class="mwai-name-text">AI:</div>',
-            "model": "gpt-3.5-turbo",
-            "temperature": kwargs.get("temperature", 0.8),
-            "maxTokens": 1024,
-            "maxResults": 1,
-            "apiKey": "",
-            "service": "openai",
+            "env" : "chatbot",
+            "session" : "N/A",
+            "prompt" : "Converse as if you were an AI assistant. Be friendly, creative.",
+            "context" : "Converse as if you were an AI assistant. Be friendly, creative.",
+            "messages" : conversation,
+            "newMessage" : messages[-1]["content"],
+            "userName" : '<div class="mwai-name-text">User:</div>',
+            "aiName" : '<div class="mwai-name-text">AI:</div>',
+            "model" : "gpt-3.5-turbo",
+            "temperature" : kwargs.get("temperature", 0.8),
+            "maxTokens" : 1024,
+            "maxResults" : 1,
+            "apiKey" : "",
+            "service" : "openai",
             "embeddingsIndex": "",
-            "stop": "",
-            "clientId": os.urandom(6).hex(),
+            "stop" : "",
+            "clientId" : os.urandom(6).hex()
        }
 
-        response = requests.post(
-            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
-            headers=headers,
-            json=json_data,
-        )
+        response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
+            headers=headers, json=json_data)
 
         response.raise_for_status()
         yield response.json()["reply"]

@@ -81,24 +74,21 @@ class ChatgptLogin(BaseProvider):
 
 
 def _get_nonce() -> str:
-    res = requests.get(
-        "https://opchatgpts.net/chatgpt-free-use/",
-        headers={
-            "Referer": "https://opchatgpts.net/chatgpt-free-use/",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-        },
-    )
+    res = requests.get("https://opchatgpts.net/chatgpt-free-use/",
+        headers = {
+            "Referer" : "https://opchatgpts.net/chatgpt-free-use/",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"})
 
     result = re.search(
         r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
-        res.text,
-    )
+        res.text)
 
     if result is None:
         return ""
 
-    src = result.group(1)
+    src = result.group(1)
     decoded_string = base64.b64decode(src.split(",")[-1]).decode("utf-8")
-    result = re.search(r"let restNonce = '(.*?)';", decoded_string)
+    result = re.search(r"let restNonce = '(.*?)';", decoded_string)
 
     return "" if result is None else result.group(1)
 
@@ -106,11 +96,11 @@ def _get_nonce() -> str:
 def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
     return [
         {
-            "id": os.urandom(6).hex(),
-            "role": message["role"],
+            "id" : os.urandom(6).hex(),
+            "role" : message["role"],
             "content": message["content"],
-            "who": "AI: " if message["role"] == "assistant" else "User: ",
-            "html": _html_encode(message["content"]),
+            "who" : "AI: " if message["role"] == "assistant" else "User: ",
+            "html" : _html_encode(message["content"]),
         }
         for message in messages
     ]
@@ -118,14 +108,14 @@ def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
 
 def _html_encode(string: str) -> str:
     table = {
-        '"': "&quot;",
-        "'": "&apos;",
-        "&": "&amp;",
-        ">": "&gt;",
-        "<": "&lt;",
+        '"' : "&quot;",
+        "'" : "&apos;",
+        "&" : "&amp;",
+        ">" : "&gt;",
+        "<" : "&lt;",
         "\n": "<br>",
         "\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
-        " ": "&nbsp;",
+        " " : "&nbsp;",
     }
 
     for key in table:

@@ -1,26 +1,21 @@
-import json
+import json, js2py, requests
 
-import js2py
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class DeepAi(BaseProvider):
-    url = "https://deepai.org"
-    working = True
-    supports_stream = True
+    url: str = "https://deepai.org"
+    working = True
+    supports_stream = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        url = "https://api.deepai.org/make_me_a_pizza"
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         token_js = """
 var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
 var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;
@@ -54,7 +49,9 @@ f = function () {
             "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
         }
 
-        response = requests.post(url, headers=headers, data=payload, stream=True)
+        response = requests.post("https://api.deepai.org/make_me_a_pizza",
+            headers=headers, data=payload, stream=True)
+
         for chunk in response.iter_content(chunk_size=None):
             response.raise_for_status()
             yield chunk.decode()

@@ -1,57 +1,49 @@
-import json
-import re
-import time
+import json, re, time , requests
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class DfeHub(BaseProvider):
-    url = "https://chat.dfehub.com/"
-    supports_stream = True
+    url = "https://chat.dfehub.com/"
+    supports_stream = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "authority": "chat.dfehub.com",
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "content-type": "application/json",
-            "origin": "https://chat.dfehub.com",
-            "referer": "https://chat.dfehub.com/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "sec-ch-ua-mobile": "?0",
+            "authority" : "chat.dfehub.com",
+            "accept" : "*/*",
+            "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "content-type" : "application/json",
+            "origin" : "https://chat.dfehub.com",
+            "referer" : "https://chat.dfehub.com/",
+            "sec-ch-ua" : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "sec-ch-ua-mobile" : "?0",
             "sec-ch-ua-platform": '"macOS"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-            "x-requested-with": "XMLHttpRequest",
+            "sec-fetch-dest" : "empty",
+            "sec-fetch-mode" : "cors",
+            "sec-fetch-site" : "same-origin",
+            "user-agent" : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "x-requested-with" : "XMLHttpRequest",
         }
 
         json_data = {
-            "messages": messages,
-            "model": "gpt-3.5-turbo",
-            "temperature": kwargs.get("temperature", 0.5),
-            "presence_penalty": kwargs.get("presence_penalty", 0),
-            "frequency_penalty": kwargs.get("frequency_penalty", 0),
-            "top_p": kwargs.get("top_p", 1),
-            "stream": True,
+            "messages" : messages,
+            "model" : "gpt-3.5-turbo",
+            "temperature" : kwargs.get("temperature", 0.5),
+            "presence_penalty" : kwargs.get("presence_penalty", 0),
+            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+            "top_p" : kwargs.get("top_p", 1),
+            "stream" : True
         }
-        response = requests.post(
-            "https://chat.dfehub.com/api/openai/v1/chat/completions",
-            headers=headers,
-            json=json_data,
-            timeout=3
-        )
+
+        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
+            headers=headers, json=json_data, timeout=3)
 
         for chunk in response.iter_lines():
             if b"detail" in chunk:

@@ -1,24 +1,21 @@
-import json
+import json, requests, random
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class EasyChat(BaseProvider):
-    url = "https://free.easychat.work"
-    supports_stream = True
+    url: str = "https://free.easychat.work"
+    supports_stream = True
     supports_gpt_35_turbo = True
-    working = True
+    working = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         active_servers = [
             "https://chat10.fastgpt.me",
             "https://chat9.fastgpt.me",
@@ -28,59 +25,62 @@ class EasyChat(BaseProvider):
             "https://chat4.fastgpt.me",
             "https://gxos1h1ddt.fastgpt.me"
         ]
-        server = active_servers[kwargs.get("active_server", 0)]
+
+        server = active_servers[kwargs.get("active_server", random.randint(0, 5))]
         headers = {
-            "authority": f"{server}".replace("https://", ""),
-            "accept": "text/event-stream",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
-            "content-type": "application/json",
-            "origin": f"{server}",
-            "referer": f"{server}/",
-            "x-requested-with": "XMLHttpRequest",
-            'plugins': '0',
-            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
-            'sec-ch-ua-mobile': '?0',
+            "authority" : f"{server}".replace("https://", ""),
+            "accept" : "text/event-stream",
+            "accept-language" : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+            "content-type" : "application/json",
+            "origin" : f"{server}",
+            "referer" : f"{server}/",
+            "x-requested-with" : "XMLHttpRequest",
+            'plugins' : '0',
+            'sec-ch-ua' : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile' : '?0',
             'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
-            'usesearch': 'false',
-            'x-requested-with': 'XMLHttpRequest'
+            'sec-fetch-dest' : 'empty',
+            'sec-fetch-mode' : 'cors',
+            'sec-fetch-site' : 'same-origin',
+            'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'usesearch' : 'false',
+            'x-requested-with' : 'XMLHttpRequest'
         }
 
         json_data = {
-            "messages": messages,
-            "stream": stream,
-            "model": model,
-            "temperature": kwargs.get("temperature", 0.5),
-            "presence_penalty": kwargs.get("presence_penalty", 0),
-            "frequency_penalty": kwargs.get("frequency_penalty", 0),
-            "top_p": kwargs.get("top_p", 1),
+            "messages" : messages,
+            "stream" : stream,
+            "model" : model,
+            "temperature" : kwargs.get("temperature", 0.5),
+            "presence_penalty" : kwargs.get("presence_penalty", 0),
+            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+            "top_p" : kwargs.get("top_p", 1)
        }
 
         session = requests.Session()
         # init cookies from server
         session.get(f"{server}/")
 
-        response = session.post(
-            f"{server}/api/openai/v1/chat/completions",
-            headers=headers,
-            json=json_data,
-            stream=stream,
-        )
+        response = session.post(f"{server}/api/openai/v1/chat/completions",
+            headers=headers, json=json_data, stream=stream)
 
         if response.status_code == 200:
 
             if stream == False:
                 json_data = response.json()
 
                 if "choices" in json_data:
                     yield json_data["choices"][0]["message"]["content"]
                 else:
                     raise Exception("No response from server")
 
             else:
 
                 for chunk in response.iter_lines():
 
                     if b"content" in chunk:
                         splitData = chunk.decode().split("data:")
 
                         if len(splitData) > 1:
                             yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
         else:

@@ -1,58 +1,58 @@
 import requests, json
-from abc import ABC, abstractmethod
 
+from abc import ABC, abstractmethod
 from ..typing import Any, CreateResult
 
 
 class Equing(ABC):
-    url: str = 'https://next.eqing.tech/'
-    working = True
-    needs_auth = False
-    supports_stream = True
+    url: str = 'https://next.eqing.tech/'
+    working = True
+    needs_auth = False
+    supports_stream = True
     supports_gpt_35_turbo = True
-    supports_gpt_4 = False
+    supports_gpt_4 = False
 
     @staticmethod
     @abstractmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            'authority': 'next.eqing.tech',
-            'accept': 'text/event-stream',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cache-control': 'no-cache',
-            'content-type': 'application/json',
-            'origin': 'https://next.eqing.tech',
-            'plugins': '0',
-            'pragma': 'no-cache',
-            'referer': 'https://next.eqing.tech/',
-            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
-            'sec-ch-ua-mobile': '?0',
+            'authority' : 'next.eqing.tech',
+            'accept' : 'text/event-stream',
+            'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control' : 'no-cache',
+            'content-type' : 'application/json',
+            'origin' : 'https://next.eqing.tech',
+            'plugins' : '0',
+            'pragma' : 'no-cache',
+            'referer' : 'https://next.eqing.tech/',
+            'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+            'sec-ch-ua-mobile' : '?0',
             'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
-            'usesearch': 'false',
-            'x-requested-with': 'XMLHttpRequest',
+            'sec-fetch-dest' : 'empty',
+            'sec-fetch-mode' : 'cors',
+            'sec-fetch-site' : 'same-origin',
+            'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+            'usesearch' : 'false',
+            'x-requested-with' : 'XMLHttpRequest'
        }
 
         json_data = {
-            'messages': messages,
-            'stream': stream,
-            'model': model,
-            'temperature': kwargs.get('temperature', 0.5),
-            'presence_penalty': kwargs.get('presence_penalty', 0),
-            'frequency_penalty': kwargs.get('frequency_penalty', 0),
-            'top_p': kwargs.get('top_p', 1),
+            'messages' : messages,
+            'stream' : stream,
+            'model' : model,
+            'temperature' : kwargs.get('temperature', 0.5),
+            'presence_penalty' : kwargs.get('presence_penalty', 0),
+            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+            'top_p' : kwargs.get('top_p', 1),
        }
 
         response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
             headers=headers, json=json_data, stream=stream)
 
         if not stream:
             yield response.json()["choices"][0]["message"]["content"]
             return

@@ -5,51 +5,49 @@ from ..typing import Any, CreateResult
 
 
 class FastGpt(ABC):
-    url: str = 'https://chat9.fastgpt.me/'
-    working = False
-    needs_auth = False
-    supports_stream = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = False
+    url: str = 'https://chat9.fastgpt.me/'
+    working = False
+    needs_auth = False
+    supports_stream = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = False
 
     @staticmethod
     @abstractmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            'authority': 'chat9.fastgpt.me',
-            'accept': 'text/event-stream',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cache-control': 'no-cache',
-            'content-type': 'application/json',
-            # 'cookie': 'cf_clearance=idIAwtoSCn0uCzcWLGuD.KtiAJv9a1GsPduEOqIkyHU-1692278595-0-1-cb11fd7a.ab1546d4.ccf35fd7-0.2.1692278595; Hm_lvt_563fb31e93813a8a7094966df6671d3f=1691966491,1692278597; Hm_lpvt_563fb31e93813a8a7094966df6671d3f=1692278597',
-            'origin': 'https://chat9.fastgpt.me',
-            'plugins': '0',
-            'pragma': 'no-cache',
-            'referer': 'https://chat9.fastgpt.me/',
-            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
-            'sec-ch-ua-mobile': '?0',
+            'authority' : 'chat9.fastgpt.me',
+            'accept' : 'text/event-stream',
+            'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control' : 'no-cache',
+            'content-type' : 'application/json',
+            'origin' : 'https://chat9.fastgpt.me',
+            'plugins' : '0',
+            'pragma' : 'no-cache',
+            'referer' : 'https://chat9.fastgpt.me/',
+            'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+            'sec-ch-ua-mobile' : '?0',
             'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
-            'usesearch': 'false',
-            'x-requested-with': 'XMLHttpRequest',
+            'sec-fetch-dest' : 'empty',
+            'sec-fetch-mode' : 'cors',
+            'sec-fetch-site' : 'same-origin',
+            'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+            'usesearch' : 'false',
+            'x-requested-with' : 'XMLHttpRequest',
        }
 
         json_data = {
-            'messages': messages,
-            'stream': stream,
-            'model': model,
-            'temperature': kwargs.get('temperature', 0.5),
-            'presence_penalty': kwargs.get('presence_penalty', 0),
-            'frequency_penalty': kwargs.get('frequency_penalty', 0),
-            'top_p': kwargs.get('top_p', 1),
+            'messages' : messages,
+            'stream' : stream,
+            'model' : model,
+            'temperature' : kwargs.get('temperature', 0.5),
+            'presence_penalty' : kwargs.get('presence_penalty', 0),
+            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+            'top_p' : kwargs.get('top_p', 1),
        }
 
         subdomain = random.choice([
@@ -58,7 +56,7 @@ class FastGpt(ABC):
         ])
 
         response = requests.post(f'https://{subdomain}.fastgpt.me/api/openai/v1/chat/completions',
-                                 headers=headers, json=json_data, stream=stream)
+            headers=headers, json=json_data, stream=stream)
 
         for line in response.iter_lines():
             if line:

@@ -7,34 +7,31 @@ from .base_provider import BaseProvider
 
 
 class Forefront(BaseProvider):
-    url = "https://forefront.com"
-    supports_stream = True
+    url = "https://forefront.com"
+    supports_stream = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         json_data = {
-            "text": messages[-1]["content"],
-            "action": "noauth",
-            "id": "",
-            "parentId": "",
-            "workspaceId": "",
+            "text" : messages[-1]["content"],
+            "action" : "noauth",
+            "id" : "",
+            "parentId" : "",
+            "workspaceId" : "",
             "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
-            "model": "gpt-4",
-            "messages": messages[:-1] if len(messages) > 1 else [],
-            "internetMode": "auto",
+            "model" : "gpt-4",
+            "messages" : messages[:-1] if len(messages) > 1 else [],
+            "internetMode" : "auto",
        }
 
-        response = requests.post(
-            "https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
-            json=json_data,
-            stream=True,
-        )
+        response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+            json=json_data, stream=True)
 
         response.raise_for_status()
         for token in response.iter_lines():
             if b"delta" in token:

@@ -1,87 +1,82 @@
-import json
-import os
-import uuid
+import os, json, uuid, requests
 
-import requests
-from Crypto.Cipher import AES
-
-from ..typing import Any, CreateResult
+from Crypto.Cipher import AES
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class GetGpt(BaseProvider):
-    url = "https://chat.getgpt.world/"
-    supports_stream = True
-    working = True
+    url = 'https://chat.getgpt.world/'
+    supports_stream = True
+    working = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "Content-Type": "application/json",
-            "Referer": "https://chat.getgpt.world/",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            'Content-Type' : 'application/json',
+            'Referer' : 'https://chat.getgpt.world/',
+            'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
        }
 
         data = json.dumps(
             {
-                "messages": messages,
-                "frequency_penalty": kwargs.get("frequency_penalty", 0),
-                "max_tokens": kwargs.get("max_tokens", 4000),
-                "model": "gpt-3.5-turbo",
-                "presence_penalty": kwargs.get("presence_penalty", 0),
-                "temperature": kwargs.get("temperature", 1),
-                "top_p": kwargs.get("top_p", 1),
-                "stream": True,
-                "uuid": str(uuid.uuid4()),
+                'messages' : messages,
+                'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+                'max_tokens' : kwargs.get('max_tokens', 4000),
+                'model' : 'gpt-3.5-turbo',
+                'presence_penalty' : kwargs.get('presence_penalty', 0),
+                'temperature' : kwargs.get('temperature', 1),
+                'top_p' : kwargs.get('top_p', 1),
+                'stream' : True,
+                'uuid' : str(uuid.uuid4())
            }
        )
 
-        res = requests.post(
-            "https://chat.getgpt.world/api/chat/stream",
-            headers=headers,
-            json={"signature": _encrypt(data)},
-            stream=True,
-        )
+        res = requests.post('https://chat.getgpt.world/api/chat/stream',
+            headers=headers, json={'signature': _encrypt(data)}, stream=True)
 
         res.raise_for_status()
         for line in res.iter_lines():
-            if b"content" in line:
-                line_json = json.loads(line.decode("utf-8").split("data: ")[1])
-                yield (line_json["choices"][0]["delta"]["content"])
+            if b'content' in line:
+                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+                yield (line_json['choices'][0]['delta']['content'])
 
     @classmethod
     @property
     def params(cls):
         params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("presence_penalty", "int"),
-            ("frequency_penalty", "int"),
-            ("top_p", "int"),
-            ("max_tokens", "int"),
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
+            ('presence_penalty', 'int'),
+            ('frequency_penalty', 'int'),
+            ('top_p', 'int'),
+            ('max_tokens', 'int'),
        ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'
 
 
 def _encrypt(e: str):
-    t = os.urandom(8).hex().encode("utf-8")
-    n = os.urandom(8).hex().encode("utf-8")
-    r = e.encode("utf-8")
-    cipher = AES.new(t, AES.MODE_CBC, n)
+    t = os.urandom(8).hex().encode('utf-8')
+    n = os.urandom(8).hex().encode('utf-8')
+    r = e.encode('utf-8')
+
+    cipher = AES.new(t, AES.MODE_CBC, n)
     ciphertext = cipher.encrypt(_pad_data(r))
-    return ciphertext.hex() + t.decode("utf-8") + n.decode("utf-8")
+
+    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')
 
 
 def _pad_data(data: bytes) -> bytes:
-    block_size = AES.block_size
+    block_size = AES.block_size
     padding_size = block_size - len(data) % block_size
-    padding = bytes([padding_size] * padding_size)
+    padding = bytes([padding_size] * padding_size)
 
     return data + padding

@@ -1,25 +1,21 @@
-import json
-import uuid
+import json, uuid, requests
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class H2o(BaseProvider):
-    url = "https://gpt-gm.h2o.ai"
-    working = True
+    url = "https://gpt-gm.h2o.ai"
+    working = True
     supports_stream = True
-    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
+    model = "h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1"
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         conversation = ""
         for message in messages:
             conversation += "%s: %s\n" % (message["role"], message["content"])
@@ -29,58 +25,52 @@ class H2o(BaseProvider):
 
         headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"}
         data = {
-            "ethicsModalAccepted": "true",
+            "ethicsModalAccepted" : "true",
             "shareConversationsWithModelAuthors": "true",
-            "ethicsModalAcceptedAt": "",
-            "activeModel": model,
-            "searchEnabled": "true",
+            "ethicsModalAcceptedAt" : "",
+            "activeModel" : model,
+            "searchEnabled" : "true",
        }
-        session.post(
-            "https://gpt-gm.h2o.ai/settings",
-            headers=headers,
-            data=data,
-        )
+
+        session.post("https://gpt-gm.h2o.ai/settings",
+            headers=headers, data=data)
 
         headers = {"Referer": "https://gpt-gm.h2o.ai/"}
-        data = {"model": model}
+        data = {"model": model}
 
+        response = session.post("https://gpt-gm.h2o.ai/conversation",
+            headers=headers, json=data).json()
+
-        response = session.post(
-            "https://gpt-gm.h2o.ai/conversation",
-            headers=headers,
-            json=data,
-        ).json()
         if "conversationId" not in response:
             return
 
         data = {
             "inputs": conversation,
             "parameters": {
-                "temperature": kwargs.get("temperature", 0.4),
-                "truncate": kwargs.get("truncate", 2048),
-                "max_new_tokens": kwargs.get("max_new_tokens", 1024),
-                "do_sample": kwargs.get("do_sample", True),
+                "temperature" : kwargs.get("temperature", 0.4),
+                "truncate" : kwargs.get("truncate", 2048),
+                "max_new_tokens" : kwargs.get("max_new_tokens", 1024),
+                "do_sample" : kwargs.get("do_sample", True),
                 "repetition_penalty": kwargs.get("repetition_penalty", 1.2),
-                "return_full_text": kwargs.get("return_full_text", False),
+                "return_full_text" : kwargs.get("return_full_text", False),
            },
-            "stream": True,
+            "stream" : True,
             "options": {
-                "id": kwargs.get("id", str(uuid.uuid4())),
-                "response_id": kwargs.get("response_id", str(uuid.uuid4())),
-                "is_retry": False,
-                "use_cache": False,
+                "id" : kwargs.get("id", str(uuid.uuid4())),
+                "response_id" : kwargs.get("response_id", str(uuid.uuid4())),
+                "is_retry" : False,
+                "use_cache" : False,
                 "web_search_id": "",
            },
        }
 
-        response = session.post(
-            f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}",
-            headers=headers,
-            json=data,
-        )
+        response = session.post(f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}",
+            headers=headers, json=data)
 
         response.raise_for_status()
         response.encoding = "utf-8"
-        generated_text = response.text.replace("\n", "").split("data:")
-        generated_text = json.loads(generated_text[-1])
+        generated_text = response.text.replace("\n", "").split("data:")
+        generated_text = json.loads(generated_text[-1])
 
         yield generated_text["generated_text"]

@@ -5,13 +5,13 @@ except ImportError:
     has_module = False
 
 from .base_provider import BaseProvider, get_cookies
-from g4f.typing import CreateResult
+from g4f.typing import CreateResult
 
 class Hugchat(BaseProvider):
-    url = "https://huggingface.co/chat/"
+    url = "https://huggingface.co/chat/"
     needs_auth = True
-    working = has_module
-    llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
+    working = has_module
+    llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
 
     @classmethod
     def create_completion(
@@ -20,12 +20,10 @@ class Hugchat(BaseProvider):
         messages: list[dict[str, str]],
         stream: bool = False,
         proxy: str = None,
-        cookies: str = get_cookies(".huggingface.co"),
-        **kwargs
-    ) -> CreateResult:
+        cookies: str = get_cookies(".huggingface.co"), **kwargs) -> CreateResult:
+
         bot = ChatBot(
-            cookies=cookies
-        )
+            cookies=cookies)
 
         if proxy and "://" not in proxy:
             proxy = f"http://{proxy}"

@@ -1,33 +1,31 @@
-import uuid
+import uuid, requests
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Liaobots(BaseProvider):
-    url = "https://liaobots.com"
-    supports_stream = True
-    needs_auth = True
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
+    url: str = "https://liaobots.com"
+    supports_stream = True
+    needs_auth = True
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "authority": "liaobots.com",
-            "content-type": "application/json",
-            "origin": "https://liaobots.com",
-            "referer": "https://liaobots.com/",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
-            "x-auth-code": str(kwargs.get("auth")),
+            "authority" : "liaobots.com",
+            "content-type" : "application/json",
+            "origin" : "https://liaobots.com",
+            "referer" : "https://liaobots.com/",
+            "user-agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
+            "x-auth-code" : str(kwargs.get("auth")),
        }
 
         models = {
             "gpt-4": {
                 "id": "gpt-4",
@@ -44,18 +42,15 @@ class Liaobots(BaseProvider):
        }
         json_data = {
             "conversationId": str(uuid.uuid4()),
-            "model": models[model],
-            "messages": messages,
-            "key": "",
-            "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
+            "model" : models[model],
+            "messages" : messages,
+            "key" : "",
+            "prompt" : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.",
        }
 
-        response = requests.post(
-            "https://liaobots.com/api/chat",
-            headers=headers,
-            json=json_data,
-            stream=True,
-        )
+        response = requests.post("https://liaobots.com/api/chat",
+            headers=headers, json=json_data, stream=True)
 
         response.raise_for_status()
         for token in response.iter_content(chunk_size=2046):
             yield token.decode("utf-8")

@@ -1,52 +1,46 @@
-import json
+import json, requests
 
-import requests
-
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Lockchat(BaseProvider):
-    url = "http://supertest.lockchat.app"
-    supports_stream = True
+    url: str = "http://supertest.lockchat.app"
+    supports_stream = True
     supports_gpt_35_turbo = True
-    supports_gpt_4 = True
+    supports_gpt_4 = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         temperature = float(kwargs.get("temperature", 0.7))
         payload = {
             "temperature": temperature,
-            "messages": messages,
-            "model": model,
-            "stream": True,
+            "messages" : messages,
+            "model" : model,
+            "stream" : True,
        }
 
         headers = {
             "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0",
        }
-        response = requests.post(
-            "http://supertest.lockchat.app/v1/chat/completions",
-            json=payload,
-            headers=headers,
-            stream=True,
-        )
+        response = requests.post("http://supertest.lockchat.app/v1/chat/completions",
+            json=payload, headers=headers, stream=True)
 
         response.raise_for_status()
         for token in response.iter_lines():
             if b"The model: `gpt-4` does not exist" in token:
                 print("error, retrying...")
                 Lockchat.create_completion(
-                    model=model,
-                    messages=messages,
-                    stream=stream,
-                    temperature=temperature,
-                    **kwargs,
-                )
+                    model = model,
+                    messages = messages,
+                    stream = stream,
+                    temperature = temperature,
+                    **kwargs)
 
             if b"content" in token:
                 token = json.loads(token.decode("utf-8").split("data: ")[1])
                 token = token["choices"][0]["delta"].get("content")

@@ -1,37 +1,34 @@
 import requests
 
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Opchatgpts(BaseProvider):
-    url = "https://opchatgpts.net"
-    working = True
+    url = "https://opchatgpts.net"
+    working = True
     supports_gpt_35_turbo = True
 
     @staticmethod
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        temperature = kwargs.get("temperature", 0.8)
-        max_tokens = kwargs.get("max_tokens", 1024)
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        temperature = kwargs.get("temperature", 0.8)
+        max_tokens = kwargs.get("max_tokens", 1024)
         system_prompt = kwargs.get(
             "system_prompt",
-            "Converse as if you were an AI assistant. Be friendly, creative.",
-        )
-        payload = _create_payload(
-            messages=messages,
-            temperature=temperature,
-            max_tokens=max_tokens,
-            system_prompt=system_prompt,
-        )
+            "Converse as if you were an AI assistant. Be friendly, creative.")
+
+        payload = _create_payload(
+            messages = messages,
+            temperature = temperature,
+            max_tokens = max_tokens,
+            system_prompt = system_prompt)
+
+        response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload)
 
-        response = requests.post(
-            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload
-        )
         response.raise_for_status()
         yield response.json()["reply"]
 
@@ -39,24 +36,23 @@ class Opchatgpts(BaseProvider):
 def _create_payload(
     messages: list[dict[str, str]],
     temperature: float,
-    max_tokens: int,
-    system_prompt: str,
-):
+    max_tokens: int, system_prompt: str) -> dict:
+
     return {
-        "env": "chatbot",
-        "session": "N/A",
-        "prompt": "\n",
-        "context": system_prompt,
-        "messages": messages,
-        "newMessage": messages[::-1][0]["content"],
-        "userName": '<div class="mwai-name-text">User:</div>',
-        "aiName": '<div class="mwai-name-text">AI:</div>',
-        "model": "gpt-3.5-turbo",
-        "temperature": temperature,
-        "maxTokens": max_tokens,
-        "maxResults": 1,
-        "apiKey": "",
-        "service": "openai",
-        "embeddingsIndex": "",
-        "stop": "",
+        "env" : "chatbot",
+        "session" : "N/A",
+        "prompt" : "\n",
+        "context" : system_prompt,
+        "messages" : messages,
+        "newMessage" : messages[::-1][0]["content"],
+        "userName" : '<div class="mwai-name-text">User:</div>',
+        "aiName" : '<div class="mwai-name-text">AI:</div>',
+        "model" : "gpt-3.5-turbo",
+        "temperature" : temperature,
+        "maxTokens" : max_tokens,
+        "maxResults" : 1,
+        "apiKey" : "",
+        "service" : "openai",
+        "embeddingsIndex" : "",
+        "stop" : "",
    }

@@ -3,16 +3,17 @@ try:
     from revChatGPT.V1 import AsyncChatbot
 except ImportError:
     has_module = False
 
 from .base_provider import AsyncGeneratorProvider, get_cookies
-from ..typing import AsyncGenerator
+from ..typing import AsyncGenerator
 
 class OpenaiChat(AsyncGeneratorProvider):
-    url = "https://chat.openai.com"
-    needs_auth = True
-    working = has_module
+    url = "https://chat.openai.com"
+    needs_auth = True
+    working = has_module
     supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-    supports_stream = True
+    supports_gpt_4 = True
+    supports_stream = True
 
     @classmethod
     async def create_async_generator(
@@ -36,8 +37,8 @@ class OpenaiChat(AsyncGeneratorProvider):
        )
 
         if not access_token:
-            cookies = cookies if cookies else get_cookies("chat.openai.com")
-            response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+            cookies = cookies if cookies else get_cookies("chat.openai.com")
+            response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
             access_token = response.json()["accessToken"]
             bot.set_access_token(access_token)

@@ -1,17 +1,16 @@
-import json
-import requests
-from ..typing import Any, CreateResult
+import json, requests
+
+from ..typing import Any, CreateResult
 from .base_provider import BaseProvider
 
 
 class Raycast(BaseProvider):
-    url = "https://raycast.com"
-    # model = ['gpt-3.5-turbo', 'gpt-4']
-    supports_gpt_35_turbo = True
-    supports_gpt_4 = True
-    supports_stream = True
-    needs_auth = True
-    working = True
+    url = "https://raycast.com"
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+    needs_auth = True
+    working = True
 
     @staticmethod
     def create_completion(

@@ -1,74 +1,72 @@
import json,random,requests
# from curl_cffi import requests
from ..typing import Any, CreateResult
import json, random, requests

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Theb(BaseProvider):
    url = "https://theb.ai"
    working = True
    supports_stream = True
    supports_gpt_35_turbo = True
    needs_auth = True
    url                   = "https://theb.ai"
    working               = True
    supports_stream       = True
    supports_gpt_35_turbo = True
    needs_auth            = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        conversation = ''
        for message in messages:
            conversation += '%s: %s\n' % (message['role'], message['content'])

        conversation += 'assistant: '

        auth = kwargs.get("auth", {
            "bearer_token":"free",
            "org_id":"theb",
        })

        bearer_token = auth["bearer_token"]
        org_id = auth["org_id"]
        org_id       = auth["org_id"]

        headers = {
            'authority': 'beta.theb.ai',
            'accept': 'text/event-stream',
            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'authorization': 'Bearer '+bearer_token,
            'content-type': 'application/json',
            'origin': 'https://beta.theb.ai',
            'referer': 'https://beta.theb.ai/home',
            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            'sec-ch-ua-mobile': '?0',
            'authority'         : 'beta.theb.ai',
            'accept'            : 'text/event-stream',
            'accept-language'   : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'authorization'     : 'Bearer '+bearer_token,
            'content-type'      : 'application/json',
            'origin'            : 'https://beta.theb.ai',
            'referer'           : 'https://beta.theb.ai/home',
            'sec-ch-ua'         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
            'sec-ch-ua-mobile'  : '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8',
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
            'x-ai-model'        : 'ee8d4f29cb7047f78cbe84313ed6ace8',
        }
        # random 9-to-10 digit integer, sent as the req_rand query parameter

        req_rand = random.randint(100000000, 9999999999)

        json_data: dict[str, Any] = {
            "text": conversation,
            "category": "04f58f64a4aa4191a957b47290fee864",
            "model": "ee8d4f29cb7047f78cbe84313ed6ace8",
            "text"        : conversation,
            "category"    : "04f58f64a4aa4191a957b47290fee864",
            "model"       : "ee8d4f29cb7047f78cbe84313ed6ace8",
            "model_params": {
                "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
                "temperature": kwargs.get("temperature", 1),
                "top_p": kwargs.get("top_p", 1),
                "frequency_penalty": kwargs.get("frequency_penalty", 0),
                "presence_penalty": kwargs.get("presence_penalty", 0),
                "long_term_memory": "auto"
                "system_prompt"    : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}",
                "temperature"      : kwargs.get("temperature", 1),
                "top_p"            : kwargs.get("top_p", 1),
                "frequency_penalty": kwargs.get("frequency_penalty", 0),
                "presence_penalty" : kwargs.get("presence_penalty", 0),
                "long_term_memory" : "auto"
            }
        }
        response = requests.post(
            "https://beta.theb.ai/api/conversation?org_id="+org_id+"&req_rand="+str(req_rand),
            headers=headers,
            json=json_data,
            stream=True,
        )

        response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}",
            headers=headers, json=json_data, stream=True)

        response.raise_for_status()
        content = ""
        next_content = ""
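For orientation, not in the commit: the loop above flattens the chat history into one plain-text prompt before it is posted. The same transformation in isolation:

messages = [
    {"role": "system", "content": "Be brief."},
    {"role": "user",   "content": "Hi"}]

conversation = ''
for message in messages:
    conversation += '%s: %s\n' % (message['role'], message['content'])
conversation += 'assistant: '

print(repr(conversation))
# 'system: Be brief.\nuser: Hi\nassistant: '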
@@ -1,51 +1,52 @@
import uuid, requests
from ..typing import Any, CreateResult

from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class V50(BaseProvider):
    url = 'https://p5.v50.ltd'
    supports_gpt_35_turbo = True
    supports_stream = False
    needs_auth = False
    working = False
    url                   = 'https://p5.v50.ltd'
    supports_gpt_35_turbo = True
    supports_stream       = False
    needs_auth            = False
    working               = False

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        conversation = ''
        for message in messages:
            conversation += '%s: %s\n' % (message['role'], message['content'])

        conversation += 'assistant: '
        payload = {
            "prompt": conversation,
            "options": {},
            "systemMessage": ".",
            "temperature": kwargs.get("temperature", 0.4),
            "top_p": kwargs.get("top_p", 0.4),
            "model": model,
            "user": str(uuid.uuid4())
            "prompt"        : conversation,
            "options"       : {},
            "systemMessage" : ".",
            "temperature"   : kwargs.get("temperature", 0.4),
            "top_p"         : kwargs.get("top_p", 0.4),
            "model"         : model,
            "user"          : str(uuid.uuid4())
        }

        headers = {
            'authority': 'p5.v50.ltd',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type': 'application/json',
            'origin': 'https://p5.v50.ltd',
            'referer': 'https://p5.v50.ltd/',
            'authority'         : 'p5.v50.ltd',
            'accept'            : 'application/json, text/plain, */*',
            'accept-language'   : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7',
            'content-type'      : 'application/json',
            'origin'            : 'https://p5.v50.ltd',
            'referer'           : 'https://p5.v50.ltd/',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
            'sec-fetch-dest'    : 'empty',
            'sec-fetch-mode'    : 'cors',
            'sec-fetch-site'    : 'same-origin',
            'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36'
        }
        response = requests.post("https://p5.v50.ltd/api/chat-process",
            json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {})

        if "https://fk1.v50.ltd" not in response.text:
            yield response.text

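A sketch, not part of the commit: V50 forwards an optional proxy kwarg straight into requests.post(proxies=...). Because the class is flagged working = False, ChatCompletion.create would refuse it, so the provider is called directly here; the proxy URL is a placeholder.

from g4f.Provider import V50

chunks = V50.create_completion(
    model    = "gpt-3.5-turbo",
    messages = [{"role": "user", "content": "ping"}],
    stream   = False,
    proxy    = {"https": "http://127.0.0.1:8080"})  # placeholder proxies mapping

print("".join(chunks))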
@@ -1,26 +1,21 @@
import base64
import json
import uuid
import base64, json, uuid, quickjs

import quickjs
from curl_cffi import requests

from ..typing import Any, CreateResult, TypedDict
from curl_cffi import requests
from ..typing  import Any, CreateResult, TypedDict
from .base_provider import BaseProvider


class Vercel(BaseProvider):
    url = "https://play.vercel.ai"
    working = True
    url                   = "https://play.vercel.ai"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        if model in ["gpt-3.5-turbo", "gpt-4"]:
            model = "openai:" + model
        yield _chat(model_id=model, messages=messages)
@@ -29,8 +24,8 @@ class Vercel(BaseProvider):
def _chat(model_id: str, messages: list[dict[str, str]]) -> str:
    session = requests.Session(impersonate="chrome107")

    url = "https://sdk.vercel.ai/api/generate"
    header = _create_header(session)
    url     = "https://sdk.vercel.ai/api/generate"
    header  = _create_header(session)
    payload = _create_payload(model_id, messages)

    response = session.post(url=url, headers=header, json=payload)
@@ -44,15 +39,13 @@ def _create_payload(model_id: str, messages: list[dict[str, str]]) -> dict[str,
        "messages": messages,
        "playgroundId": str(uuid.uuid4()),
        "chatIndex": 0,
        "model": model_id,
    } | default_params
        "model": model_id} | default_params


def _create_header(session: requests.Session):
    custom_encoding = _get_custom_encoding(session)
    return {"custom-encoding": custom_encoding}


# based on https://github.com/ading2210/vercel-llm-api
def _get_custom_encoding(session: requests.Session):
    url = "https://sdk.vercel.ai/openai.jpeg"

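An aside, not in the commit: create_completion above rewrites bare OpenAI names to Vercel's prefixed model ids before calling _chat. The mapping on its own:

def to_vercel_id(model: str) -> str:
    # mirrors the branch in Vercel.create_completion
    if model in ["gpt-3.5-turbo", "gpt-4"]:
        model = "openai:" + model
    return model

assert to_vercel_id("gpt-4") == "openai:gpt-4"
assert to_vercel_id("anthropic:claude-v2") == "anthropic:claude-v2"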
@@ -1,69 +1,66 @@
import json
import random
import string
import time
import json, random, string, time, requests

import requests

from ..typing import Any, CreateResult
from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Wewordle(BaseProvider):
    url = "https://wewordle.org/"
    working = True
    supports_gpt_35_turbo = True
    url                   = "https://wewordle.org/"
    working               = True
    supports_gpt_35_turbo = True

    @classmethod
    def create_completion(
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        # randomize user id and app id
        _user_id = "".join(
            random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)
        )
            random.choices(f"{string.ascii_lowercase}{string.digits}", k=16))

        _app_id = "".join(
            random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)
        )
            random.choices(f"{string.ascii_lowercase}{string.digits}", k=31))

        # current UTC date in ISO-8601 format
        _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())
        headers = {
            "accept": "*/*",
            "pragma": "no-cache",
            "Content-Type": "application/json",
            "Connection": "keep-alive"
            "accept"       : "*/*",
            "pragma"       : "no-cache",
            "Content-Type" : "application/json",
            "Connection"   : "keep-alive"
            # user agent android client
            # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)',
        }

        data: dict[str, Any] = {
            "user": _user_id,
            "messages": messages,
            "user"     : _user_id,
            "messages" : messages,
            "subscriber": {
                "originalPurchaseDate": None,
                "originalApplicationVersion": None,
                "allPurchaseDatesMillis": {},
                "entitlements": {"active": {}, "all": {}},
                "allPurchaseDates": {},
                "allExpirationDatesMillis": {},
                "allExpirationDates": {},
                "originalAppUserId": f"$RCAnonymousID:{_app_id}",
                "latestExpirationDate": None,
                "requestDate": _request_date,
                "latestExpirationDateMillis": None,
                "nonSubscriptionTransactions": [],
                "originalPurchaseDateMillis": None,
                "managementURL": None,
                "originalPurchaseDate"          : None,
                "originalApplicationVersion"    : None,
                "allPurchaseDatesMillis"        : {},
                "entitlements"                  : {"active": {}, "all": {}},
                "allPurchaseDates"              : {},
                "allExpirationDatesMillis"      : {},
                "allExpirationDates"            : {},
                "originalAppUserId"             : f"$RCAnonymousID:{_app_id}",
                "latestExpirationDate"          : None,
                "requestDate"                   : _request_date,
                "latestExpirationDateMillis"    : None,
                "nonSubscriptionTransactions"   : [],
                "originalPurchaseDateMillis"    : None,
                "managementURL"                 : None,
                "allPurchasedProductIdentifiers": [],
                "firstSeen": _request_date,
                "activeSubscriptions": [],
            },
                "firstSeen"                     : _request_date,
                "activeSubscriptions"           : [],
            }
        }

        response = requests.post(f"{cls.url}gptapi/v1/android/turbo", headers=headers, data=json.dumps(data))
        response = requests.post(f"{cls.url}gptapi/v1/android/turbo",
            headers=headers, data=json.dumps(data))

        response.raise_for_status()
        _json = response.json()
        if "message" in _json:

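Not part of the diff: the randomized identity fields above, run in isolation, to show the shapes Wewordle sends.

import random, string, time

chars         = f"{string.ascii_lowercase}{string.digits}"
_user_id      = "".join(random.choices(chars, k=16))  # 16-char id
_app_id       = "".join(random.choices(chars, k=31))  # 31 chars, wrapped as $RCAnonymousID:<id>
_request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime())

print(_user_id, _app_id, _request_date)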
@@ -1,33 +1,27 @@
import re
import urllib.parse
import json
import urllib.parse, json

from curl_cffi import requests

from ..typing import Any, CreateResult
from curl_cffi import requests
from ..typing  import Any, CreateResult
from .base_provider import BaseProvider


class You(BaseProvider):
    url = "https://you.com"
    working = True
    url                   = "https://you.com"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        url_param = _create_url_param(messages, kwargs.get("history", []))
        headers = _create_header()
        url = f"https://you.com/api/streamingSearch?{url_param}"
        response = requests.get(
            url,
            headers=headers,
            impersonate="chrome107",
        )
        headers   = _create_header()

        response = requests.get(f"https://you.com/api/streamingSearch?{url_param}",
            headers=headers, impersonate="chrome107")

        response.raise_for_status()

        start = 'data: {"youChatToken": '

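The hunk stops at the start prefix, so the parsing that follows is not visible here. Presumably each server-sent line beginning with that prefix carries one token; a sketch of that kind of extraction (an assumption, not code from the diff):

import json

start = 'data: {"youChatToken": '
line  = 'data: {"youChatToken": "Hello"}'  # example SSE line, assumed format

if line.startswith(start):
    token = json.loads(line[len('data: '):])["youChatToken"]
    print(token)  # Hello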
@@ -1,26 +1,26 @@
import requests

from ..typing import Any, CreateResult
from ..typing import Any, CreateResult
from .base_provider import BaseProvider


class Yqcloud(BaseProvider):
    url = "https://chat9.yqcloud.top/"
    working = True
    supports_gpt_35_turbo = True
    url                   = "https://chat9.yqcloud.top/"
    working               = True
    supports_gpt_35_turbo = True

    @staticmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        headers = _create_header()
        payload = _create_payload(messages)

        url = "https://api.aichatos.cloud/api/generateStream"
        response = requests.post(url=url, headers=headers, json=payload)
        response = requests.post("https://api.aichatos.cloud/api/generateStream",
            headers=headers, json=payload)

        response.raise_for_status()
        response.encoding = 'utf-8'
        yield response.text
@@ -28,9 +28,9 @@ class Yqcloud(BaseProvider):

def _create_header():
    return {
        "accept": "application/json, text/plain, */*",
        "content-type": "application/json",
        "origin": "https://chat9.yqcloud.top",
        "accept"      : "application/json, text/plain, */*",
        "content-type": "application/json",
        "origin"      : "https://chat9.yqcloud.top",
    }


@@ -39,10 +39,11 @@ def _create_payload(messages: list[dict[str, str]]):
    for message in messages:
        prompt += "%s: %s\n" % (message["role"], message["content"])
    prompt += "assistant:"

    return {
        "prompt": prompt,
        "network": True,
        "system": "",
        "prompt"        : prompt,
        "network"       : True,
        "system"        : "",
        "withoutContext": False,
        "stream": False,
        "stream"        : False,
    }
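Not in the commit: assuming prompt starts out empty in the elided first line of _create_payload, a one-message history produces a payload like this:

payload = _create_payload([{"role": "user", "content": "Hi"}])
# {
#     "prompt": "user: Hi\nassistant:",
#     "network": True,
#     "system": "",
#     "withoutContext": False,
#     "stream": False,
# }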
@@ -1,65 +1,66 @@
from .Acytoo import Acytoo
from .Aichat import Aichat
from .Ails import Ails
from .AiService import AiService
from .AItianhu import AItianhu
from .Bard import Bard
from .Acytoo       import Acytoo
from .Aichat       import Aichat
from .Ails         import Ails
from .AiService    import AiService
from .AItianhu     import AItianhu
from .Bard         import Bard
from .Bing import Bing
from .ChatgptAi import ChatgptAi
from .ChatgptLogin import ChatgptLogin
from .DeepAi import DeepAi
from .DfeHub import DfeHub
from .EasyChat import EasyChat
from .Forefront import Forefront
from .GetGpt import GetGpt
from .H2o import H2o
from .Hugchat import Hugchat
from .Liaobots import Liaobots
from .Lockchat import Lockchat
from .Opchatgpts import Opchatgpts
from .OpenaiChat import OpenaiChat
from .Raycast import Raycast
from .Theb import Theb
from .Vercel import Vercel
from .Wewordle import Wewordle
from .You import You
from .Yqcloud import Yqcloud
from .Equing import Equing
from .FastGpt import FastGpt
from .V50 import V50
from .Wuguokai import Wuguokai

from .base_provider import BaseProvider
from .Bing         import Bing
from .ChatgptAi    import ChatgptAi
from .ChatgptLogin import ChatgptLogin
from .DeepAi       import DeepAi
from .DfeHub       import DfeHub
from .EasyChat     import EasyChat
from .Forefront    import Forefront
from .GetGpt       import GetGpt
from .H2o          import H2o
from .Hugchat      import Hugchat
from .Liaobots     import Liaobots
from .Lockchat     import Lockchat
from .Opchatgpts   import Opchatgpts
from .OpenaiChat   import OpenaiChat
from .Raycast      import Raycast
from .Theb         import Theb
from .Vercel       import Vercel
from .Wewordle     import Wewordle
from .You          import You
from .Yqcloud      import Yqcloud
from .Equing       import Equing
from .FastGpt      import FastGpt
from .V50          import V50
from .Wuguokai     import Wuguokai

__all__ = [
    "BaseProvider",
    "Acytoo",
    "Aichat",
    "Ails",
    "AiService",
    "AItianhu",
    "Bard",
    "Bing",
    "ChatgptAi",
    "ChatgptLogin",
    "DeepAi",
    "DfeHub",
    "EasyChat",
    "Forefront",
    "GetGpt",
    "H2o",
    "Hugchat",
    "Liaobots",
    "Lockchat",
    "Opchatgpts",
    "Raycast",
    "OpenaiChat",
    "Theb",
    "Vercel",
    "Wewordle",
    "You",
    "Yqcloud",
    "Equing",
    "FastGpt",
    "Wuguokai"
    "V50"
    'BaseProvider',
    'Acytoo',
    'Aichat',
    'Ails',
    'AiService',
    'AItianhu',
    'Bard',
    'Bing',
    'ChatgptAi',
    'ChatgptLogin',
    'DeepAi',
    'DfeHub',
    'EasyChat',
    'Forefront',
    'GetGpt',
    'H2o',
    'Hugchat',
    'Liaobots',
    'Lockchat',
    'Opchatgpts',
    'Raycast',
    'OpenaiChat',
    'Theb',
    'Vercel',
    'Wewordle',
    'You',
    'Yqcloud',
    'Equing',
    'FastGpt',
    'Wuguokai',
    'V50'
]

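Not part of the diff: with the names re-exported above, callers can enumerate providers and filter on the class flags, for example:

import g4f.Provider as Provider

# print every provider class currently flagged working
for name in Provider.__all__:
    cls = getattr(Provider, name)
    if name != "BaseProvider" and cls.working:
        print(name)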
@@ -9,20 +9,19 @@ import math

class BaseProvider(ABC):
    url: str
    working = False
    needs_auth = False
    supports_stream = False
    working               = False
    needs_auth            = False
    supports_stream       = False
    supports_gpt_35_turbo = False
    supports_gpt_4 = False
    supports_gpt_4        = False

    @staticmethod
    @abstractmethod
    def create_completion(
        model: str,
        messages: list[dict[str, str]],
        stream: bool,
        **kwargs: Any,
    ) -> CreateResult:
        stream: bool, **kwargs: Any) -> CreateResult:

        raise NotImplementedError()

    @classmethod
@@ -42,8 +41,10 @@ _cookies = {}
def get_cookies(cookie_domain: str) -> dict:
    if cookie_domain not in _cookies:
        _cookies[cookie_domain] = {}

        for cookie in browser_cookie3.load(cookie_domain):
            _cookies[cookie_domain][cookie.name] = cookie.value

    return _cookies[cookie_domain]


@@ -53,18 +54,15 @@ class AsyncProvider(BaseProvider):
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = False,
        **kwargs: Any
    ) -> CreateResult:
        stream: bool = False, **kwargs: Any) -> CreateResult:

        yield asyncio.run(cls.create_async(model, messages, **kwargs))

    @staticmethod
    @abstractmethod
    async def create_async(
        model: str,
        messages: list[dict[str, str]],
        **kwargs: Any,
    ) -> str:
        messages: list[dict[str, str]], **kwargs: Any) -> str:
        raise NotImplementedError()


@@ -74,9 +72,8 @@ class AsyncGeneratorProvider(AsyncProvider):
        cls,
        model: str,
        messages: list[dict[str, str]],
        stream: bool = True,
        **kwargs: Any
    ) -> CreateResult:
        stream: bool = True, **kwargs: Any) -> CreateResult:

        if stream:
            yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
        else:
@@ -86,9 +83,8 @@ class AsyncGeneratorProvider(AsyncProvider):
    async def create_async(
        cls,
        model: str,
        messages: list[dict[str, str]],
        **kwargs: Any,
    ) -> str:
        messages: list[dict[str, str]], **kwargs: Any) -> str:

        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
        if chunks:
            return "".join(chunks)
@@ -97,14 +93,14 @@ class AsyncGeneratorProvider(AsyncProvider):
    @abstractmethod
    def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
    ) -> AsyncGenerator:
        messages: list[dict[str, str]]) -> AsyncGenerator:

        raise NotImplementedError()


def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
    loop = asyncio.new_event_loop()
    gen = generator.__aiter__()
    gen  = generator.__aiter__()

    while True:
        try:

|
||||
from . import models
|
||||
from .Provider import BaseProvider
|
||||
from .typing import Any, CreateResult, Union
|
||||
from . import models
|
||||
from .Provider import BaseProvider
|
||||
from .typing import Any, CreateResult, Union
|
||||
|
||||
logging = False
|
||||
|
||||
|
||||
class ChatCompletion:
|
||||
@staticmethod
|
||||
def create(
|
||||
model: Union[models.Model, str],
|
||||
messages: list[dict[str, str]],
|
||||
provider: Union[type[BaseProvider], None] = None,
|
||||
stream: bool = False,
|
||||
auth: Union[str, None] = None,
|
||||
**kwargs: Any,
|
||||
) -> Union[CreateResult, str]:
|
||||
model : Union[models.Model, str],
|
||||
messages : list[dict[str, str]],
|
||||
provider : Union[type[BaseProvider], None] = None,
|
||||
stream : bool = False,
|
||||
auth : Union[str, None] = None, **kwargs: Any) -> Union[CreateResult, str]:
|
||||
|
||||
if isinstance(model, str):
|
||||
try:
|
||||
model = models.ModelUtils.convert[model]
|
||||
except KeyError:
|
||||
raise Exception(f"The model: {model} does not exist")
|
||||
raise Exception(f'The model: {model} does not exist')
|
||||
|
||||
provider = model.best_provider if provider == None else provider
|
||||
|
||||
if not provider.working:
|
||||
raise Exception(f"{provider.__name__} is not working")
|
||||
raise Exception(f'{provider.__name__} is not working')
|
||||
|
||||
if provider.needs_auth and not auth:
|
||||
raise Exception(
|
||||
f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)'
|
||||
)
|
||||
f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)')
|
||||
|
||||
if provider.needs_auth:
|
||||
kwargs["auth"] = auth
|
||||
kwargs['auth'] = auth
|
||||
|
||||
if not provider.supports_stream and stream:
|
||||
raise Exception(
|
||||
f"ValueError: {provider.__name__} does not support 'stream' argument"
|
||||
)
|
||||
f'ValueError: {provider.__name__} does not support "stream" argument')
|
||||
|
||||
if logging:
|
||||
print(f"Using {provider.__name__} provider")
|
||||
print(f'Using {provider.__name__} provider')
|
||||
|
||||
result = provider.create_completion(model.name, messages, stream, **kwargs)
|
||||
return result if stream else "".join(result)
|
||||
return result if stream else ''.join(result)
|
||||
|
||||
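Not part of the diff: the final return line gives create() two call shapes. With stream=False the generator is joined into one string; with stream=True, and a resolved provider whose supports_stream is True, the generator itself comes back:

import g4f

text = g4f.ChatCompletion.create(
    model    = "gpt-3.5-turbo",
    messages = [{"role": "user", "content": "Hi"}])  # -> str

for chunk in g4f.ChatCompletion.create(
        model    = "gpt-3.5-turbo",
        messages = [{"role": "user", "content": "Hi"}],
        stream   = True):                            # -> generator of chunks
    print(chunk, end="")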
g4f/models.py
@@ -1,8 +1,6 @@
from dataclasses import dataclass

from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing


@dataclass
class Model:
    name: str
@@ -12,214 +10,190 @@ class Model:

# GPT-3.5 / GPT-4
gpt_35_turbo = Model(
    name="gpt-3.5-turbo",
    base_provider="openai",
    best_provider=GetGpt,
)
    name          = 'gpt-3.5-turbo',
    base_provider = 'openai',
    best_provider = GetGpt)

gpt_4 = Model(
    name="gpt-4",
    base_provider="openai",
    best_provider=Liaobots,
)
    name          = 'gpt-4',
    base_provider = 'openai',
    best_provider = Liaobots)

# Bard
palm = Model(
    name="palm",
    base_provider="google",
    best_provider=Bard,
)
    name          = 'palm',
    base_provider = 'google',
    best_provider = Bard)

# H2o
falcon_7b = Model(
    name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3",
    base_provider="huggingface",
    best_provider=H2o,
)
    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3',
    base_provider = 'huggingface',
    best_provider = H2o)

falcon_40b = Model(
    name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1",
    base_provider="huggingface",
    best_provider=H2o,
)
    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1',
    base_provider = 'huggingface',
    best_provider = H2o)

llama_13b = Model(
    name="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b",
    base_provider="huggingface",
    best_provider=H2o,
)
    name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b',
    base_provider = 'huggingface',
    best_provider = H2o)

# Vercel
claude_instant_v1 = Model(
    name="anthropic:claude-instant-v1",
    base_provider="anthropic",
    best_provider=Vercel,
)
    name          = 'anthropic:claude-instant-v1',
    base_provider = 'anthropic',
    best_provider = Vercel)

claude_v1 = Model(
    name="anthropic:claude-v1",
    base_provider="anthropic",
    best_provider=Vercel,
)
    name          = 'anthropic:claude-v1',
    base_provider = 'anthropic',
    best_provider = Vercel)

claude_v2 = Model(
    name="anthropic:claude-v2",
    base_provider="anthropic",
    best_provider=Vercel,
)
    name          = 'anthropic:claude-v2',
    base_provider = 'anthropic',
    best_provider = Vercel)

command_light_nightly = Model(
    name="cohere:command-light-nightly",
    base_provider="cohere",
    best_provider=Vercel,
)
    name          = 'cohere:command-light-nightly',
    base_provider = 'cohere',
    best_provider = Vercel)

command_nightly = Model(
    name="cohere:command-nightly",
    base_provider="cohere",
    best_provider=Vercel,
)
    name          = 'cohere:command-nightly',
    base_provider = 'cohere',
    best_provider = Vercel)

gpt_neox_20b = Model(
    name="huggingface:EleutherAI/gpt-neox-20b",
    base_provider="huggingface",
    best_provider=Vercel,
)
    name          = 'huggingface:EleutherAI/gpt-neox-20b',
    base_provider = 'huggingface',
    best_provider = Vercel)

oasst_sft_1_pythia_12b = Model(
    name="huggingface:OpenAssistant/oasst-sft-1-pythia-12b",
    base_provider="huggingface",
    best_provider=Vercel,
)
    name          = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b',
    base_provider = 'huggingface',
    best_provider = Vercel)

oasst_sft_4_pythia_12b_epoch_35 = Model(
    name="huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5",
    base_provider="huggingface",
    best_provider=Vercel,
)
    name          = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5',
    base_provider = 'huggingface',
    best_provider = Vercel)

santacoder = Model(
    name="huggingface:bigcode/santacoder",
    base_provider="huggingface",
    best_provider=Vercel,
)
    name          = 'huggingface:bigcode/santacoder',
    base_provider = 'huggingface',
    best_provider = Vercel)

bloom = Model(
    name="huggingface:bigscience/bloom",
    base_provider="huggingface",
    best_provider=Vercel,
)
    name          = 'huggingface:bigscience/bloom',
    base_provider = 'huggingface',
    best_provider = Vercel)

flan_t5_xxl = Model(
    name="huggingface:google/flan-t5-xxl",
    base_provider="huggingface",
    best_provider=Vercel,
)
    name          = 'huggingface:google/flan-t5-xxl',
    base_provider = 'huggingface',
    best_provider = Vercel)

code_davinci_002 = Model(
    name="openai:code-davinci-002",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:code-davinci-002',
    base_provider = 'openai',
    best_provider = Vercel)

gpt_35_turbo_16k = Model(
    name="openai:gpt-3.5-turbo-16k",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:gpt-3.5-turbo-16k',
    base_provider = 'openai',
    best_provider = Vercel)

gpt_35_turbo_16k_0613 = Model(
    name="openai:gpt-3.5-turbo-16k-0613",
    base_provider="openai",
    best_provider=Equing,
)
    name          = 'openai:gpt-3.5-turbo-16k-0613',
    base_provider = 'openai',
    best_provider = Equing)

gpt_4_0613 = Model(
    name="openai:gpt-4-0613",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:gpt-4-0613',
    base_provider = 'openai',
    best_provider = Vercel)

text_ada_001 = Model(
    name="openai:text-ada-001",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:text-ada-001',
    base_provider = 'openai',
    best_provider = Vercel)

text_babbage_001 = Model(
    name="openai:text-babbage-001",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:text-babbage-001',
    base_provider = 'openai',
    best_provider = Vercel)

text_curie_001 = Model(
    name="openai:text-curie-001",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:text-curie-001',
    base_provider = 'openai',
    best_provider = Vercel)

text_davinci_002 = Model(
    name="openai:text-davinci-002",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:text-davinci-002',
    base_provider = 'openai',
    best_provider = Vercel)

text_davinci_003 = Model(
    name="openai:text-davinci-003",
    base_provider="openai",
    best_provider=Vercel,
)
    name          = 'openai:text-davinci-003',
    base_provider = 'openai',
    best_provider = Vercel)

llama13b_v2_chat = Model(
    name="replicate:a16z-infra/llama13b-v2-chat",
    base_provider="replicate",
    best_provider=Vercel,
)
    name          = 'replicate:a16z-infra/llama13b-v2-chat',
    base_provider = 'replicate',
    best_provider = Vercel)

llama7b_v2_chat = Model(
    name="replicate:a16z-infra/llama7b-v2-chat",
    base_provider="replicate",
    best_provider=Vercel,
)
    name          = 'replicate:a16z-infra/llama7b-v2-chat',
    base_provider = 'replicate',
    best_provider = Vercel)


class ModelUtils:
    convert: dict[str, Model] = {
        # GPT-3.5 / GPT-4
        "gpt-3.5-turbo": gpt_35_turbo,
        "gpt-4": gpt_4,
        'gpt-3.5-turbo' : gpt_35_turbo,
        'gpt-4'         : gpt_4,

        # Bard
        "palm2": palm,
        "palm": palm,
        "google": palm,
        "google-bard": palm,
        "google-palm": palm,
        "bard": palm,
        'palm2'       : palm,
        'palm'        : palm,
        'google'      : palm,
        'google-bard' : palm,
        'google-palm' : palm,
        'bard'        : palm,

        # H2o
        "falcon-40b": falcon_40b,
        "falcon-7b": falcon_7b,
        "llama-13b": llama_13b,
        'falcon-40b' : falcon_40b,
        'falcon-7b'  : falcon_7b,
        'llama-13b'  : llama_13b,

        # Vercel
        "claude-instant-v1": claude_instant_v1,
        "claude-v1": claude_v1,
        "claude-v2": claude_v2,
        "command-light-nightly": command_light_nightly,
        "command-nightly": command_nightly,
        "gpt-neox-20b": gpt_neox_20b,
        "oasst-sft-1-pythia-12b": oasst_sft_1_pythia_12b,
        "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35,
        "santacoder": santacoder,
        "bloom": bloom,
        "flan-t5-xxl": flan_t5_xxl,
        "code-davinci-002": code_davinci_002,
        "gpt-3.5-turbo-16k": gpt_35_turbo_16k,
        "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613,
        "gpt-4-0613": gpt_4_0613,
        "text-ada-001": text_ada_001,
        "text-babbage-001": text_babbage_001,
        "text-curie-001": text_curie_001,
        "text-davinci-002": text_davinci_002,
        "text-davinci-003": text_davinci_003,
        "llama13b-v2-chat": llama13b_v2_chat,
        "llama7b-v2-chat": llama7b_v2_chat,
        'claude-instant-v1'                : claude_instant_v1,
        'claude-v1'                        : claude_v1,
        'claude-v2'                        : claude_v2,
        'command-nightly'                  : command_nightly,
        'gpt-neox-20b'                     : gpt_neox_20b,
        'santacoder'                       : santacoder,
        'bloom'                            : bloom,
        'flan-t5-xxl'                      : flan_t5_xxl,
        'code-davinci-002'                 : code_davinci_002,
        'gpt-3.5-turbo-16k'                : gpt_35_turbo_16k,
        'gpt-4-0613'                       : gpt_4_0613,
        'text-ada-001'                     : text_ada_001,
        'text-babbage-001'                 : text_babbage_001,
        'text-curie-001'                   : text_curie_001,
        'text-davinci-002'                 : text_davinci_002,
        'text-davinci-003'                 : text_davinci_003,
        'llama13b-v2-chat'                 : llama13b_v2_chat,
        'llama7b-v2-chat'                  : llama7b_v2_chat,

        'oasst-sft-1-pythia-12b'           : oasst_sft_1_pythia_12b,
        'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35,
        'command-light-nightly'            : command_light_nightly,
        'gpt-3.5-turbo-16k-0613'           : gpt_35_turbo_16k_0613,
    }
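Not in the commit: the convert table is what lets ChatCompletion.create accept plain strings; a lookup resolves to a Model whose best_provider drives the call.

from g4f import models

model = models.ModelUtils.convert["claude-v2"]
print(model.name)                    # anthropic:claude-v2
print(model.base_provider)           # anthropic
print(model.best_provider.__name__)  # Vercel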
@@ -1,15 +1,14 @@
from typing import Any, AsyncGenerator, Generator, NewType, Tuple, TypedDict, Union

SHA256 = NewType("sha_256_hash", str)
SHA256 = NewType('sha_256_hash', str)
CreateResult = Generator[str, None, None]


__all__ = [
    "Any",
    "AsyncGenerator",
    "Generator",
    "Tuple",
    "TypedDict",
    "SHA256",
    "CreateResult",
    'Any',
    'AsyncGenerator',
    'Generator',
    'Tuple',
    'TypedDict',
    'SHA256',
    'CreateResult',
]
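Not part of the diff: CreateResult is just Generator[str, None, None], so any provider's create_completion type-checks as a plain generator of strings.

from g4f.typing import CreateResult

def fake_completion() -> CreateResult:
    yield "hello "
    yield "world"

print("".join(fake_completion()))  # hello world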