mirror of https://github.com/xtekky/gpt4free.git
	~ | code styling
@@ -15,9 +15,8 @@ class AItianhu(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         base = ""
         for message in messages:
             base += "%s: %s\n" % (message["role"], message["content"])
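
Note: the provider flattens the chat history into a plain "role: content" transcript before sending it upstream. A worked example of what base ends up holding for a two-message history (illustrative values):

    messages = [{"role": "user", "content": "Hi"},
                {"role": "assistant", "content": "Hello!"}]
    base = ""
    for message in messages:
        base += "%s: %s\n" % (message["role"], message["content"])
    print(base)
    # user: Hi
    # assistant: Hello!
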
@@ -7,7 +7,7 @@ from .base_provider import BaseProvider


 class Acytoo(BaseProvider):
-    url = "https://chat.acytoo.com/"
+    url                   = 'https://chat.acytoo.com/'
     working               = True
     supports_gpt_35_turbo = True

@@ -16,33 +16,33 @@ class Acytoo(BaseProvider):
         cls,
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        headers = _create_header()
-        payload = _create_payload(messages, kwargs.get('temperature', 0.5))
+        stream: bool, **kwargs: Any) -> CreateResult:
+
+        response = requests.post(f'{cls.url}api/completions',
+                                 headers=_create_header(), json=_create_payload(messages, kwargs.get('temperature', 0.5)))

-        response = requests.post("{cls.url}api/completions", headers=headers, json=payload)
         response.raise_for_status()
-        response.encoding = "utf-8"
+        response.encoding = 'utf-8'

         yield response.text


 def _create_header():
     return {
-        "accept": "*/*",
-        "content-type": "application/json",
+        'accept': '*/*',
+        'content-type': 'application/json',
     }


 def _create_payload(messages: list[dict[str, str]], temperature):
     payload_messages = [
-        message | {"createdAt": int(time.time()) * 1000} for message in messages
+        message | {'createdAt': int(time.time()) * 1000} for message in messages
     ]

     return {
-        "key": "",
-        "model": "gpt-3.5-turbo",
-        "messages": payload_messages,
-        "temperature": temperature,
-        "password": "",
+        'key'         : '',
+        'model'       : 'gpt-3.5-turbo',
+        'messages'    : payload_messages,
+        'temperature' : temperature,
+        'password'    : ''
     }
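
Note: the restyled request interpolates the URL with an f-string, f'{cls.url}api/completions'; the earlier call sent the literal text "{cls.url}api/completions" because the f prefix was missing. Separately, the message | {...} comprehension in _create_payload relies on the dict-union operator, which needs Python 3.9 or newer. An equivalent spelling for older interpreters (an illustrative sketch, not part of the commit):

    import time

    payload_messages = [
        {**message, 'createdAt': int(time.time()) * 1000}  # same result as message | {...}
        for message in messages
    ]

Also, int(time.time()) * 1000 truncates to whole seconds before scaling, so 'createdAt' is millisecond-shaped but carries no sub-second precision.
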
@@ -13,11 +13,9 @@ class Aichat(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        base = ""
+        stream: bool, **kwargs: Any) -> CreateResult:

+        base = ""
         for message in messages:
             base += "%s: %s\n" % (message["role"], message["content"])
         base += "assistant:"
@@ -9,7 +9,6 @@ import requests
 from ..typing       import SHA256, Any, CreateResult
 from .base_provider import BaseProvider

-
 class Ails(BaseProvider):
     url: str              = "https://ai.ls"
     working               = True

@@ -20,9 +19,8 @@ class Ails(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
             "authority": "api.caipacity.com",
             "accept": "*/*",
@@ -19,9 +19,7 @@ class Bard(AsyncProvider):
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        cookies: dict = get_cookies(".google.com"),
-        **kwargs: Any,
-    ) -> str:
+        cookies: dict = get_cookies(".google.com"), **kwargs: Any,) -> str:

         formatted = "\n".join(
             ["%s: %s" % (message["role"], message["content"]) for message in messages]
@@ -1,12 +1,6 @@
-import asyncio
-import json
-import os
-import random
-
-import aiohttp
-import asyncio
+import asyncio, aiohttp, json, os, random
 from aiohttp        import ClientSession

 from ..typing       import Any, AsyncGenerator, CreateResult, Union
 from .base_provider import AsyncGeneratorProvider, get_cookies

@@ -15,15 +9,14 @@ class Bing(AsyncGeneratorProvider):
     needs_auth      = True
     working         = True
     supports_gpt_4  = True
-    supports_stream=True
+    supports_stream = True

     @staticmethod
     def create_async_generator(
             model: str,
             messages: list[dict[str, str]],
-            cookies: dict = get_cookies(".bing.com"),
-            **kwargs
-        ) -> AsyncGenerator:
+            cookies: dict = get_cookies(".bing.com"), **kwargs) -> AsyncGenerator:
+
         if len(messages) < 2:
             prompt = messages[0]["content"]
             context = None
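
Note: create_async_generator yields tokens asynchronously instead of returning a blocking generator. Consuming it looks roughly like this (a sketch; it assumes valid .bing.com cookies are available to get_cookies):

    import asyncio

    async def main():
        gen = Bing.create_async_generator(
            model="gpt-4",
            messages=[{"role": "user", "content": "Hi"}])
        async for token in gen:
            print(token, end="")

    asyncio.run(main())
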
@@ -1,13 +1,11 @@
-import re
-
-import requests
+import re, requests

 from ..typing       import Any, CreateResult
 from .base_provider import BaseProvider


 class ChatgptAi(BaseProvider):
-    url = "https://chatgpt.ai/gpt-4/"
+    url: str        = "https://chatgpt.ai/gpt-4/"
     working         = True
     supports_gpt_4  = True

@@ -15,9 +13,8 @@ class ChatgptAi(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         chat = ""
         for message in messages:
             chat += "%s: %s\n" % (message["role"], message["content"])

@@ -26,36 +23,35 @@ class ChatgptAi(BaseProvider):
         response = requests.get("https://chatgpt.ai/")
         nonce, post_id, _, bot_id = re.findall(
             r'data-nonce="(.*)"\n     data-post-id="(.*)"\n     data-url="(.*)"\n     data-bot-id="(.*)"\n     data-width',
-            response.text,
-        )[0]
+            response.text)[0]

         headers = {
-            "authority": "chatgpt.ai",
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "cache-control": "no-cache",
-            "origin": "https://chatgpt.ai",
-            "pragma": "no-cache",
-            "referer": "https://chatgpt.ai/gpt-4/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "authority"          : "chatgpt.ai",
+            "accept"             : "*/*",
+            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "cache-control"      : "no-cache",
+            "origin"             : "https://chatgpt.ai",
+            "pragma"             : "no-cache",
+            "referer"            : "https://chatgpt.ai/gpt-4/",
+            "sec-ch-ua"          : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "sec-ch-ua-mobile"   : "?0",
+            "sec-ch-ua-platform" : '"Windows"',
+            "sec-fetch-dest"     : "empty",
+            "sec-fetch-mode"     : "cors",
+            "sec-fetch-site"     : "same-origin",
+            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
         }
         data = {
-            "_wpnonce": nonce,
-            "post_id": post_id,
-            "url": "https://chatgpt.ai/gpt-4",
-            "action": "wpaicg_chat_shortcode_message",
-            "message": chat,
-            "bot_id": bot_id,
+            "_wpnonce" : nonce,
+            "post_id"  : post_id,
+            "url"      : "https://chatgpt.ai/gpt-4",
+            "action"   : "wpaicg_chat_shortcode_message",
+            "message"  : chat,
+            "bot_id"   : bot_id,
         }

         response = requests.post(
-            "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data
-        )
+            "https://chatgpt.ai/wp-admin/admin-ajax.php", headers=headers, data=data)
+
         response.raise_for_status()
         yield response.json()["data"]
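
Note: the scrape works because re.findall with several capture groups returns one tuple per match, which is why the four page attributes unpack from index [0]. A self-contained illustration against a hypothetical snippet of the page markup:

    import re

    html = 'data-nonce="abc"\n     data-post-id="7"\n     data-url="u"\n     data-bot-id="9"\n     data-width'
    nonce, post_id, _, bot_id = re.findall(
        r'data-nonce="(.*)"\n     data-post-id="(.*)"\n     data-url="(.*)"\n     data-bot-id="(.*)"\n     data-width',
        html)[0]
    print(nonce, post_id, bot_id)  # abc 7 9
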
@@ -1,8 +1,4 @@
-import base64
-import os
-import re
-
-import requests
+import base64, os, re, requests

 from ..typing       import Any, CreateResult
 from .base_provider import BaseProvider

@@ -17,53 +13,50 @@ class ChatgptLogin(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "authority": "chatgptlogin.ac",
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "content-type": "application/json",
-            "origin": "https://opchatgpts.net",
-            "referer": "https://opchatgpts.net/chatgpt-free-use/",
-            "sec-ch-ua": '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
-            "x-wp-nonce": _get_nonce(),
+            "authority"          : "chatgptlogin.ac",
+            "accept"             : "*/*",
+            "accept-language"    : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "content-type"       : "application/json",
+            "origin"             : "https://opchatgpts.net",
+            "referer"            : "https://opchatgpts.net/chatgpt-free-use/",
+            "sec-ch-ua"          : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            "sec-ch-ua-mobile"   : "?0",
+            "sec-ch-ua-platform" : '"Windows"',
+            "sec-fetch-dest"     : "empty",
+            "sec-fetch-mode"     : "cors",
+            "sec-fetch-site"     : "same-origin",
+            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
+            "x-wp-nonce"         : _get_nonce(),
         }

         conversation = _transform(messages)

         json_data = {
-            "env": "chatbot",
-            "session": "N/A",
-            "prompt": "Converse as if you were an AI assistant. Be friendly, creative.",
-            "context": "Converse as if you were an AI assistant. Be friendly, creative.",
-            "messages": conversation,
-            "newMessage": messages[-1]["content"],
-            "userName": '<div class="mwai-name-text">User:</div>',
-            "aiName": '<div class="mwai-name-text">AI:</div>',
-            "model": "gpt-3.5-turbo",
-            "temperature": kwargs.get("temperature", 0.8),
-            "maxTokens": 1024,
-            "maxResults": 1,
-            "apiKey": "",
-            "service": "openai",
-            "embeddingsIndex": "",
-            "stop": "",
-            "clientId": os.urandom(6).hex(),
+            "env"            : "chatbot",
+            "session"        : "N/A",
+            "prompt"         : "Converse as if you were an AI assistant. Be friendly, creative.",
+            "context"        : "Converse as if you were an AI assistant. Be friendly, creative.",
+            "messages"       : conversation,
+            "newMessage"     : messages[-1]["content"],
+            "userName"       : '<div class="mwai-name-text">User:</div>',
+            "aiName"         : '<div class="mwai-name-text">AI:</div>',
+            "model"          : "gpt-3.5-turbo",
+            "temperature"    : kwargs.get("temperature", 0.8),
+            "maxTokens"      : 1024,
+            "maxResults"     : 1,
+            "apiKey"         : "",
+            "service"        : "openai",
+            "embeddingsIndex": "",
+            "stop"           : "",
+            "clientId"       : os.urandom(6).hex()
         }

-        response = requests.post(
-            "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
-            headers=headers,
-            json=json_data,
-        )
+        response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat",
+            headers=headers, json=json_data)
+
         response.raise_for_status()
         yield response.json()["reply"]

@@ -81,18 +74,15 @@ class ChatgptLogin(BaseProvider):


 def _get_nonce() -> str:
-    res = requests.get(
-        "https://opchatgpts.net/chatgpt-free-use/",
-        headers={
-            "Referer": "https://opchatgpts.net/chatgpt-free-use/",
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-        },
-    )
+    res = requests.get("https://opchatgpts.net/chatgpt-free-use/",
+        headers = {
+            "Referer"   : "https://opchatgpts.net/chatgpt-free-use/",
+            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36"})

     result = re.search(
         r'class="mwai-chat mwai-chatgpt">.*<span>Send</span></button></div></div></div> <script defer src="(.*?)">',
-        res.text,
-    )
+        res.text)
+
     if result is None:
         return ""

@@ -106,11 +96,11 @@ def _get_nonce() -> str:
 def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:
     return [
         {
-            "id": os.urandom(6).hex(),
-            "role": message["role"],
+            "id"     : os.urandom(6).hex(),
+            "role"   : message["role"],
             "content": message["content"],
-            "who": "AI: " if message["role"] == "assistant" else "User: ",
-            "html": _html_encode(message["content"]),
+            "who"    : "AI: " if message["role"] == "assistant" else "User: ",
+            "html"   : _html_encode(message["content"]),
         }
         for message in messages
     ]
@@ -118,14 +108,14 @@ def _transform(messages: list[dict[str, str]]) -> list[dict[str, Any]]:

 def _html_encode(string: str) -> str:
     table = {
-        '"': "&quot;",
-        "'": "&apos;",
-        "&": "&amp;",
-        ">": "&gt;",
-        "<": "&lt;",
+        '"' : "&quot;",
+        "'" : "&apos;",
+        "&" : "&amp;",
+        ">" : "&gt;",
+        "<" : "&lt;",
         "\n": "<br>",
         "\t": "&nbsp;&nbsp;&nbsp;&nbsp;",
-        " ": "&nbsp;",
+        " " : "&nbsp;",
     }

     for key in table:
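
Note: the hunk cuts off at the replacement loop. For context, a minimal sketch of how a table-driven encoder like this typically finishes (an assumption, not part of this commit):

    for key in table:
        string = string.replace(key, table[key])  # assumed body of the truncated loop
    return string

Order is load-bearing in such a loop: substituting '&' after entities like &quot; have already been inserted would double-encode their ampersands, so '&' has to be handled before, or kept away from, the earlier substitutions.
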
@@ -1,14 +1,11 @@
-import json
-
-import js2py
-import requests
+import json, js2py, requests

 from ..typing       import Any, CreateResult
 from .base_provider import BaseProvider


 class DeepAi(BaseProvider):
-    url = "https://deepai.org"
+    url: str              = "https://deepai.org"
     working               = True
     supports_stream       = True
     supports_gpt_35_turbo = True

@@ -17,10 +14,8 @@ class DeepAi(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
-        url = "https://api.deepai.org/make_me_a_pizza"
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         token_js = """
 var agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36'
 var a, b, c, d, e, h, f, l, g, k, m, n, r, x, C, E, N, F, T, O, P, w, D, G, Q, R, W, I, aa, fa, na, oa, ha, ba, X, ia, ja, ka, J, la, K, L, ca, S, U, M, ma, B, da, V, Y;

@@ -54,7 +49,9 @@ f = function () {
             "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
         }

-        response = requests.post(url, headers=headers, data=payload, stream=True)
+        response = requests.post("https://api.deepai.org/make_me_a_pizza",
+                                 headers=headers, data=payload, stream=True)
+
         for chunk in response.iter_content(chunk_size=None):
             response.raise_for_status()
             yield chunk.decode()
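
Note: token_js is a blob of obfuscated JavaScript that the provider evaluates with js2py to compute a token for the request (the header assembly sits outside this hunk). A minimal sketch of the same mechanism, using a hypothetical script rather than the provider's:

    import js2py

    # Compile a JS function and call it from Python as an ordinary callable.
    add = js2py.eval_js('function add(a, b) { return a + b }')
    print(add(1, 2))  # -> 3

js2py translates the JavaScript to Python at runtime, which is what lets the token routine run without a browser.
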
@@ -1,8 +1,4 @@
-import json
-import re
-import time
-
-import requests
+import json, re, time , requests

 from ..typing       import Any, CreateResult
 from .base_provider import BaseProvider

@@ -17,41 +13,37 @@ class DfeHub(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "authority": "chat.dfehub.com",
-            "accept": "*/*",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
-            "content-type": "application/json",
-            "origin": "https://chat.dfehub.com",
-            "referer": "https://chat.dfehub.com/",
-            "sec-ch-ua": '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"macOS"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
-            "x-requested-with": "XMLHttpRequest",
+            "authority"         : "chat.dfehub.com",
+            "accept"            : "*/*",
+            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
+            "content-type"      : "application/json",
+            "origin"            : "https://chat.dfehub.com",
+            "referer"           : "https://chat.dfehub.com/",
+            "sec-ch-ua"         : '"Not.A/Brand";v="8", "Chromium";v="114", "Google Chrome";v="114"',
+            "sec-ch-ua-mobile"  : "?0",
+            "sec-ch-ua-platform": '"macOS"',
+            "sec-fetch-dest"    : "empty",
+            "sec-fetch-mode"    : "cors",
+            "sec-fetch-site"    : "same-origin",
+            "user-agent"        : "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            "x-requested-with"  : "XMLHttpRequest",
         }

         json_data = {
-            "messages": messages,
-            "model": "gpt-3.5-turbo",
-            "temperature": kwargs.get("temperature", 0.5),
-            "presence_penalty": kwargs.get("presence_penalty", 0),
-            "frequency_penalty": kwargs.get("frequency_penalty", 0),
-            "top_p": kwargs.get("top_p", 1),
-            "stream": True,
+            "messages"          : messages,
+            "model"             : "gpt-3.5-turbo",
+            "temperature"       : kwargs.get("temperature", 0.5),
+            "presence_penalty"  : kwargs.get("presence_penalty", 0),
+            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+            "top_p"             : kwargs.get("top_p", 1),
+            "stream"            : True
         }
-        response = requests.post(
-            "https://chat.dfehub.com/api/openai/v1/chat/completions",
-            headers=headers,
-            json=json_data,
-            timeout=3
-        )
+
+        response = requests.post("https://chat.dfehub.com/api/openai/v1/chat/completions",
+            headers=headers, json=json_data, timeout=3)

         for chunk in response.iter_lines():
             if b"detail" in chunk:
@@ -1,13 +1,11 @@
-import json
-
-import requests
+import json, requests, random

 from ..typing       import Any, CreateResult
 from .base_provider import BaseProvider


 class EasyChat(BaseProvider):
-    url = "https://free.easychat.work"
+    url: str              = "https://free.easychat.work"
     supports_stream       = True
     supports_gpt_35_turbo = True
     working               = True

@@ -16,9 +14,8 @@ class EasyChat(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         active_servers = [
             "https://chat10.fastgpt.me",
             "https://chat9.fastgpt.me",

@@ -28,59 +25,62 @@ class EasyChat(BaseProvider):
             "https://chat4.fastgpt.me",
             "https://gxos1h1ddt.fastgpt.me"
         ]
-        server = active_servers[kwargs.get("active_server", 0)]

+        server  = active_servers[kwargs.get("active_server", random.randint(0, 5))]
         headers = {
-            "authority": f"{server}".replace("https://", ""),
-            "accept": "text/event-stream",
-            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
-            "content-type": "application/json",
-            "origin": f"{server}",
-            "referer": f"{server}/",
-            "x-requested-with": "XMLHttpRequest",
-            'plugins': '0',
-            'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"Windows"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
-            'usesearch': 'false',
-            'x-requested-with': 'XMLHttpRequest'
+            "authority"         : f"{server}".replace("https://", ""),
+            "accept"            : "text/event-stream",
+            "accept-language"   : "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3,fa=0.2",
+            "content-type"      : "application/json",
+            "origin"            : f"{server}",
+            "referer"           : f"{server}/",
+            "x-requested-with"  : "XMLHttpRequest",
+            'plugins'           : '0',
+            'sec-ch-ua'         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"',
+            'sec-ch-ua-mobile'  : '?0',
+            'sec-ch-ua-platform': '"Windows"',
+            'sec-fetch-dest'    : 'empty',
+            'sec-fetch-mode'    : 'cors',
+            'sec-fetch-site'    : 'same-origin',
+            'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
+            'usesearch'         : 'false',
+            'x-requested-with'  : 'XMLHttpRequest'
         }

         json_data = {
-            "messages": messages,
-            "stream": stream,
-            "model": model,
-            "temperature": kwargs.get("temperature", 0.5),
-            "presence_penalty": kwargs.get("presence_penalty", 0),
-            "frequency_penalty": kwargs.get("frequency_penalty", 0),
-            "top_p": kwargs.get("top_p", 1),
+            "messages"          : messages,
+            "stream"            : stream,
+            "model"             : model,
+            "temperature"       : kwargs.get("temperature", 0.5),
+            "presence_penalty"  : kwargs.get("presence_penalty", 0),
+            "frequency_penalty" : kwargs.get("frequency_penalty", 0),
+            "top_p"             : kwargs.get("top_p", 1)
        }

         session = requests.Session()
         # init cookies from server
         session.get(f"{server}/")

-        response = session.post(
-            f"{server}/api/openai/v1/chat/completions",
-            headers=headers,
-            json=json_data,
-            stream=stream,
-        )
+        response = session.post(f"{server}/api/openai/v1/chat/completions",
+            headers=headers, json=json_data, stream=stream)
+
         if response.status_code == 200:

             if stream == False:
                 json_data = response.json()

                 if "choices" in json_data:
                     yield json_data["choices"][0]["message"]["content"]
                 else:
                     raise Exception("No response from server")

             else:

                 for chunk in response.iter_lines():

                     if b"content" in chunk:
                         splitData = chunk.decode().split("data:")

                         if len(splitData) > 1:
                             yield json.loads(splitData[1])["choices"][0]["delta"]["content"]
                         else:
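
Note: the streaming branch pulls delta content out of server-sent-event lines of the form "data: {...}". How one such line breaks down (illustrative payload):

    import json

    chunk = 'data: {"choices": [{"delta": {"content": "Hello"}}]}'
    payload = chunk.split("data:", 1)[1]
    print(json.loads(payload)["choices"][0]["delta"]["content"])  # -> Hello
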
@@ -1,6 +1,6 @@
 import requests, json
-from abc import ABC, abstractmethod

+from abc      import ABC, abstractmethod
 from ..typing import Any, CreateResult

@@ -17,42 +17,42 @@ class Equing(ABC):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:

         headers = {
-            'authority': 'next.eqing.tech',
-            'accept': 'text/event-stream',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cache-control': 'no-cache',
-            'content-type': 'application/json',
-            'origin': 'https://next.eqing.tech',
-            'plugins': '0',
-            'pragma': 'no-cache',
-            'referer': 'https://next.eqing.tech/',
-            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
-            'sec-ch-ua-mobile': '?0',
-            'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
-            'usesearch': 'false',
-            'x-requested-with': 'XMLHttpRequest',
+            'authority'         : 'next.eqing.tech',
+            'accept'            : 'text/event-stream',
+            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control'     : 'no-cache',
+            'content-type'      : 'application/json',
+            'origin'            : 'https://next.eqing.tech',
+            'plugins'           : '0',
+            'pragma'            : 'no-cache',
+            'referer'           : 'https://next.eqing.tech/',
+            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+            'sec-ch-ua-mobile'  : '?0',
+            'sec-ch-ua-platform': '"macOS"',
+            'sec-fetch-dest'    : 'empty',
+            'sec-fetch-mode'    : 'cors',
+            'sec-fetch-site'    : 'same-origin',
+            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+            'usesearch'         : 'false',
+            'x-requested-with'  : 'XMLHttpRequest'
         }

         json_data = {
-            'messages': messages,
-            'stream': stream,
-            'model': model,
-            'temperature': kwargs.get('temperature', 0.5),
-            'presence_penalty': kwargs.get('presence_penalty', 0),
-            'frequency_penalty': kwargs.get('frequency_penalty', 0),
-            'top_p': kwargs.get('top_p', 1),
+            'messages'          : messages,
+            'stream'            : stream,
+            'model'             : model,
+            'temperature'       : kwargs.get('temperature', 0.5),
+            'presence_penalty'  : kwargs.get('presence_penalty', 0),
+            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+            'top_p'             : kwargs.get('top_p', 1),
         }

         response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions',
             headers=headers, json=json_data, stream=stream)

         if not stream:
             yield response.json()["choices"][0]["message"]["content"]
             return
@@ -17,39 +17,37 @@ class FastGpt(ABC):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:

         headers = {
-            'authority': 'chat9.fastgpt.me',
-            'accept': 'text/event-stream',
-            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
-            'cache-control': 'no-cache',
-            'content-type': 'application/json',
-            # 'cookie': 'cf_clearance=idIAwtoSCn0uCzcWLGuD.KtiAJv9a1GsPduEOqIkyHU-1692278595-0-1-cb11fd7a.ab1546d4.ccf35fd7-0.2.1692278595; Hm_lvt_563fb31e93813a8a7094966df6671d3f=1691966491,1692278597; Hm_lpvt_563fb31e93813a8a7094966df6671d3f=1692278597',
-            'origin': 'https://chat9.fastgpt.me',
-            'plugins': '0',
-            'pragma': 'no-cache',
-            'referer': 'https://chat9.fastgpt.me/',
-            'sec-ch-ua': '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
-            'sec-ch-ua-mobile': '?0',
+            'authority'         : 'chat9.fastgpt.me',
+            'accept'            : 'text/event-stream',
+            'accept-language'   : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+            'cache-control'     : 'no-cache',
+            'content-type'      : 'application/json',
+            'origin'            : 'https://chat9.fastgpt.me',
+            'plugins'           : '0',
+            'pragma'            : 'no-cache',
+            'referer'           : 'https://chat9.fastgpt.me/',
+            'sec-ch-ua'         : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"',
+            'sec-ch-ua-mobile'  : '?0',
             'sec-ch-ua-platform': '"macOS"',
-            'sec-fetch-dest': 'empty',
-            'sec-fetch-mode': 'cors',
-            'sec-fetch-site': 'same-origin',
-            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
-            'usesearch': 'false',
-            'x-requested-with': 'XMLHttpRequest',
+            'sec-fetch-dest'    : 'empty',
+            'sec-fetch-mode'    : 'cors',
+            'sec-fetch-site'    : 'same-origin',
+            'user-agent'        : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
+            'usesearch'         : 'false',
+            'x-requested-with'  : 'XMLHttpRequest',
         }

         json_data = {
-            'messages': messages,
-            'stream': stream,
-            'model': model,
-            'temperature': kwargs.get('temperature', 0.5),
-            'presence_penalty': kwargs.get('presence_penalty', 0),
-            'frequency_penalty': kwargs.get('frequency_penalty', 0),
-            'top_p': kwargs.get('top_p', 1),
+            'messages'          : messages,
+            'stream'            : stream,
+            'model'             : model,
+            'temperature'       : kwargs.get('temperature', 0.5),
+            'presence_penalty'  : kwargs.get('presence_penalty', 0),
+            'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+            'top_p'             : kwargs.get('top_p', 1),
         }

         subdomain = random.choice([
@@ -15,26 +15,23 @@ class Forefront(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         json_data = {
-            "text": messages[-1]["content"],
-            "action": "noauth",
-            "id": "",
-            "parentId": "",
-            "workspaceId": "",
+            "text"          : messages[-1]["content"],
+            "action"        : "noauth",
+            "id"            : "",
+            "parentId"      : "",
+            "workspaceId"   : "",
             "messagePersona": "607e41fe-95be-497e-8e97-010a59b2e2c0",
-            "model": "gpt-4",
-            "messages": messages[:-1] if len(messages) > 1 else [],
-            "internetMode": "auto",
+            "model"         : "gpt-4",
+            "messages"      : messages[:-1] if len(messages) > 1 else [],
+            "internetMode"  : "auto",
         }

-        response = requests.post(
-            "https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
-            json=json_data,
-            stream=True,
-        )
+        response = requests.post("https://streaming.tenant-forefront-default.knative.chi.coreweave.com/free-chat",
+            json=json_data, stream=True)
+
         response.raise_for_status()
         for token in response.iter_lines():
             if b"delta" in token:
@@ -1,16 +1,12 @@
-import json
-import os
-import uuid
-
-import requests
+import os, json, uuid, requests
 from Crypto.Cipher  import AES

 from ..typing       import Any, CreateResult
 from .base_provider import BaseProvider


 class GetGpt(BaseProvider):
-    url = "https://chat.getgpt.world/"
+    url                   = 'https://chat.getgpt.world/'
     supports_stream       = True
     working               = True
     supports_gpt_35_turbo = True

@@ -19,69 +15,68 @@ class GetGpt(BaseProvider):
     def create_completion(
         model: str,
         messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any,
-    ) -> CreateResult:
+        stream: bool, **kwargs: Any) -> CreateResult:
+
         headers = {
-            "Content-Type": "application/json",
-            "Referer": "https://chat.getgpt.world/",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
+            'Content-Type'  : 'application/json',
+            'Referer'       : 'https://chat.getgpt.world/',
+            'user-agent'    : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
         }

         data = json.dumps(
             {
-                "messages": messages,
-                "frequency_penalty": kwargs.get("frequency_penalty", 0),
-                "max_tokens": kwargs.get("max_tokens", 4000),
-                "model": "gpt-3.5-turbo",
-                "presence_penalty": kwargs.get("presence_penalty", 0),
-                "temperature": kwargs.get("temperature", 1),
-                "top_p": kwargs.get("top_p", 1),
-                "stream": True,
-                "uuid": str(uuid.uuid4()),
+                'messages'          : messages,
+                'frequency_penalty' : kwargs.get('frequency_penalty', 0),
+                'max_tokens'        : kwargs.get('max_tokens', 4000),
+                'model'             : 'gpt-3.5-turbo',
+                'presence_penalty'  : kwargs.get('presence_penalty', 0),
+                'temperature'       : kwargs.get('temperature', 1),
+                'top_p'             : kwargs.get('top_p', 1),
+                'stream'            : True,
+                'uuid'              : str(uuid.uuid4())
             }
         )

-        res = requests.post(
-            "https://chat.getgpt.world/api/chat/stream",
-            headers=headers,
-            json={"signature": _encrypt(data)},
-            stream=True,
-        )
+        res = requests.post('https://chat.getgpt.world/api/chat/stream',
+            headers=headers, json={'signature': _encrypt(data)}, stream=True)

         res.raise_for_status()
         for line in res.iter_lines():
-            if b"content" in line:
-                line_json = json.loads(line.decode("utf-8").split("data: ")[1])
-                yield (line_json["choices"][0]["delta"]["content"])
+            if b'content' in line:
+                line_json = json.loads(line.decode('utf-8').split('data: ')[1])
+                yield (line_json['choices'][0]['delta']['content'])

     @classmethod
     @property
     def params(cls):
         params = [
-            ("model", "str"),
-            ("messages", "list[dict[str, str]]"),
-            ("stream", "bool"),
-            ("temperature", "float"),
-            ("presence_penalty", "int"),
-            ("frequency_penalty", "int"),
-            ("top_p", "int"),
-            ("max_tokens", "int"),
+            ('model', 'str'),
+            ('messages', 'list[dict[str, str]]'),
+            ('stream', 'bool'),
+            ('temperature', 'float'),
+            ('presence_penalty', 'int'),
+            ('frequency_penalty', 'int'),
+            ('top_p', 'int'),
+            ('max_tokens', 'int'),
         ]
-        param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        param = ', '.join([': '.join(p) for p in params])
+        return f'g4f.provider.{cls.__name__} supports: ({param})'


 def _encrypt(e: str):
-    t = os.urandom(8).hex().encode("utf-8")
-    n = os.urandom(8).hex().encode("utf-8")
-    r = e.encode("utf-8")
+    t = os.urandom(8).hex().encode('utf-8')
+    n = os.urandom(8).hex().encode('utf-8')
+    r = e.encode('utf-8')

     cipher     = AES.new(t, AES.MODE_CBC, n)
     ciphertext = cipher.encrypt(_pad_data(r))
-    return ciphertext.hex() + t.decode("utf-8") + n.decode("utf-8")
+
+    return ciphertext.hex() + t.decode('utf-8') + n.decode('utf-8')


 def _pad_data(data: bytes) -> bytes:
     block_size   = AES.block_size
     padding_size = block_size - len(data) % block_size
     padding      = bytes([padding_size] * padding_size)

     return data + padding
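
Note: _encrypt appends the 16-character hex key and IV directly to the hex ciphertext, so the receiving side can split them back off by position. A sketch of the inverse operation under that layout (assumptions: PyCryptodome, and unpadding that mirrors _pad_data):

    from Crypto.Cipher import AES

    def _decrypt(signature: str) -> str:
        # layout from _encrypt: hex(ciphertext) + key (16 hex chars) + iv (16 hex chars)
        ciphertext, key, iv = signature[:-32], signature[-32:-16], signature[-16:]
        cipher = AES.new(key.encode('utf-8'), AES.MODE_CBC, iv.encode('utf-8'))
        padded = cipher.decrypt(bytes.fromhex(ciphertext))
        return padded[:-padded[-1]].decode('utf-8')  # strip _pad_data-style padding

    # round trip: _decrypt(_encrypt('{"messages": []}')) == '{"messages": []}'

Because the key travels with the ciphertext, this is obfuscation rather than secrecy; it only shows the payload came from a client that knows the scheme.
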
| @@ -1,7 +1,4 @@ | |||||||
| import json | import json, uuid, requests | ||||||
| import uuid |  | ||||||
|  |  | ||||||
| import requests |  | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
| @@ -17,9 +14,8 @@ class H2o(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         conversation = "" |         conversation = "" | ||||||
|         for message in messages: |         for message in messages: | ||||||
|             conversation += "%s: %s\n" % (message["role"], message["content"]) |             conversation += "%s: %s\n" % (message["role"], message["content"]) | ||||||
| @@ -29,54 +25,48 @@ class H2o(BaseProvider): | |||||||
|  |  | ||||||
|         headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"} |         headers = {"Referer": "https://gpt-gm.h2o.ai/r/jGfKSwU"} | ||||||
|         data = { |         data = { | ||||||
|             "ethicsModalAccepted": "true", |             "ethicsModalAccepted"               : "true", | ||||||
|             "shareConversationsWithModelAuthors": "true", |             "shareConversationsWithModelAuthors": "true", | ||||||
|             "ethicsModalAcceptedAt": "", |             "ethicsModalAcceptedAt"             : "", | ||||||
|             "activeModel": model, |             "activeModel"                       : model, | ||||||
|             "searchEnabled": "true", |             "searchEnabled"                     : "true", | ||||||
|         } |         } | ||||||
|         session.post( |          | ||||||
|             "https://gpt-gm.h2o.ai/settings", |         session.post("https://gpt-gm.h2o.ai/settings", | ||||||
|             headers=headers, |                      headers=headers, data=data) | ||||||
|             data=data, |  | ||||||
|         ) |  | ||||||
|  |  | ||||||
|         headers = {"Referer": "https://gpt-gm.h2o.ai/"} |         headers = {"Referer": "https://gpt-gm.h2o.ai/"} | ||||||
|         data    = {"model": model} |         data    = {"model": model} | ||||||
|  |  | ||||||
|         response = session.post( |         response = session.post("https://gpt-gm.h2o.ai/conversation", | ||||||
|             "https://gpt-gm.h2o.ai/conversation", |                                 headers=headers, json=data).json() | ||||||
|             headers=headers, |          | ||||||
|             json=data, |  | ||||||
|         ).json() |  | ||||||
|         if "conversationId" not in response: |         if "conversationId" not in response: | ||||||
|             return |             return | ||||||
|  |  | ||||||
|         data = { |         data = { | ||||||
|             "inputs": conversation, |             "inputs": conversation, | ||||||
|             "parameters": { |             "parameters": { | ||||||
|                 "temperature": kwargs.get("temperature", 0.4), |                 "temperature"   : kwargs.get("temperature", 0.4), | ||||||
|                 "truncate": kwargs.get("truncate", 2048), |                 "truncate"          : kwargs.get("truncate", 2048), | ||||||
|                 "max_new_tokens": kwargs.get("max_new_tokens", 1024), |                 "max_new_tokens"    : kwargs.get("max_new_tokens", 1024), | ||||||
|                 "do_sample": kwargs.get("do_sample", True), |                 "do_sample"         : kwargs.get("do_sample", True), | ||||||
|                 "repetition_penalty": kwargs.get("repetition_penalty", 1.2), |                 "repetition_penalty": kwargs.get("repetition_penalty", 1.2), | ||||||
|                 "return_full_text": kwargs.get("return_full_text", False), |                 "return_full_text"  : kwargs.get("return_full_text", False), | ||||||
|             }, |             }, | ||||||
|             "stream": True, |             "stream" : True, | ||||||
|             "options": { |             "options": { | ||||||
|                 "id": kwargs.get("id", str(uuid.uuid4())), |                 "id"           : kwargs.get("id", str(uuid.uuid4())), | ||||||
|                 "response_id": kwargs.get("response_id", str(uuid.uuid4())), |                 "response_id"  : kwargs.get("response_id", str(uuid.uuid4())), | ||||||
|                 "is_retry": False, |                 "is_retry"     : False, | ||||||
|                 "use_cache": False, |                 "use_cache"    : False, | ||||||
|                 "web_search_id": "", |                 "web_search_id": "", | ||||||
|             }, |             }, | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         response = session.post( |         response = session.post(f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}", | ||||||
|             f"https://gpt-gm.h2o.ai/conversation/{response['conversationId']}", |             headers=headers, json=data) | ||||||
|             headers=headers, |          | ||||||
|             json=data, |  | ||||||
|         ) |  | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         response.encoding = "utf-8" |         response.encoding = "utf-8" | ||||||
|         generated_text    = response.text.replace("\n", "").split("data:") |         generated_text    = response.text.replace("\n", "").split("data:") | ||||||
|   | |||||||
| @@ -20,12 +20,10 @@ class Hugchat(BaseProvider): | |||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool = False, |         stream: bool = False, | ||||||
|         proxy: str = None, |         proxy: str = None, | ||||||
|         cookies: str = get_cookies(".huggingface.co"), |         cookies: dict = get_cookies(".huggingface.co"), **kwargs) -> CreateResult: | ||||||
|         **kwargs |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         bot = ChatBot( |         bot = ChatBot( | ||||||
|             cookies=cookies |             cookies=cookies) | ||||||
|         ) |  | ||||||
|          |          | ||||||
|         if proxy and "://" not in proxy: |         if proxy and "://" not in proxy: | ||||||
|             proxy = f"http://{proxy}" |             proxy = f"http://{proxy}" | ||||||
|   | |||||||
| @@ -1,13 +1,11 @@ | |||||||
| import uuid | import uuid, requests | ||||||
|  |  | ||||||
| import requests |  | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
|  |  | ||||||
| class Liaobots(BaseProvider): | class Liaobots(BaseProvider): | ||||||
|     url = "https://liaobots.com" |     url: str                = "https://liaobots.com" | ||||||
|     supports_stream         = True |     supports_stream         = True | ||||||
|     needs_auth              = True |     needs_auth              = True | ||||||
|     supports_gpt_35_turbo   = True |     supports_gpt_35_turbo   = True | ||||||
| @@ -17,17 +15,17 @@ class Liaobots(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         headers = { |         headers = { | ||||||
|             "authority": "liaobots.com", |             "authority"     : "liaobots.com", | ||||||
|             "content-type": "application/json", |             "content-type"  : "application/json", | ||||||
|             "origin": "https://liaobots.com", |             "origin"        : "https://liaobots.com", | ||||||
|             "referer": "https://liaobots.com/", |             "referer"       : "https://liaobots.com/", | ||||||
|             "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", |             "user-agent"    : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36", | ||||||
|             "x-auth-code": str(kwargs.get("auth")), |             "x-auth-code"   : str(kwargs.get("auth")), | ||||||
|         } |         } | ||||||
|  |          | ||||||
|         models = { |         models = { | ||||||
|             "gpt-4": { |             "gpt-4": { | ||||||
|                 "id": "gpt-4", |                 "id": "gpt-4", | ||||||
| @@ -44,18 +42,15 @@ class Liaobots(BaseProvider): | |||||||
|         } |         } | ||||||
|         json_data = { |         json_data = { | ||||||
|             "conversationId": str(uuid.uuid4()), |             "conversationId": str(uuid.uuid4()), | ||||||
|             "model": models[model], |             "model"         : models[model], | ||||||
|             "messages": messages, |             "messages"      : messages, | ||||||
|             "key": "", |             "key"           : "", | ||||||
|             "prompt": "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.", |             "prompt"        : "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown.", | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         response = requests.post( |         response = requests.post("https://liaobots.com/api/chat", | ||||||
|             "https://liaobots.com/api/chat", |             headers=headers, json=json_data, stream=True) | ||||||
|             headers=headers, |          | ||||||
|             json=json_data, |  | ||||||
|             stream=True, |  | ||||||
|         ) |  | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         for token in response.iter_content(chunk_size=2046): |         for token in response.iter_content(chunk_size=2046): | ||||||
|             yield token.decode("utf-8") |             yield token.decode("utf-8") | ||||||
|   | |||||||
| @@ -1,13 +1,11 @@ | |||||||
| import json | import json, requests | ||||||
|  |  | ||||||
| import requests |  | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
|  |  | ||||||
| class Lockchat(BaseProvider): | class Lockchat(BaseProvider): | ||||||
|     url = "http://supertest.lockchat.app" |     url: str              = "http://supertest.lockchat.app" | ||||||
|     supports_stream       = True |     supports_stream       = True | ||||||
|     supports_gpt_35_turbo = True |     supports_gpt_35_turbo = True | ||||||
|     supports_gpt_4        = True |     supports_gpt_4        = True | ||||||
| @@ -16,37 +14,33 @@ class Lockchat(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |  | ||||||
|     ) -> CreateResult: |  | ||||||
|         temperature = float(kwargs.get("temperature", 0.7)) |         temperature = float(kwargs.get("temperature", 0.7)) | ||||||
|         payload = { |         payload = { | ||||||
|             "temperature": temperature, |             "temperature": temperature, | ||||||
|             "messages": messages, |             "messages"   : messages, | ||||||
|             "model": model, |             "model"      : model, | ||||||
|             "stream": True, |             "stream"     : True, | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         headers = { |         headers = { | ||||||
|             "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0", |             "user-agent": "ChatX/39 CFNetwork/1408.0.4 Darwin/22.5.0", | ||||||
|         } |         } | ||||||
|         response = requests.post( |         response = requests.post("http://supertest.lockchat.app/v1/chat/completions", | ||||||
|             "http://supertest.lockchat.app/v1/chat/completions", |                                  json=payload, headers=headers, stream=True) | ||||||
|             json=payload, |          | ||||||
|             headers=headers, |  | ||||||
|             stream=True, |  | ||||||
|         ) |  | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         for token in response.iter_lines(): |         for token in response.iter_lines(): | ||||||
|             if b"The model: `gpt-4` does not exist" in token: |             if b"The model: `gpt-4` does not exist" in token: | ||||||
|                 print("error, retrying...") |                 print("error, retrying...") | ||||||
|                 Lockchat.create_completion( |                 yield from Lockchat.create_completion( | ||||||
|                     model=model, |                     model       = model, | ||||||
|                     messages=messages, |                     messages    = messages, | ||||||
|                     stream=stream, |                     stream      = stream, | ||||||
|                     temperature=temperature, |                     temperature = temperature, | ||||||
|                     **kwargs, |                     **kwargs) | ||||||
|                 ) |              | ||||||
|             if b"content" in token: |             if b"content" in token: | ||||||
|                 token = json.loads(token.decode("utf-8").split("data: ")[1]) |                 token = json.loads(token.decode("utf-8").split("data: ")[1]) | ||||||
|                 token = token["choices"][0]["delta"].get("content") |                 token = token["choices"][0]["delta"].get("content") | ||||||
|   | |||||||
| @@ -13,25 +13,22 @@ class Opchatgpts(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         temperature   = kwargs.get("temperature", 0.8) |         temperature   = kwargs.get("temperature", 0.8) | ||||||
|         max_tokens    = kwargs.get("max_tokens", 1024) |         max_tokens    = kwargs.get("max_tokens", 1024) | ||||||
|         system_prompt = kwargs.get( |         system_prompt = kwargs.get( | ||||||
|             "system_prompt", |             "system_prompt", | ||||||
|             "Converse as if you were an AI assistant. Be friendly, creative.", |             "Converse as if you were an AI assistant. Be friendly, creative.") | ||||||
|         ) |          | ||||||
|         payload = _create_payload( |         payload = _create_payload( | ||||||
|             messages=messages, |             messages        = messages, | ||||||
|             temperature=temperature, |             temperature     = temperature, | ||||||
|             max_tokens=max_tokens, |             max_tokens      = max_tokens, | ||||||
|             system_prompt=system_prompt, |             system_prompt   = system_prompt) | ||||||
|         ) |  | ||||||
|  |         response = requests.post("https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload) | ||||||
|          |          | ||||||
|         response = requests.post( |  | ||||||
|             "https://opchatgpts.net/wp-json/ai-chatbot/v1/chat", json=payload |  | ||||||
|         ) |  | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         yield response.json()["reply"] |         yield response.json()["reply"] | ||||||
|  |  | ||||||
| @@ -39,24 +36,23 @@ class Opchatgpts(BaseProvider): | |||||||
| def _create_payload( | def _create_payload( | ||||||
|     messages: list[dict[str, str]], |     messages: list[dict[str, str]], | ||||||
|     temperature: float, |     temperature: float, | ||||||
|     max_tokens: int, |     max_tokens: int, system_prompt: str) -> dict: | ||||||
|     system_prompt: str, |      | ||||||
| ): |  | ||||||
|     return { |     return { | ||||||
|         "env": "chatbot", |         "env"             : "chatbot", | ||||||
|         "session": "N/A", |         "session"         : "N/A", | ||||||
|         "prompt": "\n", |         "prompt"          : "\n", | ||||||
|         "context": system_prompt, |         "context"         : system_prompt, | ||||||
|         "messages": messages, |         "messages"        : messages, | ||||||
|         "newMessage": messages[::-1][0]["content"], |         "newMessage"      : messages[::-1][0]["content"], | ||||||
|         "userName": '<div class="mwai-name-text">User:</div>', |         "userName"        : '<div class="mwai-name-text">User:</div>', | ||||||
|         "aiName": '<div class="mwai-name-text">AI:</div>', |         "aiName"          : '<div class="mwai-name-text">AI:</div>', | ||||||
|         "model": "gpt-3.5-turbo", |         "model"           : "gpt-3.5-turbo", | ||||||
|         "temperature": temperature, |         "temperature"     : temperature, | ||||||
|         "maxTokens": max_tokens, |         "maxTokens"       : max_tokens, | ||||||
|         "maxResults": 1, |         "maxResults"      : 1, | ||||||
|         "apiKey": "", |         "apiKey"          : "", | ||||||
|         "service": "openai", |         "service"         : "openai", | ||||||
|         "embeddingsIndex": "", |         "embeddingsIndex" : "", | ||||||
|         "stop": "", |         "stop"            : "", | ||||||
|     } |     } | ||||||
|   | |||||||
| @@ -3,6 +3,7 @@ try: | |||||||
|     from revChatGPT.V1 import AsyncChatbot |     from revChatGPT.V1 import AsyncChatbot | ||||||
| except ImportError: | except ImportError: | ||||||
|     has_module = False |     has_module = False | ||||||
|  |  | ||||||
| from .base_provider import AsyncGeneratorProvider, get_cookies | from .base_provider import AsyncGeneratorProvider, get_cookies | ||||||
| from ..typing       import AsyncGenerator | from ..typing       import AsyncGenerator | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,12 +1,11 @@ | |||||||
| import json | import json, requests | ||||||
| import requests |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
|  |  | ||||||
| class Raycast(BaseProvider): | class Raycast(BaseProvider): | ||||||
|     url                     = "https://raycast.com" |     url                     = "https://raycast.com" | ||||||
|     # model = ['gpt-3.5-turbo', 'gpt-4'] |  | ||||||
|     supports_gpt_35_turbo   = True |     supports_gpt_35_turbo   = True | ||||||
|     supports_gpt_4          = True |     supports_gpt_4          = True | ||||||
|     supports_stream         = True |     supports_stream         = True | ||||||
|   | |||||||
| @@ -1,5 +1,5 @@ | |||||||
| import json,random,requests | import json, random, requests | ||||||
| # from curl_cffi import requests |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
| @@ -15,60 +15,58 @@ class Theb(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         conversation = '' |         conversation = '' | ||||||
|         for message in messages: |         for message in messages: | ||||||
|             conversation += '%s: %s\n' % (message['role'], message['content']) |             conversation += '%s: %s\n' % (message['role'], message['content']) | ||||||
|          |  | ||||||
|         conversation += 'assistant: ' |         conversation += 'assistant: ' | ||||||
|  |          | ||||||
|         auth = kwargs.get("auth", { |         auth = kwargs.get("auth", { | ||||||
|             "bearer_token":"free", |             "bearer_token":"free", | ||||||
|             "org_id":"theb", |             "org_id":"theb", | ||||||
|         }) |         }) | ||||||
|  |          | ||||||
|         bearer_token = auth["bearer_token"] |         bearer_token = auth["bearer_token"] | ||||||
|         org_id       = auth["org_id"] |         org_id       = auth["org_id"] | ||||||
|  |          | ||||||
|         headers = { |         headers = { | ||||||
|             'authority': 'beta.theb.ai', |             'authority'         : 'beta.theb.ai', | ||||||
|             'accept': 'text/event-stream', |             'accept'            : 'text/event-stream', | ||||||
|             'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', |             'accept-language'   : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', | ||||||
|             'authorization': 'Bearer '+bearer_token, |             'authorization'     : 'Bearer '+bearer_token, | ||||||
|             'content-type': 'application/json', |             'content-type'      : 'application/json', | ||||||
|             'origin': 'https://beta.theb.ai', |             'origin'            : 'https://beta.theb.ai', | ||||||
|             'referer': 'https://beta.theb.ai/home', |             'referer'           : 'https://beta.theb.ai/home', | ||||||
|             'sec-ch-ua': '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', |             'sec-ch-ua'         : '"Chromium";v="116", "Not)A;Brand";v="24", "Google Chrome";v="116"', | ||||||
|             'sec-ch-ua-mobile': '?0', |             'sec-ch-ua-mobile'  : '?0', | ||||||
|             'sec-ch-ua-platform': '"Windows"', |             'sec-ch-ua-platform': '"Windows"', | ||||||
|             'sec-fetch-dest': 'empty', |             'sec-fetch-dest'    : 'empty', | ||||||
|             'sec-fetch-mode': 'cors', |             'sec-fetch-mode'    : 'cors', | ||||||
|             'sec-fetch-site': 'same-origin', |             'sec-fetch-site'    : 'same-origin', | ||||||
|             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', |             'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36', | ||||||
|             'x-ai-model': 'ee8d4f29cb7047f78cbe84313ed6ace8', |             'x-ai-model'        : 'ee8d4f29cb7047f78cbe84313ed6ace8', | ||||||
|         } |         } | ||||||
|         # generate 10 random number |          | ||||||
|         # 0.1 - 0.9 |  | ||||||
|         req_rand = random.randint(100000000, 9999999999) |         req_rand = random.randint(100000000, 9999999999) | ||||||
|  |  | ||||||
|         json_data: dict[str, Any] = { |         json_data: dict[str, Any] = { | ||||||
|             "text": conversation, |             "text"      : conversation, | ||||||
|             "category": "04f58f64a4aa4191a957b47290fee864", |             "category"  : "04f58f64a4aa4191a957b47290fee864", | ||||||
|             "model": "ee8d4f29cb7047f78cbe84313ed6ace8", |             "model"     : "ee8d4f29cb7047f78cbe84313ed6ace8", | ||||||
|             "model_params": { |             "model_params": { | ||||||
|                 "system_prompt": "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}", |                 "system_prompt"     : "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture.\nKnowledge cutoff: 2021-09\nCurrent date: {{YYYY-MM-DD}}", | ||||||
|                 "temperature": kwargs.get("temperature", 1), |                 "temperature"       : kwargs.get("temperature", 1), | ||||||
|                 "top_p": kwargs.get("top_p", 1), |                 "top_p"             : kwargs.get("top_p", 1), | ||||||
|                 "frequency_penalty": kwargs.get("frequency_penalty", 0), |                 "frequency_penalty" : kwargs.get("frequency_penalty", 0), | ||||||
|                 "presence_penalty": kwargs.get("presence_penalty", 0), |                 "presence_penalty"  : kwargs.get("presence_penalty", 0), | ||||||
|                 "long_term_memory": "auto" |                 "long_term_memory"  : "auto" | ||||||
|             } |             } | ||||||
|         } |         } | ||||||
|         response = requests.post( |          | ||||||
|             "https://beta.theb.ai/api/conversation?org_id="+org_id+"&req_rand="+str(req_rand), |         response = requests.post(f"https://beta.theb.ai/api/conversation?org_id={org_id}&req_rand={req_rand}", | ||||||
|             headers=headers, |                                  headers=headers, json=json_data, stream=True) | ||||||
|             json=json_data, |          | ||||||
|             stream=True, |  | ||||||
|         ) |  | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         content = "" |         content = "" | ||||||
|         next_content = "" |         next_content = "" | ||||||
|   | |||||||
| @@ -1,8 +1,8 @@ | |||||||
| import uuid, requests | import uuid, requests | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
|  |  | ||||||
| class V50(BaseProvider): | class V50(BaseProvider): | ||||||
|     url                     = 'https://p5.v50.ltd' |     url                     = 'https://p5.v50.ltd' | ||||||
|     supports_gpt_35_turbo   = True |     supports_gpt_35_turbo   = True | ||||||
| @@ -14,38 +14,39 @@ class V50(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         conversation = '' |         conversation = '' | ||||||
|         for message in messages: |         for message in messages: | ||||||
|             conversation += '%s: %s\n' % (message['role'], message['content']) |             conversation += '%s: %s\n' % (message['role'], message['content']) | ||||||
|          |          | ||||||
|         conversation += 'assistant: ' |         conversation += 'assistant: ' | ||||||
|         payload = { |         payload = { | ||||||
|             "prompt": conversation, |             "prompt"        : conversation, | ||||||
|             "options": {}, |             "options"       : {}, | ||||||
|             "systemMessage": ".", |             "systemMessage" : ".", | ||||||
|             "temperature": kwargs.get("temperature", 0.4), |             "temperature"   : kwargs.get("temperature", 0.4), | ||||||
|             "top_p": kwargs.get("top_p", 0.4), |             "top_p"         : kwargs.get("top_p", 0.4), | ||||||
|             "model": model, |             "model"         : model, | ||||||
|             "user": str(uuid.uuid4()) |             "user"          : str(uuid.uuid4()) | ||||||
|         } |         } | ||||||
|  |          | ||||||
|         headers = { |         headers = { | ||||||
|             'authority': 'p5.v50.ltd', |             'authority'         : 'p5.v50.ltd', | ||||||
|             'accept': 'application/json, text/plain, */*', |             'accept'            : 'application/json, text/plain, */*', | ||||||
|             'accept-language': 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', |             'accept-language'   : 'id-ID,id;q=0.9,en-US;q=0.8,en;q=0.7', | ||||||
|             'content-type': 'application/json', |             'content-type'      : 'application/json', | ||||||
|             'origin': 'https://p5.v50.ltd', |             'origin'            : 'https://p5.v50.ltd', | ||||||
|             'referer': 'https://p5.v50.ltd/', |             'referer'           : 'https://p5.v50.ltd/', | ||||||
|             'sec-ch-ua-platform': '"Windows"', |             'sec-ch-ua-platform': '"Windows"', | ||||||
|             'sec-fetch-dest': 'empty', |             'sec-fetch-dest'    : 'empty', | ||||||
|             'sec-fetch-mode': 'cors', |             'sec-fetch-mode'    : 'cors', | ||||||
|             'sec-fetch-site': 'same-origin', |             'sec-fetch-site'    : 'same-origin', | ||||||
|             'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36' |             'user-agent'        : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36' | ||||||
|         } |         } | ||||||
|         response = requests.post("https://p5.v50.ltd/api/chat-process",  |         response = requests.post("https://p5.v50.ltd/api/chat-process",  | ||||||
|                                 json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) |                                 json=payload, headers=headers, proxies=kwargs['proxy'] if 'proxy' in kwargs else {}) | ||||||
|  |          | ||||||
|         if "https://fk1.v50.ltd" not in response.text: |         if "https://fk1.v50.ltd" not in response.text: | ||||||
|             yield response.text |             yield response.text | ||||||
|  |  | ||||||
|   | |||||||
| @@ -1,10 +1,6 @@ | |||||||
| import base64 | import base64, json, uuid, quickjs | ||||||
| import json |  | ||||||
| import uuid |  | ||||||
|  |  | ||||||
| import quickjs |  | ||||||
| from curl_cffi      import requests | from curl_cffi      import requests | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult, TypedDict | from ..typing       import Any, CreateResult, TypedDict | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
| @@ -18,9 +14,8 @@ class Vercel(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         if model in ["gpt-3.5-turbo", "gpt-4"]: |         if model in ["gpt-3.5-turbo", "gpt-4"]: | ||||||
|             model = "openai:" + model |             model = "openai:" + model | ||||||
|         yield _chat(model_id=model, messages=messages) |         yield _chat(model_id=model, messages=messages) | ||||||
| @@ -44,15 +39,13 @@ def _create_payload(model_id: str, messages: list[dict[str, str]]) -> dict[str, | |||||||
|         "messages": messages, |         "messages": messages, | ||||||
|         "playgroundId": str(uuid.uuid4()), |         "playgroundId": str(uuid.uuid4()), | ||||||
|         "chatIndex": 0, |         "chatIndex": 0, | ||||||
|         "model": model_id, |         "model": model_id} | default_params | ||||||
|     } | default_params |  | ||||||
|  |  | ||||||
|  |  | ||||||
| def _create_header(session: requests.Session): | def _create_header(session: requests.Session): | ||||||
|     custom_encoding = _get_custom_encoding(session) |     custom_encoding = _get_custom_encoding(session) | ||||||
|     return {"custom-encoding": custom_encoding} |     return {"custom-encoding": custom_encoding} | ||||||
|  |  | ||||||
|  |  | ||||||
| # based on https://github.com/ading2210/vercel-llm-api | # based on https://github.com/ading2210/vercel-llm-api | ||||||
| def _get_custom_encoding(session: requests.Session): | def _get_custom_encoding(session: requests.Session): | ||||||
|     url = "https://sdk.vercel.ai/openai.jpeg" |     url = "https://sdk.vercel.ai/openai.jpeg" | ||||||
|   | |||||||
| @@ -1,9 +1,4 @@ | |||||||
| import json | import json, random, string, time, requests | ||||||
| import random |  | ||||||
| import string |  | ||||||
| import time |  | ||||||
|  |  | ||||||
| import requests |  | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
| @@ -19,51 +14,53 @@ class Wewordle(BaseProvider): | |||||||
|         cls, |         cls, | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         # randomize user id and app id |         # randomize user id and app id | ||||||
|         _user_id = "".join( |         _user_id = "".join( | ||||||
|             random.choices(f"{string.ascii_lowercase}{string.digits}", k=16) |             random.choices(f"{string.ascii_lowercase}{string.digits}", k=16)) | ||||||
|         ) |          | ||||||
|         _app_id = "".join( |         _app_id = "".join( | ||||||
|             random.choices(f"{string.ascii_lowercase}{string.digits}", k=31) |             random.choices(f"{string.ascii_lowercase}{string.digits}", k=31)) | ||||||
|         ) |          | ||||||
|         # current date in UTC, ISO-8601 format |         # current date in UTC, ISO-8601 format | ||||||
|         _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) |         _request_date = time.strftime("%Y-%m-%dT%H:%M:%S.000Z", time.gmtime()) | ||||||
|         headers = { |         headers = { | ||||||
|             "accept": "*/*", |             "accept"        : "*/*", | ||||||
|             "pragma": "no-cache", |             "pragma"        : "no-cache", | ||||||
|             "Content-Type": "application/json", |             "Content-Type"  : "application/json", | ||||||
|             "Connection": "keep-alive" |             "Connection"    : "keep-alive" | ||||||
|             # Android client user agent |             # Android client user agent | ||||||
|             # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)', |             # 'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 10; SM-G975F Build/QP1A.190711.020)', | ||||||
|         } |         } | ||||||
|  |          | ||||||
|         data: dict[str, Any] = { |         data: dict[str, Any] = { | ||||||
|             "user": _user_id, |             "user"      : _user_id, | ||||||
|             "messages": messages, |             "messages"  : messages, | ||||||
|             "subscriber": { |             "subscriber": { | ||||||
|                 "originalPurchaseDate": None, |                 "originalPurchaseDate"          : None, | ||||||
|                 "originalApplicationVersion": None, |                 "originalApplicationVersion"    : None, | ||||||
|                 "allPurchaseDatesMillis": {}, |                 "allPurchaseDatesMillis"        : {}, | ||||||
|                 "entitlements": {"active": {}, "all": {}}, |                 "entitlements"                  : {"active": {}, "all": {}}, | ||||||
|                 "allPurchaseDates": {}, |                 "allPurchaseDates"              : {}, | ||||||
|                 "allExpirationDatesMillis": {}, |                 "allExpirationDatesMillis"      : {}, | ||||||
|                 "allExpirationDates": {}, |                 "allExpirationDates"            : {}, | ||||||
|                 "originalAppUserId": f"$RCAnonymousID:{_app_id}", |                 "originalAppUserId"             : f"$RCAnonymousID:{_app_id}", | ||||||
|                 "latestExpirationDate": None, |                 "latestExpirationDate"          : None, | ||||||
|                 "requestDate": _request_date, |                 "requestDate"                   : _request_date, | ||||||
|                 "latestExpirationDateMillis": None, |                 "latestExpirationDateMillis"    : None, | ||||||
|                 "nonSubscriptionTransactions": [], |                 "nonSubscriptionTransactions"   : [], | ||||||
|                 "originalPurchaseDateMillis": None, |                 "originalPurchaseDateMillis"    : None, | ||||||
|                 "managementURL": None, |                 "managementURL"                 : None, | ||||||
|                 "allPurchasedProductIdentifiers": [], |                 "allPurchasedProductIdentifiers": [], | ||||||
|                 "firstSeen": _request_date, |                 "firstSeen"                     : _request_date, | ||||||
|                 "activeSubscriptions": [], |                 "activeSubscriptions"           : [], | ||||||
|             }, |             } | ||||||
|         } |         } | ||||||
|  |  | ||||||
|         response = requests.post(f"{cls.url}gptapi/v1/android/turbo", headers=headers, data=json.dumps(data)) |         response = requests.post(f"{cls.url}gptapi/v1/android/turbo",  | ||||||
|  |                                  headers=headers, data=json.dumps(data)) | ||||||
|  |          | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         _json = response.json() |         _json = response.json() | ||||||
|         if "message" in _json: |         if "message" in _json: | ||||||
|   | |||||||
| @@ -1,9 +1,6 @@ | |||||||
| import re | import urllib.parse, json | ||||||
| import urllib.parse |  | ||||||
| import json |  | ||||||
|  |  | ||||||
| from curl_cffi      import requests | from curl_cffi      import requests | ||||||
|  |  | ||||||
| from ..typing       import Any, CreateResult | from ..typing       import Any, CreateResult | ||||||
| from .base_provider import BaseProvider | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
| @@ -17,17 +14,14 @@ class You(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         url_param = _create_url_param(messages, kwargs.get("history", [])) |         url_param = _create_url_param(messages, kwargs.get("history", [])) | ||||||
|         headers   = _create_header() |         headers   = _create_header() | ||||||
|         url = f"https://you.com/api/streamingSearch?{url_param}" |          | ||||||
|         response = requests.get( |         response = requests.get(f"https://you.com/api/streamingSearch?{url_param}", | ||||||
|             url, |             headers=headers, impersonate="chrome107") | ||||||
|             headers=headers, |          | ||||||
|             impersonate="chrome107", |  | ||||||
|         ) |  | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|          |          | ||||||
|         start = 'data: {"youChatToken": ' |         start = 'data: {"youChatToken": ' | ||||||
|   | |||||||
| @@ -13,14 +13,14 @@ class Yqcloud(BaseProvider): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         headers = _create_header() |         headers = _create_header() | ||||||
|         payload = _create_payload(messages) |         payload = _create_payload(messages) | ||||||
|  |  | ||||||
|         url = "https://api.aichatos.cloud/api/generateStream" |         response = requests.post("https://api.aichatos.cloud/api/generateStream",  | ||||||
|         response = requests.post(url=url, headers=headers, json=payload) |                                  headers=headers, json=payload) | ||||||
|  |          | ||||||
|         response.raise_for_status() |         response.raise_for_status() | ||||||
|         response.encoding = 'utf-8' |         response.encoding = 'utf-8' | ||||||
|         yield response.text |         yield response.text | ||||||
| @@ -28,9 +28,9 @@ class Yqcloud(BaseProvider): | |||||||
|  |  | ||||||
| def _create_header(): | def _create_header(): | ||||||
|     return { |     return { | ||||||
|         "accept": "application/json, text/plain, */*", |         "accept"        : "application/json, text/plain, */*", | ||||||
|         "content-type": "application/json", |         "content-type"  : "application/json", | ||||||
|         "origin": "https://chat9.yqcloud.top", |         "origin"        : "https://chat9.yqcloud.top", | ||||||
|     } |     } | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -39,10 +39,11 @@ def _create_payload(messages: list[dict[str, str]]): | |||||||
|     for message in messages: |     for message in messages: | ||||||
|         prompt += "%s: %s\n" % (message["role"], message["content"]) |         prompt += "%s: %s\n" % (message["role"], message["content"]) | ||||||
|     prompt += "assistant:" |     prompt += "assistant:" | ||||||
|  |      | ||||||
|     return { |     return { | ||||||
|         "prompt": prompt, |         "prompt"        : prompt, | ||||||
|         "network": True, |         "network"       : True, | ||||||
|         "system": "", |         "system"        : "", | ||||||
|         "withoutContext": False, |         "withoutContext": False, | ||||||
|         "stream": False, |         "stream"        : False, | ||||||
|     } |     } | ||||||
| @@ -4,7 +4,6 @@ from .Ails import Ails | |||||||
| from .AiService     import AiService | from .AiService     import AiService | ||||||
| from .AItianhu      import AItianhu | from .AItianhu      import AItianhu | ||||||
| from .Bard          import Bard | from .Bard          import Bard | ||||||
| from .base_provider import BaseProvider |  | ||||||
| from .Bing          import Bing | from .Bing          import Bing | ||||||
| from .ChatgptAi     import ChatgptAi | from .ChatgptAi     import ChatgptAi | ||||||
| from .ChatgptLogin  import ChatgptLogin | from .ChatgptLogin  import ChatgptLogin | ||||||
| @@ -30,36 +29,38 @@ from .FastGpt import FastGpt | |||||||
| from .V50           import V50 | from .V50           import V50 | ||||||
| from .Wuguokai      import Wuguokai | from .Wuguokai      import Wuguokai | ||||||
|  |  | ||||||
|  | from .base_provider import BaseProvider | ||||||
|  |  | ||||||
| __all__ = [ | __all__ = [ | ||||||
|     "BaseProvider", |     'BaseProvider', | ||||||
|     "Acytoo", |     'Acytoo', | ||||||
|     "Aichat", |     'Aichat', | ||||||
|     "Ails", |     'Ails', | ||||||
|     "AiService", |     'AiService', | ||||||
|     "AItianhu", |     'AItianhu', | ||||||
|     "Bard", |     'Bard', | ||||||
|     "Bing", |     'Bing', | ||||||
|     "ChatgptAi", |     'ChatgptAi', | ||||||
|     "ChatgptLogin", |     'ChatgptLogin', | ||||||
|     "DeepAi", |     'DeepAi', | ||||||
|     "DfeHub", |     'DfeHub', | ||||||
|     "EasyChat", |     'EasyChat', | ||||||
|     "Forefront", |     'Forefront', | ||||||
|     "GetGpt", |     'GetGpt', | ||||||
|     "H2o", |     'H2o', | ||||||
|     "Hugchat", |     'Hugchat', | ||||||
|     "Liaobots", |     'Liaobots', | ||||||
|     "Lockchat", |     'Lockchat', | ||||||
|     "Opchatgpts", |     'Opchatgpts', | ||||||
|     "Raycast", |     'Raycast', | ||||||
|     "OpenaiChat", |     'OpenaiChat', | ||||||
|     "Theb", |     'Theb', | ||||||
|     "Vercel", |     'Vercel', | ||||||
|     "Wewordle", |     'Wewordle', | ||||||
|     "You", |     'You', | ||||||
|     "Yqcloud", |     'Yqcloud', | ||||||
|     "Equing", |     'Equing', | ||||||
|     "FastGpt", |     'FastGpt', | ||||||
|     "Wuguokai" |     'Wuguokai', | ||||||
|     "V50" |     'V50' | ||||||
| ] | ] | ||||||
|   | |||||||
| @@ -20,9 +20,8 @@ class BaseProvider(ABC): | |||||||
|     def create_completion( |     def create_completion( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool, |         stream: bool, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         raise NotImplementedError() |         raise NotImplementedError() | ||||||
|  |  | ||||||
|     @classmethod |     @classmethod | ||||||
| @@ -42,8 +41,10 @@ _cookies = {} | |||||||
| def get_cookies(cookie_domain: str) -> dict: | def get_cookies(cookie_domain: str) -> dict: | ||||||
|     if cookie_domain not in _cookies: |     if cookie_domain not in _cookies: | ||||||
|         _cookies[cookie_domain] = {} |         _cookies[cookie_domain] = {} | ||||||
|  |          | ||||||
|         for cookie in browser_cookie3.load(cookie_domain): |         for cookie in browser_cookie3.load(cookie_domain): | ||||||
|             _cookies[cookie_domain][cookie.name] = cookie.value |             _cookies[cookie_domain][cookie.name] = cookie.value | ||||||
|  |      | ||||||
|     return _cookies[cookie_domain] |     return _cookies[cookie_domain] | ||||||
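Hypothetical usage of the cached loader above: the first call for a domain reads the browser's cookie store through browser_cookie3, and later calls for the same domain are served from the module-level _cookies dict.

    hf_cookies = get_cookies(".huggingface.co")   # hits the browser store
    hf_again   = get_cookies(".huggingface.co")   # served from _cookies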
|  |  | ||||||
|  |  | ||||||
| @@ -53,18 +54,15 @@ class AsyncProvider(BaseProvider): | |||||||
|         cls, |         cls, | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool = False, |         stream: bool = False, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         yield asyncio.run(cls.create_async(model, messages, **kwargs)) |         yield asyncio.run(cls.create_async(model, messages, **kwargs)) | ||||||
|  |  | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     @abstractmethod |     @abstractmethod | ||||||
|     async def create_async( |     async def create_async( | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], **kwargs: Any) -> str: | ||||||
|         **kwargs: Any, |  | ||||||
|     ) -> str: |  | ||||||
|         raise NotImplementedError() |         raise NotImplementedError() | ||||||
|  |  | ||||||
|  |  | ||||||
| @@ -74,9 +72,8 @@ class AsyncGeneratorProvider(AsyncProvider): | |||||||
|         cls, |         cls, | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], | ||||||
|         stream: bool = True, |         stream: bool = True, **kwargs: Any) -> CreateResult: | ||||||
|         **kwargs: Any |          | ||||||
|     ) -> CreateResult: |  | ||||||
|         if stream: |         if stream: | ||||||
|             yield from run_generator(cls.create_async_generator(model, messages, **kwargs)) |             yield from run_generator(cls.create_async_generator(model, messages, **kwargs)) | ||||||
|         else: |         else: | ||||||
| @@ -86,9 +83,8 @@ class AsyncGeneratorProvider(AsyncProvider): | |||||||
|     async def create_async( |     async def create_async( | ||||||
|         cls, |         cls, | ||||||
|         model: str, |         model: str, | ||||||
|         messages: list[dict[str, str]], |         messages: list[dict[str, str]], **kwargs: Any) -> str: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> str: |  | ||||||
|         chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)] |         chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)] | ||||||
|         if chunks: |         if chunks: | ||||||
|             return "".join(chunks) |             return "".join(chunks) | ||||||
| @@ -97,8 +93,8 @@ class AsyncGeneratorProvider(AsyncProvider): | |||||||
|     @abstractmethod |     @abstractmethod | ||||||
|     def create_async_generator( |     def create_async_generator( | ||||||
|             model: str, |             model: str, | ||||||
|             messages: list[dict[str, str]], |             messages: list[dict[str, str]]) -> AsyncGenerator: | ||||||
|         ) -> AsyncGenerator: |          | ||||||
|         raise NotImplementedError() |         raise NotImplementedError() | ||||||
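run_generator, called by create_completion above, is not shown in this diff. A minimal sketch of how such a sync-over-async bridge can be written (an assumption about its shape, not the project's actual implementation):

    import asyncio
    from typing import AsyncGenerator

    def run_generator(generator: AsyncGenerator):
        loop = asyncio.new_event_loop()
        gen  = generator.__aiter__()
        try:
            while True:
                try:
                    # drive the async generator one item at a time from sync code
                    yield loop.run_until_complete(gen.__anext__())
                except StopAsyncIteration:
                    break
        finally:
            loop.close()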
|  |  | ||||||
|  |  | ||||||
|   | |||||||
| @@ -4,42 +4,39 @@ from .typing import Any, CreateResult, Union | |||||||
|  |  | ||||||
| logging = False | logging = False | ||||||
|  |  | ||||||
|  |  | ||||||
| class ChatCompletion: | class ChatCompletion: | ||||||
|     @staticmethod |     @staticmethod | ||||||
|     def create( |     def create( | ||||||
|         model: Union[models.Model, str], |         model    : Union[models.Model, str], | ||||||
|         messages: list[dict[str, str]], |         messages : list[dict[str, str]], | ||||||
|         provider: Union[type[BaseProvider], None] = None, |         provider : Union[type[BaseProvider], None] = None, | ||||||
|         stream: bool = False, |         stream   : bool                            = False, | ||||||
|         auth: Union[str, None] = None, |         auth     : Union[str, None]                = None, **kwargs: Any) -> Union[CreateResult, str]: | ||||||
|         **kwargs: Any, |          | ||||||
|     ) -> Union[CreateResult, str]: |  | ||||||
|         if isinstance(model, str): |         if isinstance(model, str): | ||||||
|             try: |             try: | ||||||
|                 model = models.ModelUtils.convert[model] |                 model = models.ModelUtils.convert[model] | ||||||
|             except KeyError: |             except KeyError: | ||||||
|                 raise Exception(f"The model: {model} does not exist") |                 raise Exception(f'The model: {model} does not exist') | ||||||
|  |  | ||||||
|         provider = model.best_provider if provider == None else provider |         provider = model.best_provider if provider is None else provider | ||||||
|  |  | ||||||
|         if not provider.working: |         if not provider.working: | ||||||
|             raise Exception(f"{provider.__name__} is not working") |             raise Exception(f'{provider.__name__} is not working') | ||||||
|  |  | ||||||
|         if provider.needs_auth and not auth: |         if provider.needs_auth and not auth: | ||||||
|             raise Exception( |             raise Exception( | ||||||
|                 f'ValueError: {provider.__name__} requires authentication (use auth="cookie or token or jwt ..." param)' |                 f'ValueError: {provider.__name__} requires authentication (use auth=\'cookie or token or jwt ...\' param)') | ||||||
|             ) |              | ||||||
|         if provider.needs_auth: |         if provider.needs_auth: | ||||||
|             kwargs["auth"] = auth |             kwargs['auth'] = auth | ||||||
|  |  | ||||||
|         if not provider.supports_stream and stream: |         if not provider.supports_stream and stream: | ||||||
|             raise Exception( |             raise Exception( | ||||||
|                 f"ValueError: {provider.__name__} does not support 'stream' argument" |                 f'ValueError: {provider.__name__} does not support "stream" argument') | ||||||
|             ) |  | ||||||
|  |  | ||||||
|         if logging: |         if logging: | ||||||
|             print(f"Using {provider.__name__} provider") |             print(f'Using {provider.__name__} provider') | ||||||
|  |  | ||||||
|         result = provider.create_completion(model.name, messages, stream, **kwargs) |         result = provider.create_completion(model.name, messages, stream, **kwargs) | ||||||
|         return result if stream else "".join(result) |         return result if stream else ''.join(result) | ||||||
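Taken together, the entry point is used as follows; a minimal usage sketch based only on the create() signature above (whether stream is accepted depends on the chosen provider's supports_stream flag):

    import g4f

    # non-streaming: create() joins the token generator into one string
    response = g4f.ChatCompletion.create(
        model    = 'gpt-3.5-turbo',
        messages = [{'role': 'user', 'content': 'Hello'}])
    print(response)

    # streaming: create() returns the token generator itself
    for token in g4f.ChatCompletion.create(
            model    = 'gpt-3.5-turbo',
            messages = [{'role': 'user', 'content': 'Hello'}],
            stream   = True):
        print(token, end='')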
|   | |||||||
g4f/models.py (268 changed lines)
							| @@ -1,8 +1,6 @@ | |||||||
| from dataclasses import dataclass | from dataclasses import dataclass | ||||||
|  |  | ||||||
| from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing | from .Provider import Bard, BaseProvider, GetGpt, H2o, Liaobots, Vercel, Equing | ||||||
|  |  | ||||||
|  |  | ||||||
| @dataclass | @dataclass | ||||||
| class Model: | class Model: | ||||||
|     name: str |     name: str | ||||||
| @@ -12,214 +10,190 @@ class Model: | |||||||
|  |  | ||||||
| # GPT-3.5 / GPT-4 | # GPT-3.5 / GPT-4 | ||||||
| gpt_35_turbo = Model( | gpt_35_turbo = Model( | ||||||
|     name="gpt-3.5-turbo", |     name          = 'gpt-3.5-turbo', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=GetGpt, |     best_provider = GetGpt) | ||||||
| ) |  | ||||||
|  |  | ||||||
| gpt_4 = Model( | gpt_4 = Model( | ||||||
|     name="gpt-4", |     name          = 'gpt-4', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Liaobots, |     best_provider = Liaobots) | ||||||
| ) |  | ||||||
|  |  | ||||||
| # Bard | # Bard | ||||||
| palm = Model( | palm = Model( | ||||||
|     name="palm", |     name          = 'palm', | ||||||
|     base_provider="google", |     base_provider = 'google', | ||||||
|     best_provider=Bard, |     best_provider = Bard) | ||||||
| ) |  | ||||||
|  |  | ||||||
| # H2o | # H2o | ||||||
| falcon_7b = Model( | falcon_7b = Model( | ||||||
|     name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3", |     name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=H2o, |     best_provider = H2o) | ||||||
| ) |  | ||||||
|  |  | ||||||
| falcon_40b = Model( | falcon_40b = Model( | ||||||
|     name="h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1", |     name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-40b-v1', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=H2o, |     best_provider = H2o) | ||||||
| ) |  | ||||||
|  |  | ||||||
| llama_13b = Model( | llama_13b = Model( | ||||||
|     name="h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b", |     name          = 'h2oai/h2ogpt-gm-oasst1-en-2048-open-llama-13b', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=H2o, |     best_provider = H2o) | ||||||
| ) |  | ||||||
|  |  | ||||||
| # Vercel | # Vercel | ||||||
| claude_instant_v1 = Model( | claude_instant_v1 = Model( | ||||||
|     name="anthropic:claude-instant-v1", |     name          = 'anthropic:claude-instant-v1', | ||||||
|     base_provider="anthropic", |     base_provider = 'anthropic', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| claude_v1 = Model( | claude_v1 = Model( | ||||||
|     name="anthropic:claude-v1", |     name          = 'anthropic:claude-v1', | ||||||
|     base_provider="anthropic", |     base_provider = 'anthropic', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| claude_v2 = Model( | claude_v2 = Model( | ||||||
|     name="anthropic:claude-v2", |     name          = 'anthropic:claude-v2', | ||||||
|     base_provider="anthropic", |     base_provider = 'anthropic', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| command_light_nightly = Model( | command_light_nightly = Model( | ||||||
|     name="cohere:command-light-nightly", |     name          = 'cohere:command-light-nightly', | ||||||
|     base_provider="cohere", |     base_provider = 'cohere', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| command_nightly = Model( | command_nightly = Model( | ||||||
|     name="cohere:command-nightly", |     name          = 'cohere:command-nightly', | ||||||
|     base_provider="cohere", |     base_provider = 'cohere', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| gpt_neox_20b = Model( | gpt_neox_20b = Model( | ||||||
|     name="huggingface:EleutherAI/gpt-neox-20b", |     name          = 'huggingface:EleutherAI/gpt-neox-20b', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| oasst_sft_1_pythia_12b = Model( | oasst_sft_1_pythia_12b = Model( | ||||||
|     name="huggingface:OpenAssistant/oasst-sft-1-pythia-12b", |     name          = 'huggingface:OpenAssistant/oasst-sft-1-pythia-12b', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| oasst_sft_4_pythia_12b_epoch_35 = Model( | oasst_sft_4_pythia_12b_epoch_35 = Model( | ||||||
|     name="huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", |     name          = 'huggingface:OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| santacoder = Model( | santacoder = Model( | ||||||
|     name="huggingface:bigcode/santacoder", |     name          = 'huggingface:bigcode/santacoder', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| bloom = Model( | bloom = Model( | ||||||
|     name="huggingface:bigscience/bloom", |     name          = 'huggingface:bigscience/bloom', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| flan_t5_xxl = Model( | flan_t5_xxl = Model( | ||||||
|     name="huggingface:google/flan-t5-xxl", |     name          = 'huggingface:google/flan-t5-xxl', | ||||||
|     base_provider="huggingface", |     base_provider = 'huggingface', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| code_davinci_002 = Model( | code_davinci_002 = Model( | ||||||
|     name="openai:code-davinci-002", |     name          = 'openai:code-davinci-002', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| gpt_35_turbo_16k = Model( | gpt_35_turbo_16k = Model( | ||||||
|     name="openai:gpt-3.5-turbo-16k", |     name          = 'openai:gpt-3.5-turbo-16k', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| gpt_35_turbo_16k_0613 = Model( | gpt_35_turbo_16k_0613 = Model( | ||||||
|     name="openai:gpt-3.5-turbo-16k-0613", |     name          = 'openai:gpt-3.5-turbo-16k-0613', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Equing, |     best_provider = Equing) | ||||||
| ) |  | ||||||
|  |  | ||||||
| gpt_4_0613 = Model( | gpt_4_0613 = Model( | ||||||
|     name="openai:gpt-4-0613", |     name          = 'openai:gpt-4-0613', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| text_ada_001 = Model( | text_ada_001 = Model( | ||||||
|     name="openai:text-ada-001", |     name          = 'openai:text-ada-001', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| text_babbage_001 = Model( | text_babbage_001 = Model( | ||||||
|     name="openai:text-babbage-001", |     name          = 'openai:text-babbage-001', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| text_curie_001 = Model( | text_curie_001 = Model( | ||||||
|     name="openai:text-curie-001", |     name          = 'openai:text-curie-001', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| text_davinci_002 = Model( | text_davinci_002 = Model( | ||||||
|     name="openai:text-davinci-002", |     name          = 'openai:text-davinci-002', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| text_davinci_003 = Model( | text_davinci_003 = Model( | ||||||
|     name="openai:text-davinci-003", |     name          = 'openai:text-davinci-003', | ||||||
|     base_provider="openai", |     base_provider = 'openai', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| llama13b_v2_chat = Model( | llama13b_v2_chat = Model( | ||||||
|     name="replicate:a16z-infra/llama13b-v2-chat", |     name          = 'replicate:a16z-infra/llama13b-v2-chat', | ||||||
|     base_provider="replicate", |     base_provider = 'replicate', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
| llama7b_v2_chat = Model( | llama7b_v2_chat = Model( | ||||||
|     name="replicate:a16z-infra/llama7b-v2-chat", |     name          = 'replicate:a16z-infra/llama7b-v2-chat', | ||||||
|     base_provider="replicate", |     base_provider = 'replicate', | ||||||
|     best_provider=Vercel, |     best_provider = Vercel) | ||||||
| ) |  | ||||||
|  |  | ||||||
|  |  | ||||||
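For reference, the `Model` container these definitions instantiate is a simple record of a model name, its upstream family, and a default provider class. A minimal sketch of its presumed shape (the actual definition lives earlier in models.py and is not part of this hunk; field alignment follows the commit's styling):

from dataclasses import dataclass

@dataclass(frozen=True)
class Model:
    # e.g. 'anthropic:claude-v2' or 'h2oai/h2ogpt-gm-oasst1-en-2048-falcon-7b-v3'
    name          : str
    # upstream family, e.g. 'openai', 'anthropic', 'huggingface'
    base_provider : str
    # provider class used by default, e.g. Vercel or H2o
    best_provider : type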
| class ModelUtils: | class ModelUtils: | ||||||
|     convert: dict[str, Model] = { |     convert: dict[str, Model] = { | ||||||
|         # GPT-3.5 / GPT-4 |         # GPT-3.5 / GPT-4 | ||||||
|         "gpt-3.5-turbo": gpt_35_turbo, |         'gpt-3.5-turbo' : gpt_35_turbo, | ||||||
|         "gpt-4": gpt_4, |         'gpt-4'         : gpt_4, | ||||||
|  |          | ||||||
|         # Bard |         # Bard | ||||||
|         "palm2": palm, |         'palm2'       : palm, | ||||||
|         "palm": palm, |         'palm'        : palm, | ||||||
|         "google": palm, |         'google'      : palm, | ||||||
|         "google-bard": palm, |         'google-bard' : palm, | ||||||
|         "google-palm": palm, |         'google-palm' : palm, | ||||||
|         "bard": palm, |         'bard'        : palm, | ||||||
|  |          | ||||||
|         # H2o |         # H2o | ||||||
|         "falcon-40b": falcon_40b, |         'falcon-40b' : falcon_40b, | ||||||
|         "falcon-7b": falcon_7b, |         'falcon-7b'  : falcon_7b, | ||||||
|         "llama-13b": llama_13b, |         'llama-13b'  : llama_13b, | ||||||
|  |          | ||||||
|         # Vercel |         # Vercel | ||||||
|         "claude-instant-v1": claude_instant_v1, |         'claude-instant-v1' : claude_instant_v1, | ||||||
|         "claude-v1": claude_v1, |         'claude-v1'         : claude_v1, | ||||||
|         "claude-v2": claude_v2, |         'claude-v2'         : claude_v2, | ||||||
|         "command-light-nightly": command_light_nightly, |         'command-nightly'   : command_nightly, | ||||||
|         "command-nightly": command_nightly, |         'gpt-neox-20b'      : gpt_neox_20b, | ||||||
|         "gpt-neox-20b": gpt_neox_20b, |         'santacoder'        : santacoder, | ||||||
|         "oasst-sft-1-pythia-12b": oasst_sft_1_pythia_12b, |         'bloom'             : bloom, | ||||||
|         "oasst-sft-4-pythia-12b-epoch-3.5": oasst_sft_4_pythia_12b_epoch_35, |         'flan-t5-xxl'       : flan_t5_xxl, | ||||||
|         "santacoder": santacoder, |         'code-davinci-002'  : code_davinci_002, | ||||||
|         "bloom": bloom, |         'gpt-3.5-turbo-16k' : gpt_35_turbo_16k, | ||||||
|         "flan-t5-xxl": flan_t5_xxl, |         'gpt-4-0613'        : gpt_4_0613, | ||||||
|         "code-davinci-002": code_davinci_002, |         'text-ada-001'      : text_ada_001, | ||||||
|         "gpt-3.5-turbo-16k": gpt_35_turbo_16k, |         'text-babbage-001'  : text_babbage_001, | ||||||
|         "gpt-3.5-turbo-16k-0613": gpt_35_turbo_16k_0613, |         'text-curie-001'    : text_curie_001, | ||||||
|         "gpt-4-0613": gpt_4_0613, |         'text-davinci-002'  : text_davinci_002, | ||||||
|         "text-ada-001": text_ada_001, |         'text-davinci-003'  : text_davinci_003, | ||||||
|         "text-babbage-001": text_babbage_001, |         'llama13b-v2-chat'  : llama13b_v2_chat, | ||||||
|         "text-curie-001": text_curie_001, |         'llama7b-v2-chat'   : llama7b_v2_chat, | ||||||
|         "text-davinci-002": text_davinci_002, |          | ||||||
|         "text-davinci-003": text_davinci_003, |         'oasst-sft-1-pythia-12b'           : oasst_sft_1_pythia_12b, | ||||||
|         "llama13b-v2-chat": llama13b_v2_chat, |         'oasst-sft-4-pythia-12b-epoch-3.5' : oasst_sft_4_pythia_12b_epoch_35, | ||||||
|         "llama7b-v2-chat": llama7b_v2_chat, |         'command-light-nightly'            : command_light_nightly, | ||||||
|  |         'gpt-3.5-turbo-16k-0613'           : gpt_35_turbo_16k_0613, | ||||||
|     } |     } | ||||||
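The `convert` table above is a plain alias-to-`Model` mapping, so resolving a user-supplied name is a single dict lookup. A minimal usage sketch (`resolve` is a hypothetical helper, not part of this commit):

def resolve(alias: str) -> Model:
    try:
        return ModelUtils.convert[alias]
    except KeyError:
        raise ValueError(f'unknown model alias: {alias}') from None

model = resolve('claude-v2')
print(model.name, model.base_provider)  # anthropic:claude-v2 anthropic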
| @@ -1,15 +1,14 @@ | |||||||
| from typing import Any, AsyncGenerator, Generator, NewType, Tuple, TypedDict, Union | from typing import Any, AsyncGenerator, Generator, NewType, Tuple, TypedDict, Union | ||||||
|  |  | ||||||
| SHA256 = NewType("sha_256_hash", str) | SHA256 = NewType('sha_256_hash', str) | ||||||
| CreateResult = Generator[str, None, None] | CreateResult = Generator[str, None, None] | ||||||
|  |  | ||||||
|  |  | ||||||
| __all__ = [ | __all__ = [ | ||||||
|     "Any", |     'Any', | ||||||
|     "AsyncGenerator", |     'AsyncGenerator', | ||||||
|     "Generator", |     'Generator', | ||||||
|     "Tuple", |     'Tuple', | ||||||
|     "TypedDict", |     'TypedDict', | ||||||
|     "SHA256", |     'SHA256', | ||||||
|     "CreateResult", |     'CreateResult', | ||||||
| ] | ] | ||||||
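`CreateResult` types the streaming interface: a provider's `create_completion` is a generator that yields response chunks as strings. A minimal conforming sketch (illustrative only; this provider does not exist in the repo):

from typing import Generator

CreateResult = Generator[str, None, None]

def create_completion(messages: list[dict[str, str]]) -> CreateResult:
    # echo each message back as a streamed chunk (illustration only)
    for message in messages:
        yield '%s: %s\n' % (message['role'], message['content'])

for chunk in create_completion([{'role': 'user', 'content': 'hi'}]):
    print(chunk, end='')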