Remove all non-working providers (#1679)

Fix many providers
Add selenium-wire to requirements
H Lohaus
2024-03-12 02:06:06 +01:00
committed by GitHub
parent 843f6db564
commit 6ef282de3a
57 changed files with 696 additions and 542 deletions

View File

@@ -230,71 +230,64 @@ set G4F_PROXY=http://host:port
 | Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
 | ------ | ------- | ------- | ----- | ------ | ------ | ---- |
-| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
 | [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
-| [chat.geekgpt.org](https://chat.geekgpt.org) | `g4f.Provider.GeekGpt` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
 ### GPT-3.5
 | Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
 | ------ | ------- | ------- | ----- | ------ | ------ | ---- |
 | [chat3.aiyunos.top](https://chat3.aiyunos.top/) | `g4f.Provider.AItianhuSpace` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [aichatonline.org](https://aichatonline.org) | `g4f.Provider.AiChatOnline` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt.ai](https://chatgpt.ai) | `g4f.Provider.ChatgptAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat.chatgptdemo.net](https://chat.chatgptdemo.net) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gptalk.net](https://gptalk.net) | `g4f.Provider.GPTalk` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chatforai.store](https://chatforai.store) | `g4f.Provider.ChatForAi` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgpt-free.cc](https://www.chatgpt-free.cc) | `g4f.Provider.ChatgptNext` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [flowgpt.com](https://flowgpt.com/chat) | `g4f.Provider.FlowGpt` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [freegptsnav.aifree.site](https://freegptsnav.aifree.site) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [gpttalk.ru](https://gpttalk.ru) | `g4f.Provider.GptTalkRu` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [koala.sh](https://koala.sh) | `g4f.Provider.Koala` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [app.myshell.ai](https://app.myshell.ai/chat) | `g4f.Provider.MyShell` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
 | [perplexity.ai](https://www.perplexity.ai) | `g4f.Provider.PerplexityAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
 | [poe.com](https://poe.com) | `g4f.Provider.Poe` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
 | [talkai.info](https://talkai.info) | `g4f.Provider.TalkAi` | ✔️ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [chat.vercel.ai](https://chat.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [aitianhu.com](https://www.aitianhu.com) | `g4f.Provider.AItianhu` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [e.aiask.me](https://e.aiask.me) | `g4f.Provider.AiAsk` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 | [chatgpt.bestim.org](https://chatgpt.bestim.org) | `g4f.Provider.Bestim` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatanywhere.cn](https://chatanywhere.cn) | `g4f.Provider.ChatAnywhere` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgpt4online.org](https://chatgpt4online.org) | `g4f.Provider.Chatgpt4Online` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatbase.co](https://www.chatbase.co) | `g4f.Provider.ChatBase` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chatgptdemo.info](https://chatgptdemo.info/chat) | `g4f.Provider.ChatgptDemo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 | [chat.chatgptdemo.ai](https://chat.chatgptdemo.ai) | `g4f.Provider.ChatgptDemoAi` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 | [chatgptfree.ai](https://chatgptfree.ai) | `g4f.Provider.ChatgptFree` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 | [chatgptlogin.ai](https://chatgptlogin.ai) | `g4f.Provider.ChatgptLogin` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chatgptx.de](https://chatgptx.de) | `g4f.Provider.ChatgptX` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat-shared2.zhile.io](https://chat-shared2.zhile.io) | `g4f.Provider.FakeGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [freegpts1.aifree.site](https://freegpts1.aifree.site/) | `g4f.Provider.FreeGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [chat.3211000.xyz](https://chat.3211000.xyz) | `g4f.Provider.Chatxyz` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gpt6.ai](https://gpt6.ai) | `g4f.Provider.Gpt6` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptchatly.com](https://gptchatly.com) | `g4f.Provider.GptChatly` | ✔️ | ❌ | ❌ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [ai18.gptforlove.com](https://ai18.gptforlove.com) | `g4f.Provider.GptForLove` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [gptgo.ai](https://gptgo.ai) | `g4f.Provider.GptGo` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 | [gptgod.site](https://gptgod.site) | `g4f.Provider.GptGod` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [hashnode.com](https://hashnode.com) | `g4f.Provider.Hashnode` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [sdk.vercel.ai](https://sdk.vercel.ai) | `g4f.Provider.Vercel` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
-| [chat.ylokh.xyz](https://chat.ylokh.xyz) | `g4f.Provider.Ylokh` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
+| [onlinegpt.org](https://onlinegpt.org) | `g4f.Provider.OnlineGpt` | ✔️ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 ### Other
 | Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
 | ------ | ------- | ------- | ----- | ------ | ------ | ---- |
+| [openchat.team](https://openchat.team) | `g4f.Provider.Aura` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [bard.google.com](https://bard.google.com) | `g4f.Provider.Bard` | ❌ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
 | [deepinfra.com](https://deepinfra.com) | `g4f.Provider.DeepInfra` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [gemini.google.com](https://gemini.google.com) | `g4f.Provider.Gemini` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
-| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Active-brightgreen) | |
-| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ✔️ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [ai.google.dev](https://ai.google.dev) | `g4f.Provider.GeminiPro` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
+| [gemini-chatbot-sigma.vercel.app](https://gemini-chatbot-sigma.vercel.app) | `g4f.Provider.GeminiProChat` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingChat` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
+| [huggingface.co](https://huggingface.co/chat) | `g4f.Provider.HuggingFace` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [llama2.ai](https://www.llama2.ai) | `g4f.Provider.Llama2` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [labs.perplexity.ai](https://labs.perplexity.ai) | `g4f.Provider.PerplexityLabs` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ❌ | ❌ | ✔️ | ![Unknown](https://img.shields.io/badge/Active-brightgreen) | ❌ |
-| [beta.theb.ai](https://beta.theb.ai) | `g4f.Provider.Theb` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
-| [free.chatgpt.org.uk](https://free.chatgpt.org.uk) | `g4f.Provider.FreeChatgpt` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ❌ |
+| [pi.ai](https://pi.ai/talk) | `g4f.Provider.Pi` | ❌ | ❌ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
 | [theb.ai](https://theb.ai) | `g4f.Provider.ThebApi` | ❌ | ❌ | ❌ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
-| [phind.com](https://www.phind.com) | `g4f.Provider.Phind` | ❌ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
 | [open-assistant.io](https://open-assistant.io/chat) | `g4f.Provider.OpenAssistant` | ❌ | ❌ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ✔️ |
 ### Models
@@ -306,11 +299,11 @@ set G4F_PROXY=http://host:port
 | gpt-4-turbo | OpenAI | g4f.Provider.Bing | [openai.com](https://openai.com/) |
 | Llama-2-7b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
 | Llama-2-13b-chat-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| Llama-2-70b-chat-hf | Meta | 4+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-34b-Instruct-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
-| CodeLlama-70b-Instruct-hf | Meta | g4f.Provider.DeepInfra | [llama.meta.com](https://llama.meta.com/) |
-| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
-| Mistral-7B-Instruct-v0.1 | Huggingface | 3+ Providers | [huggingface.co](https://huggingface.co/) |
+| Llama-2-70b-chat-hf | Meta | 3+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-34b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| CodeLlama-70b-Instruct-hf | Meta | 2+ Providers | [llama.meta.com](https://llama.meta.com/) |
+| Mixtral-8x7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
+| Mistral-7B-Instruct-v0.1 | Huggingface | 4+ Providers | [huggingface.co](https://huggingface.co/) |
 | dolphin-2.6-mixtral-8x7b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
 | lzlv_70b_fp16_hf | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
 | airoboros-70b | Huggingface | g4f.Provider.DeepInfra | [huggingface.co](https://huggingface.co/) |
@@ -318,7 +311,9 @@ set G4F_PROXY=http://host:port
 | openchat_3.5 | Huggingface | 2+ Providers | [huggingface.co](https://huggingface.co/) |
 | gemini | Google | g4f.Provider.Gemini | [gemini.google.com](https://gemini.google.com/) |
 | gemini-pro | Google | 2+ Providers | [gemini.google.com](https://gemini.google.com/) |
-| claude-v2 | Anthropic | 2+ Providers | [anthropic.com](https://www.anthropic.com/) |
+| claude-v2 | Anthropic | 1+ Providers | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-opus | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
+| claude-3-sonnet | Anthropic | g4f.Provider.You | [anthropic.com](https://www.anthropic.com/) |
 | pi | Inflection | g4f.Provider.Pi | [inflection.ai](https://inflection.ai/) |
 ## 🔗 Related GPT4Free Projects
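The provider column in these tables maps directly onto the `g4f.Provider` namespace, and a specific provider can be pinned instead of relying on auto-selection. A minimal sketch of that usage, following the interface documented in the usage section of this README:

```python
import g4f

# Pin one of the providers from the tables above instead of auto-selection.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.FreeGpt,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```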

View File

@@ -4,6 +4,8 @@ from aiohttp import ClientSession

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
+from ..requests import get_args_from_browser
+from ..webdriver import WebDriver

 class Aura(AsyncGeneratorProvider):
     url = "https://openchat.team"
@@ -15,24 +17,11 @@ class Aura(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> AsyncResult:
-        headers = {
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br",
-            "Accept-Language": "en-GB,en-US;q=0.9,en;q=0.8",
-            "Content-Type": "application/json",
-            "Origin": f"{cls.url}",
-            "Referer": f"{cls.url}/",
-            "Sec-Ch-Ua": '"Not_A Brand";v="8", "Chromium";v="120", "Google Chrome";v="120"',
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": '"Linux"',
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
-        }
-        async with ClientSession(headers=headers) as session:
+        args = get_args_from_browser(cls.url, webdriver, proxy)
+        async with ClientSession(**args) as session:
             new_messages = []
             system_message = []
             for message in messages:
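The hand-maintained header block is gone: `get_args_from_browser` drives a Selenium webdriver to the site and lifts the resulting headers and cookies into keyword arguments for `aiohttp.ClientSession`. A rough sketch of the idea; the helper names and return shape here are assumptions for illustration, not the actual g4f implementation:

```python
# Hypothetical illustration of the get_args_from_browser pattern.
def get_args_from_browser(url: str, webdriver=None, proxy: str = None) -> dict:
    driver = webdriver or launch_browser(proxy)  # launch_browser: assumed helper
    driver.get(url)  # visit the site so it can set its cookies
    return {
        "headers": {"User-Agent": driver.execute_script("return navigator.userAgent")},
        "cookies": {c["name"]: c["value"] for c in driver.get_cookies()},
    }
```

Both keys are valid `ClientSession` keyword arguments, which is why the call site can simply unpack them with `ClientSession(**args)`.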

View File

@@ -2,13 +2,12 @@ from __future__ import annotations

 import random
 import json
-import os
 import uuid
 import time
 from urllib import parse
 from aiohttp import ClientSession, ClientTimeout, BaseConnector

-from ..typing import AsyncResult, Messages, ImageType
+from ..typing import AsyncResult, Messages, ImageType, Cookies
 from ..image import ImageResponse, ImageRequest
 from .base_provider import AsyncGeneratorProvider
 from .helper import get_connector
@@ -39,7 +38,7 @@ class Bing(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         timeout: int = 900,
-        cookies: dict = None,
+        cookies: Cookies = None,
         connector: BaseConnector = None,
         tone: str = Tones.balanced,
         image: ImageType = None,
@@ -65,7 +64,7 @@ class Bing(AsyncGeneratorProvider):
     else:
         prompt = messages[-1]["content"]
         context = create_context(messages[:-1])
     cookies = {**get_default_cookies(), **cookies} if cookies else get_default_cookies()
     gpt4_turbo = True if model.startswith("gpt-4-turbo") else False
@@ -79,32 +78,88 @@ def create_context(messages: Messages) -> str:
     :param messages: A list of message dictionaries.
     :return: A string representing the context created from the messages.
     """
-    return "".join(
-        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}\n\n"
+    return "\n\n".join(
+        f"[{message['role']}]" + ("(#message)" if message['role'] != "system" else "(#additional_instructions)") + f"\n{message['content']}"
         for message in messages
     )

+def get_ip_address() -> str:
+    return f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
+
 class Defaults:
     """
     Default settings and configurations for the Bing provider.
     """
     delimiter = "\x1e"
-    ip_address = f"13.{random.randint(104, 107)}.{random.randint(0, 255)}.{random.randint(0, 255)}"

     # List of allowed message types for Bing responses
     allowedMessageTypes = [
-        "ActionRequest", "Chat", "Context", "Progress", "SemanticSerp",
-        "GenerateContentQuery", "SearchQuery", "RenderCardRequest"
+        "ActionRequest","Chat",
+        "ConfirmationCard", "Context",
+        "InternalSearchQuery", #"InternalSearchResult",
+        "Disengaged", #"InternalLoaderMessage",
+        "Progress", "RenderCardRequest",
+        "RenderContentRequest", "AdsQuery",
+        "SemanticSerp", "GenerateContentQuery",
+        "SearchQuery", "GeneratedCode",
+        "InternalTasksMessage"
     ]

-    sliceIds = [
-        'abv2', 'srdicton', 'convcssclick', 'stylewv2', 'contctxp2tf',
-        '802fluxv1pc_a', '806log2sphs0', '727savemem', '277teditgnds0', '207hlthgrds0'
-    ]
+    sliceIds = {
+        "Balanced": [
+            "supllmnfe","archnewtf",
+            "stpstream", "stpsig", "vnextvoicecf", "scmcbase", "cmcpupsalltf", "sydtransctrl",
+            "thdnsrch", "220dcl1s0", "0215wcrwips0", "0305hrthrots0", "0130gpt4t",
+            "bingfc", "0225unsticky1", "0228scss0",
+            "defquerycf", "defcontrol", "3022tphpv"
+        ],
+        "Creative": [
+            "bgstream", "fltltst2c",
+            "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
+            "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
+            "bingfccf", "0225unsticky1", "0228scss0",
+            "3022tpvs0"
+        ],
+        "Precise": [
+            "bgstream", "fltltst2c",
+            "stpstream", "stpsig", "vnextvoicecf", "cmcpupsalltf", "sydtransctrl",
+            "0301techgnd", "220dcl1bt15", "0215wcrwip", "0305hrthrot", "0130gpt4t",
+            "bingfccf", "0225unsticky1", "0228scss0",
+            "defquerycf", "3022tpvs0"
+        ],
+    }
+    optionsSets = {
+        "Balanced": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg", "autosave",
+            "iyxapbing", "iycapbing",
+            "galileo", "saharagenconv5", "gldcl1p",
+            "gpt4tmncnp"
+        ],
+        "Creative": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg",
+            "iyxapbing", "iycapbing",
+            "h3imaginative", "techinstgnd", "hourthrot", "clgalileo", "gencontentv3",
+            "gpt4tmncnp"
+        ],
+        "Precise": [
+            "nlu_direct_response_filter", "deepleo",
+            "disable_emoji_spoken_text", "responsible_ai_policy_235",
+            "enablemm", "dv3sugg",
+            "iyxapbing", "iycapbing",
+            "h3precise", "techinstgnd", "hourthrot", "techinstgnd", "hourthrot",
+            "clgalileo", "gencontentv3"
+        ],
+    }

     # Default location settings
     location = {
         "locale": "en-US", "market": "en-US", "region": "US",
+        "location":"lat:34.0536909;long:-118.242766;re=1000m;",
         "locationHints": [{
             "country": "United States", "state": "California", "city": "Los Angeles",
             "timezoneoffset": 8, "countryConfidence": 8,
@@ -134,17 +189,8 @@ class Defaults:
         'upgrade-insecure-requests': '1',
         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.69',
         'x-edge-shopping-flag': '1',
-        'x-forwarded-for': ip_address,
+        'x-forwarded-for': get_ip_address(),
     }

-    optionsSets = [
-        'nlu_direct_response_filter', 'deepleo', 'disable_emoji_spoken_text',
-        'responsible_ai_policy_235', 'enablemm', 'iyxapbing', 'iycapbing',
-        'gencontentv3', 'fluxsrtrunc', 'fluxtrunc', 'fluxv1', 'rai278',
-        'replaceurl', 'eredirecturl', 'nojbfedge', "fluxcopilot", "nojbf",
-        "dgencontentv3", "nointernalsugg", "disable_telemetry", "machine_affinity",
-        "streamf", "codeint", "langdtwb", "fdwtlst", "fluxprod", "deuct3"
-    ]

 def get_default_cookies():
     return {
@@ -156,11 +202,6 @@ def get_default_cookies():
         'SRCHHPGUSR' : f'HV={int(time.time())}',
     }

-class ConversationStyleOptionSets():
-    CREATIVE = ["h3imaginative", "clgalileo", "gencontentv3"]
-    BALANCED = ["galileo", "gldcl1p"]
-    PRECISE = ["h3precise", "clgalileo"]

 def format_message(msg: dict) -> str:
     """
     Formats a message dictionary into a JSON string with a delimiter.
@@ -191,18 +232,8 @@ def create_message(
     :param gpt4_turbo: Flag to enable GPT-4 Turbo.
     :return: A formatted string message for the Bing API.
     """
-    options_sets = Defaults.optionsSets.copy()
-    # Append tone-specific options
-    if tone == Tones.creative:
-        options_sets.extend(ConversationStyleOptionSets.CREATIVE)
-    elif tone == Tones.precise:
-        options_sets.extend(ConversationStyleOptionSets.PRECISE)
-    elif tone == Tones.balanced:
-        options_sets.extend(ConversationStyleOptionSets.BALANCED)
-    else:
-        options_sets.append("harmonyv3")
-
-    # Additional configurations based on parameters
+    options_sets = []
     if not web_search:
         options_sets.append("nosearchall")
     if gpt4_turbo:
@@ -210,34 +241,38 @@ def create_message(
     request_id = str(uuid.uuid4())
     struct = {
-        'arguments': [{
-            'source': 'cib',
-            'optionsSets': options_sets,
-            'allowedMessageTypes': Defaults.allowedMessageTypes,
-            'sliceIds': Defaults.sliceIds,
-            'traceId': os.urandom(16).hex(),
-            'isStartOfSession': True,
-            'requestId': request_id,
-            'message': {
-                **Defaults.location,
-                'author': 'user',
-                'inputMethod': 'Keyboard',
-                'text': prompt,
-                'messageType': 'Chat',
-                'requestId': request_id,
-                'messageId': request_id
-            },
+        "arguments":[{
+            "source": "cib",
+            "optionsSets": [*Defaults.optionsSets[tone], *options_sets],
+            "allowedMessageTypes": Defaults.allowedMessageTypes,
+            "sliceIds": Defaults.sliceIds[tone],
             "verbosity": "verbose",
             "scenario": "SERP",
             "plugins": [{"id": "c310c353-b9f0-4d76-ab0d-1dd5e979cf68", "category": 1}] if web_search else [],
-            'tone': tone,
-            'spokenTextMode': 'None',
-            'conversationId': conversation.conversationId,
-            'participant': {'id': conversation.clientId},
+            "traceId": str(uuid.uuid4()),
+            "conversationHistoryOptionsSets": ["autosave","savemem","uprofupd","uprofgen"],
+            "gptId": "copilot",
+            "isStartOfSession": True,
+            "requestId": request_id,
+            "message":{
+                **Defaults.location,
+                "userIpAddress": get_ip_address(),
+                "timestamp": "2024-03-11T22:40:36+01:00",
+                "author": "user",
+                "inputMethod": "Keyboard",
+                "text": prompt,
+                "messageType": "Chat",
+                "requestId": request_id,
+                "messageId": request_id
+            },
+            "tone": tone,
+            "spokenTextMode": "None",
+            "conversationId": conversation.conversationId,
+            "participant": {"id": conversation.clientId}
         }],
-        'invocationId': '1',
-        'target': 'chat',
-        'type': 4
+        "invocationId": "0",
+        "target": "chat",
+        "type": 4
     }

     if image_request and image_request.get('imageUrl') and image_request.get('originalImageUrl'):
@@ -283,14 +318,13 @@ async def stream_generate(
     """
     headers = Defaults.headers
     if cookies:
-        headers["Cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
+        headers["cookie"] = "; ".join(f"{k}={v}" for k, v in cookies.items())
     async with ClientSession(
-        timeout=ClientTimeout(total=timeout), headers=headers, connector=connector
+        headers=headers, cookies=cookies,
+        timeout=ClientTimeout(total=timeout), connector=connector
     ) as session:
         conversation = await create_conversation(session)
         image_request = await upload_image(session, image, tone) if image else None
-
         try:
             async with session.ws_connect(
                 'wss://sydney.bing.com/sydney/ChatHub',
@@ -298,12 +332,13 @@ async def stream_generate(
                 params={'sec_access_token': conversation.conversationSignature}
             ) as wss:
                 await wss.send_str(format_message({'protocol': 'json', 'version': 1}))
+                await wss.send_str(format_message({"type": 6}))
                 await wss.receive(timeout=timeout)
                 await wss.send_str(create_message(conversation, prompt, tone, context, image_request, web_search, gpt4_turbo))
                 response_txt = ''
                 returned_text = ''
                 final = False
+                message_id = None
                 while not final:
                     msg = await wss.receive(timeout=timeout)
                     if not msg.data:
@@ -315,13 +350,17 @@ async def stream_generate(
                     response = json.loads(obj)
                     if response and response.get('type') == 1 and response['arguments'][0].get('messages'):
                         message = response['arguments'][0]['messages'][0]
+                        # Reset memory, if we have a new message
+                        if message_id is not None and message_id != message["messageId"]:
+                            returned_text = ''
+                        message_id = message["messageId"]
                         image_response = None
                         if (message['contentOrigin'] != 'Apology'):
                             if 'adaptiveCards' in message:
                                 card = message['adaptiveCards'][0]['body'][0]
                                 if "text" in card:
                                     response_txt = card.get('text')
-                                if message.get('messageType'):
+                                if message.get('messageType') and "inlines" in card:
                                     inline_txt = card['inlines'][0].get('text')
                                     response_txt += inline_txt + '\n'
                             elif message.get('contentType') == "IMAGE":
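The `create_context` change above moves the message separator into the join, dropping the trailing blank line after the last message. A quick self-contained check of the new output format:

```python
messages = [
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": "Hi"},
]
context = "\n\n".join(
    f"[{m['role']}]"
    + ("(#message)" if m["role"] != "system" else "(#additional_instructions)")
    + f"\n{m['content']}"
    for m in messages
)
print(context)
# [system](#additional_instructions)
# Be brief.
#
# [user](#message)
# Hi
```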

View File

@@ -2,15 +2,17 @@ from __future__ import annotations

 import time
 import hashlib
+import uuid

 from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
+from ..errors import RateLimitError
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin

-class ChatForAi(AsyncGeneratorProvider):
+class ChatForAi(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://chatforai.store"
     working = True
+    default_model = "gpt-3.5-turbo"
     supports_message_history = True
     supports_gpt_35_turbo = True
@@ -21,36 +23,39 @@ class ChatForAi(AsyncGeneratorProvider):
         messages: Messages,
         proxy: str = None,
         timeout: int = 120,
+        temperature: float = 0.7,
+        top_p: float = 1,
         **kwargs
     ) -> AsyncResult:
+        model = cls.get_model(model)
         headers = {
             "Content-Type": "text/plain;charset=UTF-8",
             "Origin": cls.url,
             "Referer": f"{cls.url}/?r=b",
         }
-        async with StreamSession(impersonate="chrome107", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
-            prompt = messages[-1]["content"]
+        async with StreamSession(impersonate="chrome", headers=headers, proxies={"https": proxy}, timeout=timeout) as session:
             timestamp = int(time.time() * 1e3)
-            conversation_id = f"id_{timestamp-123}"
+            conversation_id = str(uuid.uuid4())
             data = {
                 "conversationId": conversation_id,
                 "conversationType": "chat_continuous",
                 "botId": "chat_continuous",
                 "globalSettings":{
                     "baseUrl": "https://api.openai.com",
-                    "model": model if model else "gpt-3.5-turbo",
+                    "model": model,
                     "messageHistorySize": 5,
-                    "temperature": 0.7,
-                    "top_p": 1,
+                    "temperature": temperature,
+                    "top_p": top_p,
                     **kwargs
                 },
-                "botSettings": {},
-                "prompt": prompt,
+                "prompt": "",
                 "messages": messages,
                 "timestamp": timestamp,
-                "sign": generate_signature(timestamp, prompt, conversation_id)
+                "sign": generate_signature(timestamp, "", conversation_id)
             }
             async with session.post(f"{cls.url}/api/handle/provider-openai", json=data) as response:
+                if response.status == 429:
+                    raise RateLimitError("Rate limit reached")
                 response.raise_for_status()
                 async for chunk in response.iter_content():
                     if b"https://chatforai.store" in chunk:
@@ -59,5 +64,5 @@ class ChatForAi(AsyncGeneratorProvider):

 def generate_signature(timestamp: int, message: str, id: str):
-    buffer = f"{timestamp}:{id}:{message}:7YN8z6d6"
+    buffer = f"{id}:{timestamp}:{message}:h496Jd6b"
     return hashlib.sha256(buffer.encode()).hexdigest()
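The signature now hashes `id:timestamp:message:secret` (previously `timestamp:id:message:secret`) with a rotated secret suffix, and since the prompt moved out of the payload it is computed over an empty message. Reproducing the value sent above:

```python
import time
import uuid
import hashlib

timestamp = int(time.time() * 1e3)
conversation_id = str(uuid.uuid4())
# Same fields and order as generate_signature() above, with message = "".
sign = hashlib.sha256(f"{conversation_id}:{timestamp}::h496Jd6b".encode()).hexdigest()
```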

View File

@@ -13,7 +13,7 @@ class Chatgpt4Online(AsyncGeneratorProvider):
     url = "https://chatgpt4online.org"
     supports_message_history = True
     supports_gpt_35_turbo = True
-    working = False
+    working = True
     _wpnonce = None
     _context_id = None

View File

@@ -4,14 +4,16 @@ import re, html, json, string, random

 from aiohttp import ClientSession

 from ..typing import Messages, AsyncResult
+from ..errors import RateLimitError
 from .base_provider import AsyncGeneratorProvider
+from .helper import get_random_string

 class ChatgptAi(AsyncGeneratorProvider):
     url = "https://chatgpt.ai"
-    working = False
+    working = True
     supports_message_history = True
-    supports_gpt_35_turbo = True
+    supports_system_message = True,
+    supports_gpt_4 = True,
     _system = None

     @classmethod
@@ -45,7 +47,6 @@ class ChatgptAi(AsyncGeneratorProvider):
             async with session.get(cls.url, proxy=proxy) as response:
                 response.raise_for_status()
                 text = await response.text()
-
         result = re.search(r"data-system='(.*?)'", text)
         if result :
             cls._system = json.loads(html.unescape(result.group(1)))
@@ -56,14 +57,15 @@ class ChatgptAi(AsyncGeneratorProvider):
             "botId": cls._system["botId"],
             "customId": cls._system["customId"],
             "session": cls._system["sessionId"],
-            "chatId": "".join(random.choices(f"{string.ascii_lowercase}{string.digits}", k=11)),
+            "chatId": get_random_string(),
             "contextId": cls._system["contextId"],
-            "messages": messages,
+            "messages": messages[:-1],
             "newMessage": messages[-1]["content"],
-            "stream": True
+            "newFileId": None,
+            "stream":True
         }
         async with session.post(
-            f"{cls.url}/wp-json/mwai-ui/v1/chats/submit",
+            "https://chatgate.ai/wp-json/mwai-ui/v1/chats/submit",
             proxy=proxy,
             json=data,
             headers={"X-Wp-Nonce": cls._system["restNonce"]}
@@ -76,6 +78,10 @@ class ChatgptAi(AsyncGeneratorProvider):
                         assert "type" in line
                     except:
                         raise RuntimeError(f"Broken line: {line.decode()}")
+                    if line["type"] == "error":
+                        if "https://chatgate.ai/login" in line["data"]:
+                            raise RateLimitError("Rate limit reached")
+                        raise RuntimeError(line["data"])
                     if line["type"] == "live":
                         yield line["data"]
                     elif line["type"] == "end":

View File

@@ -5,8 +5,7 @@ import re

 from ..requests import StreamSession
 from ..typing import Messages
 from .base_provider import AsyncProvider
-from .helper import format_prompt, get_cookies
+from .helper import format_prompt

 class ChatgptFree(AsyncProvider):
     url = "https://chatgptfree.ai"
@@ -25,12 +24,6 @@ class ChatgptFree(AsyncProvider):
         cookies: dict = None,
         **kwargs
     ) -> str:
-        if not cookies:
-            cookies = get_cookies('chatgptfree.ai')
-        if not cookies:
-            raise RuntimeError(f"g4f.provider.{cls.__name__} requires cookies [refresh https://chatgptfree.ai on chrome]")
-
         headers = {
             'authority': 'chatgptfree.ai',
             'accept': '*/*',
@@ -82,6 +75,5 @@ class ChatgptFree(AsyncProvider):
             "bot_id": "0"
         }
         async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
-
             response.raise_for_status()
             return (await response.json())["data"]

View File

@@ -4,13 +4,14 @@ import json

 from aiohttp import ClientSession

 from ..typing import AsyncResult, Messages
-from ..providers.base_provider import AsyncGeneratorProvider
+from .base_provider import AsyncGeneratorProvider

 class ChatgptNext(AsyncGeneratorProvider):
     url = "https://www.chatgpt-free.cc"
     working = True
     supports_gpt_35_turbo = True
+    supports_message_history = True
+    supports_system_message = True

     @classmethod
     async def create_async_generator(
@@ -18,6 +19,11 @@ class ChatgptNext(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        max_tokens: int = None,
+        temperature: float = 0.7,
+        top_p: float = 1,
+        presence_penalty: float = 0,
+        frequency_penalty: float = 0,
         **kwargs
     ) -> AsyncResult:
         if not model:
@@ -43,11 +49,11 @@ class ChatgptNext(AsyncGeneratorProvider):
             "messages": messages,
             "stream": True,
             "model": model,
-            "temperature": 0.5,
-            "presence_penalty": 0,
-            "frequency_penalty": 0,
-            "top_p": 1,
-            **kwargs
+            "temperature": temperature,
+            "presence_penalty": presence_penalty,
+            "frequency_penalty": frequency_penalty,
+            "top_p": top_p,
+            "max_tokens": max_tokens,
         }
         async with session.post(f"https://chat.fstha.com/api/openai/v1/chat/completions", json=data, proxy=proxy) as response:
             response.raise_for_status()
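With the sampling options promoted to explicit keyword parameters, callers can set them directly instead of smuggling them through `**kwargs`. A minimal sketch of driving the async generator (assuming `ChatgptNext` is imported from `g4f.Provider`):

```python
import asyncio
from g4f.Provider import ChatgptNext  # import path assumed

async def main():
    async for chunk in ChatgptNext.create_async_generator(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hi"}],
        temperature=0.9,
        top_p=0.95,
    ):
        print(chunk, end="")

asyncio.run(main())
```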

View File

@@ -7,12 +7,12 @@ from aiohttp import ClientSession

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
 from .helper import format_prompt
+from ..errors import RateLimitError

 class ChatgptX(AsyncGeneratorProvider):
     url = "https://chatgptx.de"
     supports_gpt_35_turbo = True
-    working = False
+    working = True

     @classmethod
     async def create_async_generator(
@@ -73,6 +73,8 @@ class ChatgptX(AsyncGeneratorProvider):
         async with session.post(f'{cls.url}/sendchat', data=data, headers=headers, proxy=proxy) as response:
             response.raise_for_status()
             chat = await response.json()
+            if "messages" in chat and "Anfragelimit" in chat["messages"]:
+                raise RateLimitError("Rate limit reached")
             if "response" not in chat or not chat["response"]:
                 raise RuntimeError(f'Response: {chat}')
             headers = {

View File

@@ -5,12 +5,14 @@ from aiohttp import ClientSession

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..errors import RateLimitError

 class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://flowgpt.com/chat"
     working = True
     supports_gpt_35_turbo = True
     supports_message_history = True
+    supports_system_message = True
     default_model = "gpt-3.5-turbo"
     models = [
         "gpt-3.5-turbo",
@@ -30,6 +32,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
+        temperature: float = 0.7,
         **kwargs
     ) -> AsyncResult:
         model = cls.get_model(model)
@@ -59,7 +62,7 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
             "question": messages[-1]["content"],
             "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
             "system": system_message,
-            "temperature": kwargs.get("temperature", 0.7),
+            "temperature": temperature,
             "promptId": f"model-{model}",
             "documentIds": [],
             "chatFileDocumentIds": [],
@@ -67,6 +70,8 @@ class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
             "generateAudio": False
         }
         async with session.post("https://backend-k8s.flowgpt.com/v2/chat-anonymous", json=data, proxy=proxy) as response:
+            if response.status == 429:
+                raise RateLimitError("Rate limit reached")
             response.raise_for_status()
             async for chunk in response.content:
                 if chunk.strip():
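This 429 check appears in several of the touched providers: throttling is translated into the library's shared `RateLimitError` before the generic status check, so callers can catch one exception type regardless of provider. The pattern in isolation, with a stand-in exception class:

```python
class RateLimitError(Exception):
    """Stand-in for g4f.errors.RateLimitError."""

async def ensure_ok(response) -> None:
    # Map HTTP 429 to the shared exception before raising generically,
    # mirroring the FlowGpt and ChatForAi handling above.
    if response.status == 429:
        raise RateLimitError("Rate limit reached")
    response.raise_for_status()
```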

View File

@@ -1,7 +1,7 @@
 from __future__ import annotations

-import json, random
+import json

-from aiohttp import ClientSession
+from aiohttp import ClientSession, ClientTimeout

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
@@ -18,6 +18,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
         model: str,
         messages: Messages,
         proxy: str = None,
+        timeout: int = 120,
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -33,7 +34,7 @@ class FreeChatgpt(AsyncGeneratorProvider, ProviderModelMixin):
             "Sec-Fetch-Site": "same-origin",
             "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
         }
-        async with ClientSession(headers=headers) as session:
+        async with ClientSession(headers=headers, timeout=ClientTimeout(timeout)) as session:
             data = {
                 "messages": messages,
                 "stream": True,

View File

@@ -5,15 +5,18 @@ import time, hashlib, random

 from ..typing import AsyncResult, Messages
 from ..requests import StreamSession
 from .base_provider import AsyncGeneratorProvider
+from ..errors import RateLimitError

 domains = [
-    'https://s.aifree.site'
+    "https://s.aifree.site",
+    "https://v.aifree.site/"
 ]

 class FreeGpt(AsyncGeneratorProvider):
-    url = "https://freegpts1.aifree.site/"
-    working = False
+    url = "https://freegptsnav.aifree.site"
+    working = True
     supports_message_history = True
+    supports_system_message = True
     supports_gpt_35_turbo = True
@@ -38,15 +41,14 @@ class FreeGpt(AsyncGeneratorProvider):
                 "pass": None,
                 "sign": generate_signature(timestamp, prompt)
             }
-            url = random.choice(domains)
-            async with session.post(f"{url}/api/generate", json=data) as response:
+            domain = random.choice(domains)
+            async with session.post(f"{domain}/api/generate", json=data) as response:
                 response.raise_for_status()
                 async for chunk in response.iter_content():
                     chunk = chunk.decode()
                     if chunk == "当前地区当日额度已消耗完":
-                        raise RuntimeError("Rate limit reached")
+                        raise RateLimitError("Rate limit reached")
                     yield chunk

 def generate_signature(timestamp: int, message: str, secret: str = ""):
     data = f"{timestamp}:{message}:{secret}"
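`generate_signature` builds the string `timestamp:message:secret` (secret defaulting to empty) and, given the file's `hashlib` import, presumably hex-digests it with SHA-256; the digest line is not visible in this hunk. A self-contained sketch under that assumption:

```python
import time
import hashlib

def generate_signature(timestamp: int, message: str, secret: str = "") -> str:
    data = f"{timestamp}:{message}:{secret}"
    return hashlib.sha256(data.encode()).hexdigest()  # digest step assumed from imports

sign = generate_signature(int(time.time()), "Hello")
```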

View File

@@ -2,15 +2,18 @@ from __future__ import annotations

 import time
 from hashlib import sha256
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
+from ..errors import RateLimitError
+from ..requests import raise_for_status
+from ..requests.aiohttp import get_connector

 class GeminiProChat(AsyncGeneratorProvider):
     url = "https://gemini-chatbot-sigma.vercel.app"
     working = True
+    supports_message_history = True

     @classmethod
     async def create_async_generator(
@@ -18,6 +21,7 @@ class GeminiProChat(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -34,7 +38,7 @@ class GeminiProChat(AsyncGeneratorProvider):
             "Connection": "keep-alive",
             "TE": "trailers",
         }
-        async with ClientSession(headers=headers) as session:
+        async with ClientSession(connector=get_connector(connector, proxy), headers=headers) as session:
             timestamp = int(time.time() * 1e3)
             data = {
                 "messages":[{
@@ -46,7 +50,10 @@ class GeminiProChat(AsyncGeneratorProvider):
                 "sign": generate_signature(timestamp, messages[-1]["content"]),
             }
             async with session.post(f"{cls.url}/api/generate", json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                if response.status == 500:
+                    if "Quota exceeded" in await response.text():
+                        raise RateLimitError(f"Response {response.status}: Rate limit reached")
+                await raise_for_status(response)
                 async for chunk in response.content.iter_any():
                     yield chunk.decode()
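Note that g4f's `raise_for_status` is awaited, unlike aiohttp's synchronous method; an awaitable check can read the response body to build a more useful error. A sketch of what such a helper might look like (the real g4f implementation may differ):

```python
async def raise_for_status(response, message: str = None) -> None:
    # Awaitable variant of response.raise_for_status(): including a snippet
    # of the body in the error requires an await on response.text().
    if not response.ok:
        body = (await response.text())[:200]
        raise RuntimeError(f"Response {response.status}: {message or body}")
```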

View File

@@ -1,10 +1,13 @@
 from __future__ import annotations

-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
+from .helper import get_random_string, get_connector
+from ..requests import raise_for_status, get_args_from_browser, WebDriver
+from ..webdriver import has_seleniumwire
+from ..errors import MissingRequirementsError

 class GptTalkRu(AsyncGeneratorProvider):
     url = "https://gpttalk.ru"
@@ -17,33 +20,40 @@ class GptTalkRu(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        connector: BaseConnector = None,
+        webdriver: WebDriver = None,
         **kwargs
     ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
-        headers = {
-            "Accept": "application/json, text/plain, */*",
-            "Accept-Language": "en-US",
-            "Connection": "keep-alive",
-            "Content-Type": "application/json",
-            "Origin": "https://gpttalk.ru",
-            "Referer": "https://gpttalk.ru/",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36",
-            "sec-ch-ua": '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Linux"',
-        }
-        async with ClientSession(headers=headers) as session:
+        if not has_seleniumwire:
+            raise MissingRequirementsError('Install "selenium-wire" package')
+        args = get_args_from_browser(f"{cls.url}", webdriver)
+        args["headers"]["accept"] = "application/json, text/plain, */*"
+        async with ClientSession(connector=get_connector(connector, proxy), **args) as session:
+            async with session.get("https://gpttalk.ru/getToken") as response:
+                await raise_for_status(response)
+                public_key = (await response.json())["response"]["key"]["publicKey"]
+            random_string = get_random_string(8)
             data = {
                 "model": model,
                 "modelType": 1,
                 "prompt": messages,
                 "responseType": "stream",
+                "security": {
+                    "randomMessage": random_string,
+                    "shifrText": encrypt(public_key, random_string)
+                }
             }
             async with session.post(f"{cls.url}/gpt2", json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for chunk in response.content.iter_any():
                     yield chunk.decode()

+def encrypt(public_key: str, value: str) -> str:
+    from Crypto.Cipher import PKCS1_v1_5
+    from Crypto.PublicKey import RSA
+    import base64
+    rsa_key = RSA.importKey(public_key)
+    cipher = PKCS1_v1_5.new(rsa_key)
+    return base64.b64encode(cipher.encrypt(value.encode())).decode()
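The new `security` field proves the client can RSA-encrypt a random nonce under the public key fetched from `/getToken`. `encrypt` uses PyCryptodome's PKCS#1 v1.5 cipher; a standalone usage sketch with a throwaway key pair in place of the server's key:

```python
import base64
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_v1_5

def encrypt(public_key: str, value: str) -> str:
    rsa_key = RSA.importKey(public_key)
    cipher = PKCS1_v1_5.new(rsa_key)
    return base64.b64encode(cipher.encrypt(value.encode())).decode()

key = RSA.generate(2048)  # throwaway key pair for the demo
token = encrypt(key.publickey().export_key().decode(), "nonce123")
```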

View File

@@ -1,17 +1,18 @@
 from __future__ import annotations

 import json
-from aiohttp import ClientSession
+from aiohttp import ClientSession, BaseConnector

 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from .helper import get_random_string, get_connector
+from ..requests import raise_for_status

 class Koala(AsyncGeneratorProvider):
     url = "https://koala.sh"
+    working = True
     supports_gpt_35_turbo = True
     supports_message_history = True
-    working = True

     @classmethod
     async def create_async_generator(
@@ -19,35 +20,36 @@ class Koala(AsyncGeneratorProvider):
         model: str,
         messages: Messages,
         proxy: str = None,
+        connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
         if not model:
             model = "gpt-3.5-turbo"
         headers = {
-            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
             "Accept": "text/event-stream",
             "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
             "Accept-Encoding": "gzip, deflate, br",
             "Referer": f"{cls.url}/chat",
-            "Content-Type": "application/json",
             "Flag-Real-Time-Data": "false",
             "Visitor-ID": get_random_string(20),
             "Origin": cls.url,
             "Alt-Used": "koala.sh",
-            "Connection": "keep-alive",
             "Sec-Fetch-Dest": "empty",
             "Sec-Fetch-Mode": "cors",
             "Sec-Fetch-Site": "same-origin",
-            "Pragma": "no-cache",
-            "Cache-Control": "no-cache",
             "TE": "trailers",
         }
-        async with ClientSession(headers=headers) as session:
+        async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+            input = messages[-1]["content"]
+            system_messages = [message["content"] for message in messages if message["role"] == "system"]
+            if system_messages:
+                input += " ".join(system_messages)
             data = {
-                "input": messages[-1]["content"],
+                "input": input,
                 "inputHistory": [
                     message["content"]
-                    for message in messages
+                    for message in messages[:-1]
                     if message["role"] == "user"
                 ],
                 "outputHistory": [
@@ -58,7 +60,7 @@ class Koala(AsyncGeneratorProvider):
                 "model": model,
             }
            async with session.post(f"{cls.url}/api/gpt/", json=data, proxy=proxy) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for chunk in response.content:
                     if chunk.startswith(b"data: "):
                         yield json.loads(chunk[6:])

View File

@@ -7,7 +7,7 @@ from aiohttp import ClientSession, BaseConnector
 from ..typing import AsyncResult, Messages
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from .helper import get_connector
-from ..errors import RateLimitError
+from ..requests import raise_for_status

 models = {
     "gpt-4": {
@@ -76,6 +76,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://liaobots.site"
     working = True
     supports_message_history = True
+    supports_system_message = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
     default_model = "gpt-3.5-turbo"
@@ -116,19 +117,17 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                 data={"token": "abcdefghijklmnopqrst"},
                 verify_ssl=False
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
             async with session.post(
                 "https://liaobots.work/api/user",
                 proxy=proxy,
                 json={"authcode": ""},
                 verify_ssl=False
             ) as response:
-                if response.status == 401:
-                    raise RateLimitError("Rate limit reached. Use a other provider or ip address")
-                response.raise_for_status()
+                await raise_for_status(response)
                 cls._auth_code = (await response.json(content_type=None))["authCode"]
                 cls._cookie_jar = session.cookie_jar
             data = {
                 "conversationId": str(uuid.uuid4()),
                 "model": models[cls.get_model(model)],
@@ -143,7 +142,7 @@ class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
                 headers={"x-auth-code": cls._auth_code},
                 verify_ssl=False
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for chunk in response.content.iter_any():
                     if b"<html coupert-item=" in chunk:
                         raise RuntimeError("Invalid session")

View File

@@ -14,7 +14,7 @@ WS_URL = "wss://labs-api.perplexity.ai/socket.io/"
 class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://labs.perplexity.ai"
     working = True
-    default_model = "sonar-medium-online"
+    default_model = "mixtral-8x7b-instruct"
     models = [
         "sonar-small-online", "sonar-medium-online", "sonar-small-chat", "sonar-medium-chat", "mistral-7b-instruct",
         "codellama-70b-instruct", "llava-v1.5-7b-wrapper", "llava-v1.6-34b", "mixtral-8x7b-instruct",

View File

@@ -4,12 +4,13 @@ import json

 from ..typing import CreateResult, Messages
 from .base_provider import AbstractProvider, format_prompt
-from ..requests import Session, get_session_from_browser
+from ..requests import Session, get_session_from_browser, raise_for_status

 class Pi(AbstractProvider):
     url = "https://pi.ai/talk"
     working = True
     supports_stream = True
+    _session = None

     @classmethod
     def create_completion(
@@ -17,20 +18,19 @@ class Pi(AbstractProvider):
         model: str,
         messages: Messages,
         stream: bool,
-        session: Session = None,
         proxy: str = None,
         timeout: int = 180,
         conversation_id: str = None,
         **kwargs
     ) -> CreateResult:
-        if not session:
-            session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
+        if cls._session is None:
+            cls._session = get_session_from_browser(url=cls.url, proxy=proxy, timeout=timeout)
         if not conversation_id:
-            conversation_id = cls.start_conversation(session)
+            conversation_id = cls.start_conversation(cls._session)
             prompt = format_prompt(messages)
         else:
             prompt = messages[-1]["content"]
-        answer = cls.ask(session, prompt, conversation_id)
+        answer = cls.ask(cls._session, prompt, conversation_id)
         for line in answer:
             if "text" in line:
                 yield line["text"]
@@ -41,8 +41,7 @@ class Pi(AbstractProvider):
             'accept': 'application/json',
             'x-api-version': '3'
         })
-        if 'Just a moment' in response.text:
-            raise RuntimeError('Error: Cloudflare detected')
+        raise_for_status(response)
         return response.json()['conversations'][0]['sid']

     def get_chat_history(session: Session, conversation_id: str):
@@ -50,8 +49,7 @@ class Pi(AbstractProvider):
             'conversation': conversation_id,
         }
         response = session.get('https://pi.ai/api/chat/history', params=params)
-        if 'Just a moment' in response.text:
-            raise RuntimeError('Error: Cloudflare detected')
+        raise_for_status(response)
         return response.json()

     def ask(session: Session, prompt: str, conversation_id: str):
@@ -61,9 +59,8 @@ class Pi(AbstractProvider):
             'mode': 'BASE',
         }
         response = session.post('https://pi.ai/api/chat', json=json_data, stream=True)
+        raise_for_status(response)
         for line in response.iter_lines():
-            if b'Just a moment' in line:
-                raise RuntimeError('Error: Cloudflare detected')
             if line.startswith(b'data: {"text":'):
                 yield json.loads(line.split(b'data: ')[1])
             elif line.startswith(b'data: {"title":'):
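Caching the browser-backed session on the class means the Cloudflare clearance is obtained once per process and reused by every later call. A usage sketch (model name and prompt are illustrative):

```python
import g4f
from g4f.Provider import Pi

# Only the first call starts a browser; later calls reuse Pi._session.
for chunk in g4f.ChatCompletion.create(
    model="pi",
    provider=Pi,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk, end="")
```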

View File

@@ -8,17 +8,18 @@ try:
 except ImportError:
     has_requirements = False

-from ..typing import Messages, TypedDict, CreateResult, Any
+from ..typing import Messages, CreateResult
 from .base_provider import AbstractProvider
-from ..errors import MissingRequirementsError
+from ..requests import raise_for_status
+from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError

 class Vercel(AbstractProvider):
     url = 'https://chat.vercel.ai'
     working = True
     supports_message_history = True
+    supports_system_message = True
     supports_gpt_35_turbo = True
     supports_stream = True
-    supports_gpt_4 = False

     @staticmethod
     def create_completion(
@@ -26,6 +27,7 @@ class Vercel(AbstractProvider):
         messages: Messages,
         stream: bool,
         proxy: str = None,
+        max_retries: int = 6,
         **kwargs
     ) -> CreateResult:
         if not has_requirements:
@@ -54,19 +56,17 @@ class Vercel(AbstractProvider):
             'messages': messages,
             'id' : f'{os.urandom(3).hex()}a',
         }
-        response = None
-        max_retries = kwargs.get('max_retries', 6)
         for _ in range(max_retries):
             response = requests.post('https://chat.vercel.ai/api/chat',
                 headers=headers, json=json_data, stream=True, proxies={"https": proxy})
-            try:
-                response.raise_for_status()
-            except:
+            if not response.ok:
                 continue
             for token in response.iter_content(chunk_size=None):
                 yield token.decode()
             break
+        raise_for_status(response)

 def get_anti_bot_token() -> str:
     headers = {
         'authority': 'sdk.vercel.ai',
@@ -92,7 +92,7 @@ def get_anti_bot_token() -> str:
     js_script = '''const globalThis={marker:"mark"};String.prototype.fontcolor=function(){return `<font>${this}</font>`};
     return (%s)(%s)''' % (raw_data['c'], raw_data['a'])

     sec_list = [execjs.compile(js_script).call('')[0], [], "sentinel"]
     raw_token = json.dumps({'r': sec_list, 't': raw_data['t']},
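With `max_retries` promoted from a `**kwargs` lookup to a declared keyword, callers can pass it directly. A hedged sketch, assuming the keyword flows through `g4f.ChatCompletion.create` into the provider:

```python
import g4f
from g4f.Provider import Vercel

# max_retries reaches Vercel.create_completion via **kwargs.
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=Vercel,
    messages=[{"role": "user", "content": "Hello"}],
    max_retries=3,
)
print(response)
```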

View File

@@ -4,13 +4,14 @@ import re
 import json
 import base64
 import uuid

-from aiohttp import ClientSession, FormData, BaseConnector
+from asyncio import get_running_loop
+from aiohttp import ClientSession, FormData, BaseConnector, CookieJar

 from ..typing import AsyncResult, Messages, ImageType, Cookies
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..providers.helper import get_connector, format_prompt
+from .helper import format_prompt, get_connector
 from ..image import to_bytes, ImageResponse
-from ..requests.defaults import DEFAULT_HEADERS
+from ..requests import WebDriver, raise_for_status, get_args_from_browser

 class You(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://you.com"
@@ -32,6 +33,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     model_aliases = {
         "claude-v2": "claude-2"
     }
+    _args: dict = None
+    _cookie_jar: CookieJar = None
     _cookies = None
     _cookies_used = 0
@@ -43,25 +46,34 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         image: ImageType = None,
         image_name: str = None,
         connector: BaseConnector = None,
+        webdriver: WebDriver = None,
         proxy: str = None,
         chat_mode: str = "default",
         **kwargs,
     ) -> AsyncResult:
+        if cls._args is None:
+            cls._args = get_args_from_browser(cls.url, webdriver, proxy)
+            cls._cookie_jar = CookieJar(loop=get_running_loop())
+        else:
+            if "cookies" in cls._args:
+                del cls._args["cookies"]
+            cls._cookie_jar._loop = get_running_loop()
+        if image is not None:
+            chat_mode = "agent"
+        elif not model or model == cls.default_model:
+            chat_mode = "default"
+        elif model.startswith("dall-e"):
+            chat_mode = "create"
+        else:
+            chat_mode = "custom"
+            model = cls.get_model(model)
         async with ClientSession(
             connector=get_connector(connector, proxy),
-            headers=DEFAULT_HEADERS
-        ) as client:
-            if image is not None:
-                chat_mode = "agent"
-            elif not model or model == cls.default_model:
-                chat_mode = "default"
-            elif model.startswith("dall-e"):
-                chat_mode = "create"
-            else:
-                chat_mode = "custom"
-                model = cls.get_model(model)
-            cookies = await cls.get_cookies(client) if chat_mode != "default" else None
-            upload = json.dumps([await cls.upload_file(client, cookies, to_bytes(image), image_name)]) if image else ""
+            cookie_jar=cls._cookie_jar,
+            **cls._args
+        ) as session:
+            cookies = await cls.get_cookies(session) if chat_mode != "default" else None
+            upload = json.dumps([await cls.upload_file(session, cookies, to_bytes(image), image_name)]) if image else ""
             #questions = [message["content"] for message in messages if message["role"] == "user"]
             # chat = [
             #     {"question": questions[idx-1], "answer": message["content"]}
@@ -70,8 +82,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             #     and idx < len(questions)
             # ]
             headers = {
-                "Accept": "text/event-stream",
-                "Referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
+                "accept": "text/event-stream",
+                "referer": f"{cls.url}/search?fromSearchBar=true&tbm=youchat",
             }
             data = {
                 "userFiles": upload,
@@ -86,14 +98,14 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             }
             if chat_mode == "custom":
                 params["selectedAIModel"] = model.replace("-", "_")
-            async with (client.post if chat_mode == "default" else client.get)(
+            async with (session.post if chat_mode == "default" else session.get)(
                 f"{cls.url}/api/streamingSearch",
                 data=data,
                 params=params,
                 headers=headers,
                 cookies=cookies
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 async for line in response.content:
                     if line.startswith(b'event: '):
                         event = line[7:-1].decode()
@@ -115,7 +127,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                 f"{cls.url}/api/get_nonce",
                 cookies=cookies,
             ) as response:
-                response.raise_for_status()
+                await raise_for_status(response)
                 upload_nonce = await response.text()
             data = FormData()
             data.add_field('file', file, filename=filename)
@@ -127,8 +139,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                 },
                 cookies=cookies
             ) as response:
-                if not response.ok:
-                    raise RuntimeError(f"Response: {await response.text()}")
+                await raise_for_status(response)
                 result = await response.json()
                 result["user_filename"] = filename
                 result["size"] = len(file)
@@ -177,8 +188,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
                 "session_duration_minutes": 129600
             }
         ) as response:
-            if not response.ok:
-                raise RuntimeError(f"Response: {await response.text()}")
+            await raise_for_status(response)
             session = (await response.json())["data"]
             return {
                 "stytch_session": session["session_token"],

View File

@@ -6,56 +6,36 @@ from ..providers.base_provider import AsyncProvider, AsyncGeneratorProvider
 from ..providers.create_images import CreateImagesProvider

 from .deprecated import *
+from .not_working import *
 from .selenium import *
 from .needs_auth import *
 from .unfinished import *

-from .AiAsk import AiAsk
-from .AiChatOnline import AiChatOnline
-from .AItianhu import AItianhu
 from .Aura import Aura
-from .Bestim import Bestim
 from .Bing import Bing
 from .BingCreateImages import BingCreateImages
-from .ChatAnywhere import ChatAnywhere
-from .ChatBase import ChatBase
 from .ChatForAi import ChatForAi
 from .Chatgpt4Online import Chatgpt4Online
 from .ChatgptAi import ChatgptAi
-from .ChatgptDemo import ChatgptDemo
-from .ChatgptDemoAi import ChatgptDemoAi
 from .ChatgptFree import ChatgptFree
-from .ChatgptLogin import ChatgptLogin
 from .ChatgptNext import ChatgptNext
 from .ChatgptX import ChatgptX
-from .Chatxyz import Chatxyz
 from .DeepInfra import DeepInfra
-from .FakeGpt import FakeGpt
 from .FlowGpt import FlowGpt
 from .FreeChatgpt import FreeChatgpt
 from .FreeGpt import FreeGpt
-from .GeekGpt import GeekGpt
 from .GeminiPro import GeminiPro
 from .GeminiProChat import GeminiProChat
-from .Gpt6 import Gpt6
-from .GPTalk import GPTalk
-from .GptChatly import GptChatly
-from .GptForLove import GptForLove
-from .GptGo import GptGo
-from .GptGod import GptGod
 from .GptTalkRu import GptTalkRu
-from .Hashnode import Hashnode
 from .HuggingChat import HuggingChat
 from .HuggingFace import HuggingFace
 from .Koala import Koala
 from .Liaobots import Liaobots
 from .Llama2 import Llama2
-from .OnlineGpt import OnlineGpt
 from .PerplexityLabs import PerplexityLabs
 from .Pi import Pi
 from .Vercel import Vercel
-from .Ylokh import Ylokh
 from .You import You

 import sys

View File

@@ -2,6 +2,8 @@ from __future__ import annotations

 import uuid
 from aiohttp import ClientSession
+from ...errors import ResponseStatusError
+from ...requests import raise_for_status

 class Conversation:
     """
@@ -32,8 +34,11 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
         Conversation: An instance representing the created conversation.
     """
     url = 'https://www.bing.com/search?toncp=0&FORM=hpcodx&q=Bing+AI&showconv=1&cc=en'
-    async with session.get(url, proxy=proxy) as response:
-        response.raise_for_status()
+    headers = {
+        "cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
+    }
+    async with session.get(url, headers=headers) as response:
+        await raise_for_status(response)
     headers = {
         "accept": "application/json",
         "sec-fetch-dest": "empty",
@@ -41,25 +46,21 @@ async def create_conversation(session: ClientSession, proxy: str = None) -> Conv
         "sec-fetch-site": "same-origin",
         "x-ms-client-request-id": str(uuid.uuid4()),
         "x-ms-useragent": "azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.12.3 OS/Windows",
-        "referer": url,
-        "Cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
+        "referer": "https://www.bing.com/search?toncp=0&FORM=hpcodx&q=Bing+AI&showconv=1&cc=en",
+        "cookie": "; ".join(f"{c.key}={c.value}" for c in session.cookie_jar)
     }
-    for k, v in headers.items():
-        session.headers[k] = v
-    url = 'https://www.bing.com/turing/conversation/create?bundleVersion=1.1626.1'
+    url = "https://www.bing.com/turing/conversation/create?bundleVersion=1.1634.0-service-contracts"
     async with session.get(url, headers=headers, proxy=proxy) as response:
-        try:
-            data = await response.json()
-        except:
-            raise RuntimeError(f"Response: {await response.text()}")
-
+        if response.status == 404:
+            raise ResponseStatusError(f"Response {response.status}: Can't create a new chat")
+        await raise_for_status(response)
+        data = await response.json()
         conversationId = data.get('conversationId')
         clientId = data.get('clientId')
         conversationSignature = response.headers.get('X-Sydney-Encryptedconversationsignature')
-
         if not conversationId or not clientId or not conversationSignature:
             raise Exception('Failed to create conversation.')
-
         return Conversation(conversationId, clientId, conversationSignature)

 async def list_conversations(session: ClientSession) -> list:
     """
async def list_conversations(session: ClientSession) -> list: async def list_conversations(session: ClientSession) -> list:
""" """

View File

@@ -1,8 +1,8 @@
 from __future__ import annotations

 from aiohttp import ClientSession
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider

 class AiAsk(AsyncGeneratorProvider):
     url = "https://e.aiask.me"

View File

@@ -3,9 +3,9 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string

 class AiChatOnline(AsyncGeneratorProvider):
     url = "https://aichatonline.org"

View File

@@ -2,8 +2,8 @@ from __future__ import annotations

 from aiohttp import ClientSession, ClientTimeout
-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider

 class ChatAnywhere(AsyncGeneratorProvider):

View File

@@ -3,9 +3,9 @@ from __future__ import annotations
 import uuid, time, random, json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt, get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt, get_random_string

 class FakeGpt(AsyncGeneratorProvider):

View File

@@ -3,14 +3,14 @@ from __future__ import annotations
 import secrets, time, json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt

 class GPTalk(AsyncGeneratorProvider):
     url = "https://gptalk.net"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     _auth = None
     used_times = 0

View File

@@ -1,8 +1,8 @@
 from __future__ import annotations
 import requests, json

-from .base_provider import AbstractProvider
-from ..typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ...typing import CreateResult, Messages
 from json import dumps

View File

@@ -2,9 +2,9 @@ from __future__ import annotations

 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_hex
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_hex

 class SearchTypes():
     quick = "quick"

View File

@@ -2,9 +2,9 @@ from __future__ import annotations
 import json

-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider
-from ..typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages

 class Ylokh(AsyncGeneratorProvider):
     url = "https://chat.ylokh.xyz"

View File

@@ -23,4 +23,12 @@ from .Opchatgpts import Opchatgpts
 from .Yqcloud import Yqcloud
 from .Aichat import Aichat
 from .Berlin import Berlin
 from .Phind import Phind
+from .AiAsk import AiAsk
+from .AiChatOnline import AiChatOnline
+from .ChatAnywhere import ChatAnywhere
+from .FakeGpt import FakeGpt
+from .GeekGpt import GeekGpt
+from .GPTalk import GPTalk
+from .Hashnode import Hashnode
+from .Ylokh import Ylokh

View File

@@ -1,2 +1,3 @@
 from ..providers.helper import *
 from ..cookies import get_cookies
+from ..requests.aiohttp import get_connector

View File

@@ -2,9 +2,9 @@ from __future__ import annotations

 import json

-from ..typing import AsyncResult, Messages
-from ..requests import StreamSession
-from .base_provider import AsyncGeneratorProvider, format_prompt, get_cookies
+from ...typing import AsyncResult, Messages
+from ...requests import StreamSession
+from ..base_provider import AsyncGeneratorProvider, format_prompt, get_cookies

 class AItianhu(AsyncGeneratorProvider):

View File

@@ -1,56 +1,56 @@
 from __future__ import annotations

-from ..typing import Messages
-from .base_provider import BaseProvider, CreateResult
-from ..requests import get_session_from_browser
+from ...typing import Messages
+from ..base_provider import BaseProvider, CreateResult
+from ...requests import get_session_from_browser
 from uuid import uuid4

 class Bestim(BaseProvider):
     url = "https://chatgpt.bestim.org"
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = True
-    working = False
     supports_stream = True

     @classmethod
     def create_completion(
         cls,
         model: str,
         messages: Messages,
         stream: bool,
         proxy: str = None,
         **kwargs
     ) -> CreateResult:
         session = get_session_from_browser(cls.url, proxy=proxy)
         headers = {
             'Accept': 'application/json, text/event-stream',
         }
         data = {
             "messagesHistory": [{
                 "id": str(uuid4()),
                 "content": m["content"],
                 "from": "you" if m["role"] == "user" else "bot"
             } for m in messages],
             "type": "chat",
         }
         response = session.post(
             url="https://chatgpt.bestim.org/chat/send2/",
             json=data,
             headers=headers,
             stream=True
         )
         response.raise_for_status()
         for line in response.iter_lines():
             if not line.startswith(b"event: trylimit"):
                 yield line.decode().removeprefix("data: ")

View File

@@ -2,15 +2,15 @@ from __future__ import annotations

 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider

 class ChatBase(AsyncGeneratorProvider):
     url = "https://www.chatbase.co"
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = True
-    working = True
     jailbreak = True
     list_incorrect_responses = ["support@chatbase",
                                 "about Chatbase"]

View File

@@ -1,16 +1,17 @@
 from __future__ import annotations

-import time, json, re
+import time, json, re, asyncio
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ...errors import RateLimitError
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt

 class ChatgptDemo(AsyncGeneratorProvider):
-    url = "https://chat.chatgptdemo.net"
-    supports_gpt_35_turbo = True
+    url = "https://chatgptdemo.info/chat"
     working = False
+    supports_gpt_35_turbo = True

     @classmethod
     async def create_async_generator(
@@ -21,10 +22,10 @@ class ChatgptDemo(AsyncGeneratorProvider):
         **kwargs
     ) -> AsyncResult:
         headers = {
-            "authority": "chat.chatgptdemo.net",
-            "accept-language": "de-DE,de;q=0.9,en-DE;q=0.8,en;q=0.7,en-US",
-            "origin": "https://chat.chatgptdemo.net",
-            "referer": "https://chat.chatgptdemo.net/",
+            "authority": "chatgptdemo.info",
+            "accept-language": "en-US",
+            "origin": "https://chatgptdemo.info",
+            "referer": "https://chatgptdemo.info/chat/",
             "sec-ch-ua": '"Google Chrome";v="117", "Not;A=Brand";v="8", "Chromium";v="117"',
             "sec-ch-ua-mobile": "?0",
             "sec-ch-ua-platform": '"Linux"',
@@ -36,28 +37,29 @@ class ChatgptDemo(AsyncGeneratorProvider):
         async with ClientSession(headers=headers) as session:
             async with session.get(f"{cls.url}/", proxy=proxy) as response:
                 response.raise_for_status()
-                response = await response.text()
-                result = re.search(
-                    r'<div id="USERID" style="display: none">(.*?)<\/div>',
-                    response,
-                )
-                if result:
-                    user_id = result.group(1)
-                else:
-                    raise RuntimeError("No user id found")
-            async with session.post(f"{cls.url}/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
+                text = await response.text()
+            result = re.search(
+                r'<div id="USERID" style="display: none">(.*?)<\/div>',
+                text,
+            )
+            if result:
+                user_id = result.group(1)
+            else:
+                raise RuntimeError("No user id found")
+            async with session.post(f"https://chatgptdemo.info/chat/new_chat", json={"user_id": user_id}, proxy=proxy) as response:
                 response.raise_for_status()
                 chat_id = (await response.json())["id_"]
                 if not chat_id:
                     raise RuntimeError("Could not create new chat")
+            await asyncio.sleep(10)
             data = {
                 "question": format_prompt(messages),
                 "chat_id": chat_id,
-                "timestamp": int(time.time()*1000),
+                "timestamp": int((time.time())*1e3),
             }
-            async with session.post(f"{cls.url}/chat_api_stream", json=data, proxy=proxy) as response:
+            async with session.post(f"https://chatgptdemo.info/chat/chat_api_stream", json=data, proxy=proxy) as response:
+                if response.status == 429:
+                    raise RateLimitError("Rate limit reached")
                 response.raise_for_status()
                 async for line in response.content:
                     if line.startswith(b"data: "):

View File

@@ -3,9 +3,9 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string

 class ChatgptDemoAi(AsyncGeneratorProvider):
     url = "https://chat.chatgptdemo.ai"
@@ -49,6 +49,7 @@ class ChatgptDemoAi(AsyncGeneratorProvider):
         async with session.post(f"{cls.url}/wp-json/mwai-ui/v1/chats/submit", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for chunk in response.content:
+                response.raise_for_status()
                 if chunk.startswith(b"data: "):
                     data = json.loads(chunk[6:])
                     if data["type"] == "live":

View File

@@ -5,15 +5,15 @@ import time
 import json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt

 class ChatgptLogin(AsyncGeneratorProvider):
     url = "https://chatgptlogin.ai"
+    working = False
     supports_gpt_35_turbo = True
-    working = False
     _user_id = None

     @classmethod
@classmethod @classmethod

View File

@@ -3,8 +3,8 @@ from __future__ import annotations
import json import json
from aiohttp import ClientSession from aiohttp import ClientSession
from ..typing import AsyncResult, Messages from ...typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider from ..base_provider import AsyncGeneratorProvider
class Chatxyz(AsyncGeneratorProvider): class Chatxyz(AsyncGeneratorProvider):
url = "https://chat.3211000.xyz" url = "https://chat.3211000.xyz"

View File

@@ -3,14 +3,12 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider

 class Gpt6(AsyncGeneratorProvider):
     url = "https://gpt6.ai"
-    working = True
+    working = False
     supports_gpt_35_turbo = True

     @classmethod
@@ -45,6 +43,7 @@ class Gpt6(AsyncGeneratorProvider):
         async with session.post(f"https://seahorse-app-d29hu.ondigitalocean.app/api/v1/query", json=data, proxy=proxy) as response:
             response.raise_for_status()
             async for line in response.content:
+                print(line)
                 if line.startswith(b"data: [DONE]"):
                     break
                 elif line.startswith(b"data: "):

View File

@@ -1,13 +1,13 @@
 from __future__ import annotations

-from ..requests import Session, get_session_from_browser
-from ..typing import Messages
-from .base_provider import AsyncProvider
+from ...requests import Session, get_session_from_browser
+from ...typing import Messages
+from ..base_provider import AsyncProvider

 class GptChatly(AsyncProvider):
     url = "https://gptchatly.com"
-    working = True
+    working = False
     supports_message_history = True
     supports_gpt_35_turbo = True

View File

@@ -9,14 +9,14 @@ try:
 except ImportError:
     has_requirements = False

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
-from ..errors import MissingRequirementsError
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+from ...errors import MissingRequirementsError

 class GptForLove(AsyncGeneratorProvider):
     url = "https://ai18.gptforlove.com"
-    working = True
+    working = False
     supports_gpt_35_turbo = True

     @classmethod

View File

@@ -4,14 +4,14 @@ from aiohttp import ClientSession
 import json
 import base64

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, format_prompt

 class GptGo(AsyncGeneratorProvider):
     url = "https://gptgo.ai"
+    working = False
     supports_gpt_35_turbo = True
-    working = True

     @classmethod
     async def create_async_generator(
@@ -44,6 +44,8 @@ class GptGo(AsyncGeneratorProvider):
         ) as response:
             response.raise_for_status()
             token = await response.text();
+            if token == "error token":
+                raise RuntimeError(f"Response: {token}")
             token = base64.b64decode(token[10:-20]).decode()

         async with session.get(
@@ -57,6 +59,8 @@ class GptGo(AsyncGeneratorProvider):
                 break
             if line.startswith(b"data: "):
                 line = json.loads(line[6:])
+                if "choices" not in line:
+                    raise RuntimeError(f"Response: {line}")
                 content = line["choices"][0]["delta"].get("content")
                 if content and content != "\n#GPTGO ":
                     yield content

View File

@@ -4,14 +4,14 @@ import secrets

 import json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import format_prompt
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt

 class GptGod(AsyncGeneratorProvider):
     url = "https://gptgod.site"
-    supports_gpt_35_turbo = True
     working = False
+    supports_gpt_35_turbo = True

     @classmethod
     async def create_async_generator(

View File

@@ -3,14 +3,13 @@ from __future__ import annotations
 import json
 from aiohttp import ClientSession

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider
-from .helper import get_random_string
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_random_string

 class OnlineGpt(AsyncGeneratorProvider):
     url = "https://onlinegpt.org"
-    working = True
+    working = False
     supports_gpt_35_turbo = True
     supports_message_history = False

View File

@@ -0,0 +1,14 @@
+from .AItianhu import AItianhu
+from .Bestim import Bestim
+from .ChatBase import ChatBase
+from .ChatgptDemo import ChatgptDemo
+from .ChatgptDemoAi import ChatgptDemoAi
+from .ChatgptLogin import ChatgptLogin
+from .Chatxyz import Chatxyz
+from .Gpt6 import Gpt6
+from .GptChatly import GptChatly
+from .GptForLove import GptForLove
+from .GptGo import GptGo
+from .GptGod import GptGod
+from .OnlineGpt import OnlineGpt

View File

@@ -133,12 +133,18 @@ class Completions():
         max_tokens: int = None,
         stop: Union[list[str], str] = None,
         api_key: str = None,
+        ignored: list[str] = None,
+        ignore_working: bool = False,
+        ignore_stream: bool = False,
         **kwargs
     ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:
         model, provider = get_model_and_provider(
             model,
             self.provider if provider is None else provider,
             stream,
+            ignored,
+            ignore_working,
+            ignore_stream,
             **kwargs
         )
         stop = [stop] if isinstance(stop, str) else stop
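The three new keywords let callers filter the provider pool per request. A sketch of the client call, assuming the `g4f.client` API from this release series (the provider name in `ignored` is illustrative):

```python
from g4f.client import Client

client = Client()
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello"}],
    ignored=["Liaobots"],    # skip these providers by name
    ignore_working=False,    # keep filtering out providers marked working = False
    ignore_stream=False,     # keep the streaming-capability check
)
print(response.choices[0].message.content)
```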

View File

@@ -38,4 +38,7 @@ class NoImageResponseError(Exception):
     ...

 class RateLimitError(Exception):
     ...
+
+class ResponseStatusError(Exception):
+    ...

View File

@@ -1013,6 +1013,7 @@ a:-webkit-any-link {
     font-size: 15px;
     width: 100%;
     color: var(--colour-3);
+    min-height: 59px;
     height: 59px;
     outline: none;
     padding: var(--inner-gap) var(--section-gap);

View File

@@ -6,13 +6,11 @@ from .Provider import RetryProvider, ProviderType
 from .Provider import (
     Chatgpt4Online,
     PerplexityLabs,
-    ChatgptDemoAi,
     GeminiProChat,
     ChatgptNext,
     HuggingChat,
     HuggingFace,
     ChatgptDemo,
-    FreeChatgpt,
     GptForLove,
     ChatgptAi,
     DeepInfra,
@@ -66,7 +64,6 @@ gpt_35_long = Model(
     best_provider = RetryProvider([
         FreeGpt, You,
         Chatgpt4Online,
-        ChatgptDemoAi,
         ChatgptNext,
         ChatgptDemo,
         Gpt6,
@@ -182,7 +179,7 @@ gemini = bard = palm = Model(
 claude_v2 = Model(
     name = 'claude-v2',
     base_provider = 'anthropic',
-    best_provider = RetryProvider([FreeChatgpt, Vercel])
+    best_provider = RetryProvider([Vercel])
 )

 claude_3_opus = Model(
@@ -236,7 +233,7 @@ gpt_4_32k_0613 = Model(
 gemini_pro = Model(
     name = 'gemini-pro',
     base_provider = 'google',
-    best_provider = RetryProvider([FreeChatgpt, GeminiProChat, You])
+    best_provider = RetryProvider([GeminiProChat, You])
 )

 pi = Model(

View File

@@ -3,10 +3,8 @@ from __future__ import annotations
 import random
 import secrets
 import string
-from aiohttp import BaseConnector

-from ..typing import Messages, Optional
-from ..errors import MissingRequirementsError
+from ..typing import Messages

 def format_prompt(messages: Messages, add_special_tokens=False) -> str:
     """
@@ -49,16 +47,4 @@ def get_random_hex() -> str:
     Returns:
         str: A random hexadecimal string of 32 characters (16 bytes).
     """
     return secrets.token_hex(16).zfill(32)
-
-def get_connector(connector: BaseConnector = None, proxy: str = None, rdns: bool = False) -> Optional[BaseConnector]:
-    if proxy and not connector:
-        try:
-            from aiohttp_socks import ProxyConnector
-            if proxy.startswith("socks5h://"):
-                proxy = proxy.replace("socks5h://", "socks5://")
-                rdns = True
-            connector = ProxyConnector.from_url(proxy, rdns=rdns)
-        except ImportError:
-            raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
-    return connector

View File

@@ -1,18 +1,22 @@
 from __future__ import annotations

 from urllib.parse import urlparse
+from typing import Union
+from aiohttp import ClientResponse
+from requests import Response as RequestsResponse

 try:
-    from curl_cffi.requests import Session
+    from curl_cffi.requests import Session, Response
     from .curl_cffi import StreamResponse, StreamSession
     has_curl_cffi = True
 except ImportError:
-    from typing import Type as Session
+    from typing import Type as Session, Type as Response
     from .aiohttp import StreamResponse, StreamSession
     has_curl_cffi = False
-from ..webdriver import WebDriver, WebDriverSession, bypass_cloudflare, get_driver_cookies
-from ..errors import MissingRequirementsError
+from ..webdriver import WebDriver, WebDriverSession
+from ..webdriver import user_config_dir, bypass_cloudflare, get_driver_cookies
+from ..errors import MissingRequirementsError, RateLimitError, ResponseStatusError
 from .defaults import DEFAULT_HEADERS

 def get_args_from_browser(
@@ -20,7 +24,8 @@ def get_args_from_browser(
     webdriver: WebDriver = None,
     proxy: str = None,
     timeout: int = 120,
-    do_bypass_cloudflare: bool = True
+    do_bypass_cloudflare: bool = True,
+    virtual_display: bool = False
 ) -> dict:
     """
     Create a Session object using a WebDriver to handle cookies and headers.
@@ -34,21 +39,37 @@ def get_args_from_browser(
     Returns:
         Session: A Session object configured with cookies and headers from the WebDriver.
     """
-    with WebDriverSession(webdriver, "", proxy=proxy, virtual_display=False) as driver:
+    user_data_dir = "" #user_config_dir(f"g4f-{urlparse(url).hostname}")
+    with WebDriverSession(webdriver, user_data_dir, proxy=proxy, virtual_display=virtual_display) as driver:
         if do_bypass_cloudflare:
             bypass_cloudflare(driver, url, timeout)
-        cookies = get_driver_cookies(driver)
         user_agent = driver.execute_script("return navigator.userAgent")
-        parse = urlparse(url)
+        headers = {
+            **DEFAULT_HEADERS,
+            'referer': url,
+            'user-agent': user_agent,
+        }
+        if hasattr(driver, "requests"):
+            for request in driver.requests:
+                if request.url.startswith(url):
+                    for key, value in request.headers.items():
+                        if key in (
+                            "accept-encoding",
+                            "accept-language",
+                            "user-agent",
+                            "sec-ch-ua",
+                            "sec-ch-ua-platform",
+                            "sec-ch-ua-arch",
+                            "sec-ch-ua-full-version",
+                            "sec-ch-ua-platform-version",
+                            "sec-ch-ua-bitness"
+                        ):
+                            headers[key] = value
                    break
+        cookies = get_driver_cookies(driver)
         return {
             'cookies': cookies,
-            'headers': {
-                **DEFAULT_HEADERS,
-                'Authority': parse.netloc,
-                'Origin': f'{parse.scheme}://{parse.netloc}',
-                'Referer': url,
-                'User-Agent': user_agent,
-            },
+            'headers': headers,
         }

 def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str = None, timeout: int = 120) -> Session:
@@ -59,5 +80,25 @@ def get_session_from_browser(url: str, webdriver: WebDriver = None, proxy: str =
         **args,
         proxies={"https": proxy, "http": proxy},
         timeout=timeout,
-        impersonate="chrome110"
+        impersonate="chrome"
     )
+
+async def raise_for_status_async(response: Union[StreamResponse, ClientResponse]):
+    if response.status in (429, 402):
+        raise RateLimitError(f"Response {response.status}: Rate limit reached")
+    text = await response.text() if not response.ok else None
+    if response.status == 403 and "<title>Just a moment...</title>" in text:
+        raise ResponseStatusError(f"Response {response.status}: Cloudflare detected")
+    elif not response.ok:
+        raise ResponseStatusError(f"Response {response.status}: {text}")
+
+def raise_for_status(response: Union[StreamResponse, ClientResponse, Response, RequestsResponse]):
+    if isinstance(response, StreamSession) or isinstance(response, ClientResponse):
+        return raise_for_status_async(response)
+    if response.status_code in (429, 402):
+        raise RateLimitError(f"Response {response.status_code}: Rate limit reached")
+    elif response.status_code == 403 and "<title>Just a moment...</title>" in response.text:
+        raise ResponseStatusError(f"Response {response.status_code}: Cloudflare detected")
+    elif not response.ok:
+        raise ResponseStatusError(f"Response {response.status_code}: {response.text}")
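One name now covers both transports: on an aiohttp response the helper dispatches to the async variant and must be awaited, while on a `requests`/`curl_cffi` response it raises synchronously. A sketch of the async path:

```python
from aiohttp import ClientSession
from g4f.requests import raise_for_status

async def fetch(url: str) -> str:
    async with ClientSession() as session:
        async with session.get(url) as response:
            # A ClientResponse routes to raise_for_status_async, hence the await;
            # a requests.Response would be checked synchronously instead.
            await raise_for_status(response)
            return await response.text()
```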

View File

@@ -1,16 +1,20 @@
 from __future__ import annotations

-from aiohttp import ClientSession, ClientResponse, ClientTimeout
-from typing import AsyncGenerator, Any
+from aiohttp import ClientSession, ClientResponse, ClientTimeout, BaseConnector
+from typing import AsyncIterator, Any, Optional

-from ..providers.helper import get_connector
 from .defaults import DEFAULT_HEADERS
+from ..errors import MissingRequirementsError

 class StreamResponse(ClientResponse):
-    async def iter_lines(self) -> AsyncGenerator[bytes, None]:
+    async def iter_lines(self) -> AsyncIterator[bytes]:
         async for line in self.content:
             yield line.rstrip(b"\r\n")

+    async def iter_content(self) -> AsyncIterator[bytes]:
+        async for chunk in self.content.iter_any():
+            yield chunk
+
     async def json(self) -> Any:
         return await super().json(content_type=None)

@@ -27,4 +31,16 @@ class StreamSession(ClientSession):
             response_class=StreamResponse,
             connector=get_connector(kwargs.get("connector"), proxies.get("https")),
             headers=headers
         )
+
+def get_connector(connector: BaseConnector = None, proxy: str = None, rdns: bool = False) -> Optional[BaseConnector]:
+    if proxy and not connector:
+        try:
+            from aiohttp_socks import ProxyConnector
+            if proxy.startswith("socks5h://"):
+                proxy = proxy.replace("socks5h://", "socks5://")
+                rdns = True
+            connector = ProxyConnector.from_url(proxy, rdns=rdns)
+        except ImportError:
+            raise MissingRequirementsError('Install "aiohttp_socks" package for proxy support')
+    return connector
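`get_connector` now lives in `g4f.requests.aiohttp`, which owns the proxy wiring. A sketch of routing a session through a SOCKS proxy, assuming the optional `aiohttp_socks` package is installed (the proxy URL is a placeholder):

```python
from aiohttp import ClientSession
from g4f.requests.aiohttp import get_connector

async def main() -> None:
    # socks5h:// is rewritten to socks5:// with remote DNS (rdns=True).
    connector = get_connector(proxy="socks5h://127.0.0.1:9050")
    async with ClientSession(connector=connector) as session:
        async with session.get("https://example.com") as response:
            print(response.status)
```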

View File

@@ -1,13 +1,19 @@
DEFAULT_HEADERS = { DEFAULT_HEADERS = {
'Accept': '*/*', "sec-ch-ua": '"Chromium";v="122", "Not(A:Brand";v="24", "Google Chrome";v="122"',
'Accept-Encoding': 'gzip, deflate, br', "sec-ch-ua-mobile": "?0",
'Accept-Language': 'en-US', "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36",
'Connection': 'keep-alive', "ec-ch-ua-arch": '"x86"',
'Sec-Ch-Ua': '"Not A(Brand";v="99", "Google Chrome";v="121", "Chromium";v="121"', "sec-ch-ua-full-version": '"122.0.6261.69"',
'Sec-Ch-Ua-Mobile': '?0', "accept": "*/*",
'Sec-Ch-Ua-Platform': '"Windows"', "sec-ch-ua-platform-version:": '"6.5.0"',
'Sec-Fetch-Dest': 'empty', "sec-ch-ua-full-version-list": '"Chromium";v="122.0.6261.69", "Not(A:Brand";v="24.0.0.0", "Google Chrome";v="122.0.6261.69"',
'Sec-Fetch-Mode': 'cors', "sec-ch-ua-bitness": '"64"',
'Sec-Fetch-Site': 'same-site', "sec-ch-ua-model": '""',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36' "sec-ch-ua-platform": '"Windows"',
"sec-fetch-site": "same-site",
"sec-fetch-mode": "cors",
"sec-fetch-dest": "empty",
"referer": "",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US",
} }

View File

@@ -2,9 +2,9 @@ from __future__ import annotations

 try:
     from platformdirs import user_config_dir
-    from undetected_chromedriver import Chrome, ChromeOptions
     from selenium.webdriver.remote.webdriver import WebDriver
     from selenium.webdriver.remote.webelement import WebElement
+    from undetected_chromedriver import Chrome, ChromeOptions
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
     from selenium.webdriver.support import expected_conditions as EC
@@ -12,10 +12,10 @@ try:
     from selenium.common.exceptions import NoSuchElementException
     has_requirements = True
 except ImportError:
-    from typing import Type as WebDriver
+    from typing import Type as WebDriver, Callable as user_config_dir  # dummy aliases keep annotations importable
     has_requirements = False

 import time
 from shutil import which
 from os import path
 from os import access, R_OK
@@ -29,6 +29,24 @@ try:
 except ImportError:
     has_pyvirtualdisplay = False

+try:
+    from undetected_chromedriver import Chrome as _Chrome, ChromeOptions
+    from seleniumwire.webdriver import InspectRequestsMixin, DriverCommonMixin
+
+    class Chrome(InspectRequestsMixin, DriverCommonMixin, _Chrome):
+        def __init__(self, *args, options=None, seleniumwire_options: dict = None, **kwargs):
+            if options is None:
+                options = ChromeOptions()
+            options.add_argument('--proxy-bypass-list=<-loopback>')
+            config = self._setup_backend(seleniumwire_options or {})
+            options.add_argument(f"--proxy-server={config['proxy']['httpProxy']}")
+            options.add_argument("--ignore-certificate-errors")
+            super().__init__(*args, options=options, **kwargs)
+    has_seleniumwire = True
+except ImportError:
+    from undetected_chromedriver import Chrome, ChromeOptions
+    has_seleniumwire = False
 def get_browser(
     user_data_dir: str = None,
     headless: bool = False,
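
Because the block above silently falls back to plain undetected_chromedriver, callers that need request inspection should test has_seleniumwire first. A minimal sketch using selenium-wire's standard driver.requests API; the import path and target URL are illustrative assumptions:

from g4f.webdriver import Chrome, has_seleniumwire  # import path assumed

driver = Chrome()  # the mixin subclass defined above when selenium-wire is installed
try:
    driver.get("https://example.com")
    if has_seleniumwire:
        for request in driver.requests:  # populated by selenium-wire's proxy backend
            if request.response:
                print(request.url, request.response.status_code)
finally:
    driver.quit()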
@@ -106,7 +124,7 @@ def bypass_cloudflare(driver: WebDriver, url: str, timeout: int) -> None:
             }});
         """, element, url)
         element.click()
-        time.sleep(3)
+        time.sleep(5)

         # Switch to the new tab and close the old tab
         original_window = driver.current_window_handle
@@ -126,9 +144,10 @@ def bypass_cloudflare(driver: WebDriver, url: str, timeout: int) -> None:
                 ...
     except Exception as e:
         if debug.logging:
-            print(f"Error bypassing Cloudflare: {e}")
-    finally:
-        driver.switch_to.default_content()
+            print(f"Error bypassing Cloudflare: {str(e).splitlines()[0]}")
+    #driver.switch_to.default_content()
+    driver.switch_to.window(window_handle)
+    driver.execute_script("document.location.href = document.location.href;")  # reload the page in place
     WebDriverWait(driver, timeout).until(
         EC.presence_of_element_located((By.CSS_SELECTOR, "body:not(.no-js)"))
     )
@@ -223,13 +242,13 @@ class WebDriverSession:
                 self.default_driver.close()
             except Exception as e:
                 if debug.logging:
-                    print(f"Error closing WebDriver: {e}")
+                    print(f"Error closing WebDriver: {str(e).splitlines()[0]}")
             finally:
                 self.default_driver.quit()
         if self.virtual_display:
             self.virtual_display.stop()

 def element_send_text(element: WebElement, text: str) -> None:
-    script = "arguments[0].innerText = arguments[1]"
+    script = "arguments[0].innerText = arguments[1];"
     element.parent.execute_script(script, element, text)
     element.send_keys(Keys.ENTER)
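
For orientation, element_send_text writes the prompt through innerText (avoiding per-keystroke events) and then submits with ENTER. A typical call-site shape, assuming WebDriverSession works as a context manager as its cleanup code above suggests; the import path, URL, and selector are illustrative:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from g4f.webdriver import WebDriverSession, element_send_text  # import path assumed

with WebDriverSession() as driver:
    driver.get("https://example.com/chat")  # illustrative URL
    textarea = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, "textarea"))
    )
    element_send_text(textarea, "Hello")  # sets innerText, then sends Keys.ENTER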

View File

@@ -21,4 +21,5 @@ brotli
 beautifulsoup4
 setuptools
 aiohttp_socks
+selenium-wire
 gpt4all

View File

@@ -33,6 +33,7 @@ EXTRA_REQUIRE = {
         "werkzeug", "flask",          # gui
         "loguru", "fastapi",
         "uvicorn", "nest_asyncio",    # api
+        "selenium-wire"
     ],
     "image": [
         "pillow",
@@ -42,7 +43,8 @@ EXTRA_REQUIRE = {
     "webdriver": [
         "platformdirs",
         "undetected-chromedriver",
-        "setuptools"
+        "setuptools",
+        "selenium-wire"
     ],
     "openai": [
         "async-property",