Mirror of https://github.com/xtekky/gpt4free.git
Add multiple images support (#2478)

* Add multiple images support
* Add multiple images support in gui
* Support multiple images in legacy client and in the api
  Fix some model names in provider model list
* Fix unittests
* Add vision and providers docs
docs/providers.md (new file, 575 lines)
@@ -0,0 +1,575 @@
## Free

### AmigoChat
| Provider | `g4f.Provider.AmigoChat` |
| -------- | ---- |
| **Website** | [amigochat.io](https://amigochat.io/chat/) |
| **Status** |  |
| **Models** | gpt-4o, gpt-4o-mini, llama-3.1-405b, mistral-nemo, gemini-flash, gemma-2b, claude-3.5-sonnet, command-r-plus, qwen-2.5-72b, grok-beta (37) |
| **Image Models (Image Generation)** | flux-realism, flux-pro, dall-e-3, flux-dev |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Blackbox AI
| Provider | `g4f.Provider.Blackbox` |
| -------- | ---- |
| **Website** | [blackbox.ai](https://www.blackbox.ai) |
| **Status** |  |
| **Models** | gpt-4, gpt-4o, llama-3.1-8b, llama-3.1-70b, llama-3.1-405b, gemini-pro, gemini-flash, claude-3.5-sonnet, blackboxai, blackboxai-pro (41) |
| **Image Models (Image Generation)** | flux |
| **Vision (Image Upload)** | ✔️ |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Blackbox2
| Provider | `g4f.Provider.Blackbox2` |
| -------- | ---- |
| **Website** | [blackbox.ai](https://www.blackbox.ai) |
| **Status** |  |
| **Models** | llama-3.1-70b (2) |
| **Image Models (Image Generation)** | flux |
| **Authentication** | ❌ |
| **Streaming** | ❌ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### ChatGpt
| Provider | `g4f.Provider.ChatGpt` |
| -------- | ---- |
| **Website** | [chatgpt.com](https://chatgpt.com) |
| **Status** |  |
| **Models** | gpt-3.5-turbo, gpt-4, gpt-4-turbo, gpt-4o, gpt-4o-mini (7) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### ChatGptEs
| Provider | `g4f.Provider.ChatGptEs` |
| -------- | ---- |
| **Website** | [chatgpt.es](https://chatgpt.es) |
| **Status** |  |
| **Models** | gpt-4, gpt-4o, gpt-4o-mini (3) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Cloudflare AI
| Provider | `g4f.Provider.Cloudflare` |
| -------- | ---- |
| **Website** | [playground.ai.cloudflare.com](https://playground.ai.cloudflare.com) |
| **Status** |  |
| **Models** | llama-2-7b, llama-3-8b, llama-3.1-8b, llama-3.2-1b, qwen-1.5-7b (37) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Microsoft Copilot
| Provider | `g4f.Provider.Copilot` |
| -------- | ---- |
| **Website** | [copilot.microsoft.com](https://copilot.microsoft.com) |
| **Status** |  |
| **Models** | gpt-4 (1) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### DuckDuckGo AI Chat
| Provider | `g4f.Provider.DDG` |
| -------- | ---- |
| **Website** | [duckduckgo.com](https://duckduckgo.com/aichat) |
| **Status** |  |
| **Models** | gpt-4, gpt-4o, gpt-4o-mini, llama-3.1-70b, mixtral-8x7b, claude-3-haiku (8) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### DarkAI
| Provider | `g4f.Provider.DarkAI` |
| -------- | ---- |
| **Website** | [darkai.foundation](https://darkai.foundation/chat) |
| **Status** |  |
| **Models** | gpt-3.5-turbo, gpt-4o, llama-3.1-70b (3) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Flux (HuggingSpace)
| Provider | `g4f.Provider.Flux` |
| -------- | ---- |
| **Website** | [black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space) |
| **Status** |  |
| **Image Models (Image Generation)** | flux-dev |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Free2GPT
| Provider | `g4f.Provider.Free2GPT` |
| -------- | ---- |
| **Website** | [chat10.free2gpt.xyz](https://chat10.free2gpt.xyz) |
| **Status** |  |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ✔️ |
### FreeGpt
| Provider | `g4f.Provider.FreeGpt` |
| -------- | ---- |
| **Website** | [freegptsnav.aifree.site](https://freegptsnav.aifree.site) |
| **Status** |  |
| **Models** | gemini-pro (1) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### GizAI
| Provider | `g4f.Provider.GizAI` |
| -------- | ---- |
| **Website** | [app.giz.ai](https://app.giz.ai/assistant) |
| **Status** |  |
| **Models** | gemini-flash (1) |
| **Authentication** | ❌ |
| **Streaming** | ❌ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### HuggingFace
| Provider | `g4f.Provider.HuggingFace` |
| -------- | ---- |
| **Website** | [huggingface.co](https://huggingface.co/chat) |
| **Status** |  |
| **Models** | llama-3.2-11b, llama-3.3-70b, mistral-nemo, hermes-3, phi-3.5-mini, command-r-plus, qwen-2.5-72b, qwen-2.5-coder-32b, qwq-32b, nemotron-70b (11) |
| **Image Models (Image Generation)** | flux-dev |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ✔️ |
### Liaobots
| Provider | `g4f.Provider.Liaobots` |
| -------- | ---- |
| **Website** | [liaobots.site](https://liaobots.site) |
| **Status** |  |
| **Models** | gpt-4, gpt-4o, gpt-4o-mini, o1-preview, o1-mini, gemini-pro, gemini-flash, claude-3-opus, claude-3-sonnet, claude-3.5-sonnet, grok-beta (14) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### GPT4All
| Provider | `g4f.Provider.Local` |
| -------- | ---- |
| **Website** | ❌ |
| **Status** |  |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Meta AI
| Provider | `g4f.Provider.MetaAI` |
| -------- | ---- |
| **Website** | [meta.ai](https://www.meta.ai) |
| **Status** |  |
| **Models** | meta-ai (1) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Mhystical
| Provider | `g4f.Provider.Mhystical` |
| -------- | ---- |
| **Website** | [api.mhystical.cc](https://api.mhystical.cc) |
| **Status** |  |
| **Models** | gpt-4 (1) |
| **Authentication** | ❌ |
| **Streaming** | ❌ |
| **System message** | ❌ |
| **Message history** | ✔️ |
### Ollama
| Provider | `g4f.Provider.Ollama` |
| -------- | ---- |
| **Website** | [ollama.com](https://ollama.com) |
| **Status** |  |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### OpenAI ChatGPT
| Provider | `g4f.Provider.OpenaiChat` |
| -------- | ---- |
| **Website** | [chatgpt.com](https://chatgpt.com) |
| **Status** |  |
| **Models** | gpt-4, gpt-4o, gpt-4o-mini, o1-preview, o1-mini (8) |
| **Vision (Image Upload)** | ✔️ |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### PerplexityLabs
| Provider | `g4f.Provider.PerplexityLabs` |
| -------- | ---- |
| **Website** | [labs.perplexity.ai](https://labs.perplexity.ai) |
| **Status** |  |
| **Models** | llama-3.1-8b, llama-3.1-70b, llama-3.3-70b, sonar-online, sonar-chat, lfm-40b (8) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Pi
| Provider | `g4f.Provider.Pi` |
| -------- | ---- |
| **Website** | [pi.ai](https://pi.ai/talk) |
| **Status** |  |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Pizzagpt
| Provider | `g4f.Provider.Pizzagpt` |
| -------- | ---- |
| **Website** | [pizzagpt.it](https://www.pizzagpt.it) |
| **Status** |  |
| **Models** | gpt-4o-mini (1) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Pollinations AI
| Provider | `g4f.Provider.PollinationsAI` |
| -------- | ---- |
| **Website** | [pollinations.ai](https://pollinations.ai) |
| **Status** |  |
| **Models** | gpt-4, gpt-4o, llama-3.1-70b, mistral-nemo, mistral-large, claude-3.5-sonnet, command-r, qwen-2.5-coder-32b, p1, evil, midijourney, unity, rtist (25) |
| **Image Models (Image Generation)** | flux, flux-realism, flux-cablyai, flux-anime, flux-3d, any-dark, flux-pro, turbo, midjourney, dall-e-3 |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Prodia
| Provider | `g4f.Provider.Prodia` |
| -------- | ---- |
| **Website** | [app.prodia.com](https://app.prodia.com) |
| **Status** |  |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### ReplicateHome
| Provider | `g4f.Provider.ReplicateHome` |
| -------- | ---- |
| **Website** | [replicate.com](https://replicate.com) |
| **Status** |  |
| **Models** | gemma-2b (4) |
| **Image Models (Image Generation)** | sd-3, sdxl, playground-v2.5 |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Rubiks AI
| Provider | `g4f.Provider.RubiksAI` |
| -------- | ---- |
| **Website** | [rubiks.ai](https://rubiks.ai) |
| **Status** |  |
| **Models** | gpt-4o, gpt-4o-mini, o1-mini, llama-3.1-70b, claude-3.5-sonnet, grok-beta (8) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### TeachAnything
| Provider | `g4f.Provider.TeachAnything` |
| -------- | ---- |
| **Website** | [teach-anything.com](https://www.teach-anything.com) |
| **Status** |  |
| **Models** | llama-3.1-70b (1) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### TheB.AI
| Provider | `g4f.Provider.Theb` |
| -------- | ---- |
| **Website** | [beta.theb.ai](https://beta.theb.ai) |
| **Status** |  |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### You.com
| Provider | `g4f.Provider.You` |
| -------- | ---- |
| **Website** | [you.com](https://you.com) |
| **Status** |  |
| **Models** | gpt-4, gpt-4-turbo, gpt-4o, gpt-4o-mini, llama-3.1-70b, claude-3-opus, claude-3-sonnet, claude-3-haiku, claude-3.5-sonnet, command-r-plus, command-r (20) |
| **Authentication** | ❌ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |

## Auth

### Airforce
| Provider | `g4f.Provider.Airforce` |
| -------- | ---- |
| **Website** | [llmplayground.net](https://llmplayground.net) |
| **Status** |  |
| **Models** | gpt-4, gpt-4-turbo, gpt-4o, gpt-4o-mini, o1-mini, llama-2-7b, llama-3.1-8b, llama-3.1-70b, hermes-2-dpo, hermes-2-pro, phi-2, openchat-3.5, deepseek-coder, german-7b, openhermes-2.5, lfm-40b, zephyr-7b, neural-7b, evil (40) |
| **Image Models (Image Generation)** | flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3, sdxl, flux-pro |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Microsoft Designer in Bing
| Provider | `g4f.Provider.BingCreateImages` |
| -------- | ---- |
| **Website** | [bing.com](https://www.bing.com/images/create) |
| **Status** |  |
| **Image Models (Image Generation)** | dall-e-3 |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Cerebras Inference
| Provider | `g4f.Provider.Cerebras` |
| -------- | ---- |
| **Website** | [inference.cerebras.ai](https://inference.cerebras.ai/) |
| **Status** |  |
| **Models** | llama-3.1-8b, llama-3.1-70b (2) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Microsoft Copilot
| Provider | `g4f.Provider.CopilotAccount` |
| -------- | ---- |
| **Website** | [copilot.microsoft.com](https://copilot.microsoft.com) |
| **Status** |  |
| **Image Models (Image Generation)** | dall-e-3 |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### DeepInfra
| Provider | `g4f.Provider.DeepInfra` |
| -------- | ---- |
| **Website** | [deepinfra.com](https://deepinfra.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### DeepInfra Chat
| Provider | `g4f.Provider.DeepInfraChat` |
| -------- | ---- |
| **Website** | [deepinfra.com](https://deepinfra.com/chat) |
| **Status** |  |
| **Models** | llama-3.1-8b, llama-3.1-70b, qwen-2-72b, qwen-2.5-coder-32b, qwq-32b, wizardlm-2-8x22b, nemotron-70b (7) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### DeepInfraImage
| Provider | `g4f.Provider.DeepInfraImage` |
| -------- | ---- |
| **Website** | [deepinfra.com](https://deepinfra.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Google Gemini
| Provider | `g4f.Provider.Gemini` |
| -------- | ---- |
| **Website** | [gemini.google.com](https://gemini.google.com) |
| **Status** |  |
| **Models** | gemini-pro, gemini-flash (3) |
| **Image Models (Image Generation)** | gemini |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Google Gemini API
| Provider | `g4f.Provider.GeminiPro` |
| -------- | ---- |
| **Website** | [ai.google.dev](https://ai.google.dev) |
| **Status** |  |
| **Models** | gemini-pro, gemini-flash (4) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ✔️ |
### GigaChat
| Provider | `g4f.Provider.GigaChat` |
| -------- | ---- |
| **Website** | [developers.sber.ru](https://developers.sber.ru/gigachat) |
| **Status** |  |
| **Models** | GigaChat:latest (3) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### GithubCopilot
| Provider | `g4f.Provider.GithubCopilot` |
| -------- | ---- |
| **Website** | [github.com](https://github.com/copilot) |
| **Status** |  |
| **Models** | gpt-4o, o1-preview, o1-mini, claude-3.5-sonnet (4) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Groq
| Provider | `g4f.Provider.Groq` |
| -------- | ---- |
| **Website** | [console.groq.com](https://console.groq.com/playground) |
| **Status** |  |
| **Models** | mixtral-8x7b (18) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### HuggingChat
| Provider | `g4f.Provider.HuggingChat` |
| -------- | ---- |
| **Website** | [huggingface.co](https://huggingface.co/chat) |
| **Status** |  |
| **Models** | llama-3.2-11b, llama-3.3-70b, mistral-nemo, hermes-3, phi-3.5-mini, command-r-plus, qwen-2.5-72b, qwen-2.5-coder-32b, qwq-32b, nemotron-70b (11) |
| **Image Models (Image Generation)** | flux-dev |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### HuggingFace (Inference API)
| Provider | `g4f.Provider.HuggingFaceAPI` |
| -------- | ---- |
| **Website** | [api-inference.huggingface.co](https://api-inference.huggingface.co) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Meta AI
| Provider | `g4f.Provider.MetaAIAccount` |
| -------- | ---- |
| **Website** | [meta.ai](https://www.meta.ai) |
| **Status** |  |
| **Models** | meta-ai (1) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Microsoft Designer
| Provider | `g4f.Provider.MicrosoftDesigner` |
| -------- | ---- |
| **Website** | [designer.microsoft.com](https://designer.microsoft.com) |
| **Status** |  |
| **Image Models (Image Generation)** | dall-e-3 |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### OpenAI API
| Provider | `g4f.Provider.OpenaiAPI` |
| -------- | ---- |
| **Website** | [platform.openai.com](https://platform.openai.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### OpenAI ChatGPT
| Provider | `g4f.Provider.OpenaiAccount` |
| -------- | ---- |
| **Website** | [chatgpt.com](https://chatgpt.com) |
| **Status** |  |
| **Models** | gpt-4o-mini, o1-preview, o1-mini (9) |
| **Image Models (Image Generation)** | dall-e-3 |
| **Vision (Image Upload)** | ✔️ |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Perplexity API
| Provider | `g4f.Provider.PerplexityApi` |
| -------- | ---- |
| **Website** | [perplexity.ai](https://www.perplexity.ai) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### Poe
| Provider | `g4f.Provider.Poe` |
| -------- | ---- |
| **Website** | [poe.com](https://poe.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Raycast
| Provider | `g4f.Provider.Raycast` |
| -------- | ---- |
| **Website** | [raycast.com](https://raycast.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Reka
| Provider | `g4f.Provider.Reka` |
| -------- | ---- |
| **Website** | [chat.reka.ai](https://chat.reka.ai/) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### Replicate
| Provider | `g4f.Provider.Replicate` |
| -------- | ---- |
| **Website** | [replicate.com](https://replicate.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ❌ |
### TheB.AI API
| Provider | `g4f.Provider.ThebApi` |
| -------- | ---- |
| **Website** | [theb.ai](https://theb.ai) |
| **Status** |  |
| **Models** | gpt-3.5-turbo, gpt-4, gpt-4-turbo (21) |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ✔️ |
| **Message history** | ✔️ |
### WhiteRabbitNeo
| Provider | `g4f.Provider.WhiteRabbitNeo` |
| -------- | ---- |
| **Website** | [whiterabbitneo.com](https://www.whiterabbitneo.com) |
| **Status** |  |
| **Authentication** | ✔️ |
| **Streaming** | ✔️ |
| **System message** | ❌ |
| **Message history** | ✔️ |

--------------------------------------------------

| Label | Provider | Image Model | Vision Model | Website |
| ----- | -------- | ----------- | ------------ | ------- |
| Airforce | `g4f.Provider.Airforce` | flux, flux-realism, flux-anime, flux-3d, flux-disney, flux-pixel, flux-4o, any-dark, midjourney, dall-e-3, sdxl, flux-pro | ❌ | [llmplayground.net](https://llmplayground.net) |
| AmigoChat | `g4f.Provider.AmigoChat` | flux-realism, flux-pro, dall-e-3, flux-dev | ❌ | [amigochat.io](https://amigochat.io/chat/) |
| Microsoft Designer in Bing | `g4f.Provider.BingCreateImages` | dall-e-3 | ❌ | [bing.com](https://www.bing.com/images/create) |
| Blackbox AI | `g4f.Provider.Blackbox` | flux | ✔️ | [blackbox.ai](https://www.blackbox.ai) |
| Blackbox2 | `g4f.Provider.Blackbox2` | flux | ❌ | [blackbox.ai](https://www.blackbox.ai) |
| Microsoft Copilot | `g4f.Provider.CopilotAccount` | dall-e-3 | ❌ | [copilot.microsoft.com](https://copilot.microsoft.com) |
| DeepInfraImage | `g4f.Provider.DeepInfraImage` | | ❌ | [deepinfra.com](https://deepinfra.com) |
| Flux (HuggingSpace) | `g4f.Provider.Flux` | flux-dev | ❌ | [black-forest-labs-flux-1-dev.hf.space](https://black-forest-labs-flux-1-dev.hf.space) |
| Google Gemini | `g4f.Provider.Gemini` | gemini | ❌ | [gemini.google.com](https://gemini.google.com) |
| HuggingChat | `g4f.Provider.HuggingChat` | flux-dev | ❌ | [huggingface.co](https://huggingface.co/chat) |
| HuggingFace | `g4f.Provider.HuggingFace` | flux-dev | ❌ | [huggingface.co](https://huggingface.co/chat) |
| Meta AI | `g4f.Provider.MetaAIAccount` | | ❌ | [meta.ai](https://www.meta.ai) |
| Microsoft Designer | `g4f.Provider.MicrosoftDesigner` | dall-e-3 | ❌ | [designer.microsoft.com](https://designer.microsoft.com) |
| OpenAI ChatGPT | `g4f.Provider.OpenaiAccount` | dall-e-3, gpt-4, gpt-4o, dall-e-3 | ✔️ | [chatgpt.com](https://chatgpt.com) |
| OpenAI ChatGPT | `g4f.Provider.OpenaiChat` | ❌ | ✔️ | [chatgpt.com](https://chatgpt.com) |
| Pollinations AI | `g4f.Provider.PollinationsAI` | flux, flux-realism, flux-cablyai, flux-anime, flux-3d, any-dark, flux-pro, turbo, midjourney, dall-e-3 | ❌ | [pollinations.ai](https://pollinations.ai) |
| Prodia | `g4f.Provider.Prodia` | | ❌ | [app.prodia.com](https://app.prodia.com) |
| ReplicateHome | `g4f.Provider.ReplicateHome` | sd-3, sdxl, playground-v2.5 | ❌ | [replicate.com](https://replicate.com) |
| You.com | `g4f.Provider.You` | | ❌ | [you.com](https://you.com) |
docs/vision.md (new file, 83 lines)
@@ -0,0 +1,83 @@
## Vision Support in Chat Completion

This documentation provides an overview of how to integrate vision support into chat completions using an API and a client. It includes examples to guide you through the process.

### Example with the API

To use vision support in chat completion with the API, follow the example below:

```python
import requests
from g4f.image import to_data_uri
from g4f.requests.raise_for_status import raise_for_status

url = "http://localhost:8080/v1/chat/completions"
body = {
    "model": "",
    "provider": "Copilot",
    "messages": [
        {"role": "user", "content": "What can you see in this image?"}
    ],
    "images": [
        ["data:image/jpeg;base64,...", "cat.jpeg"]
    ]
}
response = requests.post(url, json=body, headers={"g4f-api-key": "secret"})
raise_for_status(response)
print(response.json())
```

In this example:
- `url` is the endpoint for the chat completion API.
- `body` contains the model, provider, messages, and images.
- `messages` is a list of message objects with roles and content.
- `images` is a list of image data in Data URI format and optional filenames.
- `response` stores the API response.
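The request above pastes the Data URI in by hand. The imported `to_data_uri` helper can build it from raw bytes instead — a minimal sketch, assuming a local `cat.jpeg` exists:

```python
# Build the Data URI entry for the request body from a local file.
with open("cat.jpeg", "rb") as f:
    body["images"] = [[to_data_uri(f.read()), "cat.jpeg"]]
```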
### Example with the Client

To use vision support in chat completion with the client, follow the example below:

```python
import g4f
import g4f.Provider

def chat_completion(prompt):
    client = g4f.Client(provider=g4f.Provider.Blackbox)
    images = [
        [open("docs/images/waterfall.jpeg", "rb"), "waterfall.jpeg"],
        [open("docs/images/cat.webp", "rb"), "cat.webp"]
    ]
    response = client.chat.completions.create([{"content": prompt, "role": "user"}], "", images=images)
    print(response.choices[0].message.content)

prompt = "What can you see in these images?"
chat_completion(prompt)
```

Example output:

```
**Image 1**

* A waterfall with a rainbow
* Lush greenery surrounding the waterfall
* A stream flowing from the waterfall

**Image 2**

* A white cat with blue eyes
* A bird perched on a window sill
* Sunlight streaming through the window
```

In this example:
- `client` initializes a new client with the specified provider.
- `images` is a list of image data and optional filenames.
- `response` stores the response from the client.
- The `chat_completion` function prints the chat completion output.
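The example above leaves both file handles open after the call. Since the client also accepts raw bytes, a variant that reads the files up front avoids that (same hypothetical paths):

```python
def chat_completion_bytes(prompt):
    client = g4f.Client(provider=g4f.Provider.Blackbox)
    # Read eagerly so no file handles stay open after the request.
    with open("docs/images/waterfall.jpeg", "rb") as f1, \
         open("docs/images/cat.webp", "rb") as f2:
        images = [[f1.read(), "waterfall.jpeg"], [f2.read(), "cat.webp"]]
    response = client.chat.completions.create(
        [{"content": prompt, "role": "user"}], "", images=images
    )
    print(response.choices[0].message.content)
```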
### Notes

- Multiple images can be sent. Each image has two data parts: image data (in Data URI format for the API) and an optional filename.
- The client supports bytes, IO objects, and PIL images as input.
- Ensure you use a provider that supports vision and multiple images.
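A PIL image works the same way as bytes or an IO object — a minimal sketch, assuming Pillow is installed and the file exists:

```python
from PIL import Image

# A PIL.Image object can be passed directly as the image data part.
images = [[Image.open("docs/images/cat.webp"), "cat.webp"]]
```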
@@ -4,7 +4,9 @@ import asyncio
 
 from g4f import models, ChatCompletion
 from g4f.providers.types import BaseRetryProvider, ProviderType
-from etc.testing._providers import get_providers
+from g4f.providers.base_provider import ProviderModelMixin
+from g4f.Provider import __providers__
+from g4f.models import _all_models
 from g4f import debug
 
 debug.logging = True
@@ -35,38 +37,39 @@ def test_async_list(providers: list[ProviderType]):
     return responses
 
 def print_providers():
-    providers = get_providers()
+    providers = [provider for provider in __providers__ if provider.working]
     responses = test_async_list(providers)
 
-    for type in ("GPT-4", "GPT-3.5", "Other"):
-        lines = [
+    lines = []
+    for type in ("Free", "Auth"):
+        lines += [
             "",
-            f"### {type}",
+            f"## {type}",
             "",
-            "| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |",
-            "| ------ | ------- | ------- | ----- | ------ | ------ | ---- |",
         ]
-        for is_working in (True, False):
         for idx, _provider in enumerate(providers):
-            if is_working != _provider.working:
-                continue
             do_continue = False
-            if type == "GPT-4" and _provider.supports_gpt_4:
+            if type == "Auth" and _provider.needs_auth:
                 do_continue = True
-            elif type == "GPT-3.5" and not _provider.supports_gpt_4 and _provider.supports_gpt_35_turbo:
-                do_continue = True
-            elif type == "Other" and not _provider.supports_gpt_4 and not _provider.supports_gpt_35_turbo:
+            elif type == "Free" and not _provider.needs_auth:
                 do_continue = True
             if not do_continue:
                 continue
 
+            lines.append(
+                f"### {getattr(_provider, 'label', _provider.__name__)}",
+            )
+            provider_name = f"`g4f.Provider.{_provider.__name__}`"
+            lines.append(f"| Provider | {provider_name} |")
+            lines.append("| -------- | ---- |")
 
             if _provider.url:
                 netloc = urlparse(_provider.url).netloc.replace("www.", "")
                 website = f"[{netloc}]({_provider.url})"
             else:
                 website = "❌"
 
-            provider_name = f"`g4f.Provider.{_provider.__name__}`"
-
-            has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
-            has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
             message_history = "✔️" if _provider.supports_message_history else "❌"
             system = "✔️" if _provider.supports_system_message else "❌"
             stream = "✔️" if _provider.supports_stream else "❌"
             if _provider.working:
                 status = '![](https://img.shields.io/badge/Active-brightgreen)'
@@ -78,10 +81,32 @@ def print_providers():
                 status = '![](https://img.shields.io/badge/Unknown-grey)'
             auth = "✔️" if _provider.needs_auth else "❌"
 
-            lines.append(
-                f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
-            )
-    print("\n".join(lines))
+            lines.append(f"| **Website** | {website} | \n| **Status** | {status} |")
+
+            if issubclass(_provider, ProviderModelMixin):
+                try:
+                    all_models = _provider.get_models()
+                    models = [model for model in _all_models if model in all_models or model in _provider.model_aliases]
+                    image_models = _provider.image_models
+                    if image_models:
+                        for alias, name in _provider.model_aliases.items():
+                            if alias in _all_models and name in image_models:
+                                image_models.append(alias)
+                        image_models = [model for model in image_models if model in _all_models]
+                        if image_models:
+                            models = [model for model in models if model not in image_models]
+                    if models:
+                        lines.append(f"| **Models** | {', '.join(models)} ({len(all_models)})|")
+                    if image_models:
+                        lines.append(f"| **Image Models (Image Generation)** | {', '.join(image_models)} |")
+                    if hasattr(_provider, "vision_models"):
+                        lines.append(f"| **Vision (Image Upload)** | ✔️ |")
+                except:
+                    pass
+
+            lines.append(f"| **Authentication** | {auth} | \n| **Streaming** | {stream} |")
+            lines.append(f"| **System message** | {system} | \n| **Message history** | {message_history} |")
+    return lines
 
 def print_models():
     base_provider_names = {
@@ -123,30 +148,34 @@ def print_models():
 
         lines.append(f"| {name} | {base_provider} | {provider_name} | {website} |")
 
-    print("\n".join(lines))
+    return lines
 
 def print_image_models():
     lines = [
        "| Label | Provider | Image Model | Vision Model | Website |",
        "| ----- | -------- | ----------- | ------------ | ------- |",
    ]
-    from g4f.gui.server.api import Api
-    for image_model in Api.get_image_models():
-        provider_url = image_model["url"]
+    for provider in [provider for provider in __providers__ if provider.working and getattr(provider, "image_models", None) or getattr(provider, "vision_models", None)]:
+        provider_url = provider.url if provider.url else "❌"
         netloc = urlparse(provider_url).netloc.replace("www.", "")
         website = f"[{netloc}]({provider_url})"
-        label = image_model["provider"] if image_model["label"] is None else image_model["label"]
-        if image_model["image_model"] is None:
-            image_model["image_model"] = "❌"
-        if image_model["vision_model"] is None:
-            image_model["vision_model"] = "❌"
-        lines.append(f'| {label} | `g4f.Provider.{image_model["provider"]}` | {image_model["image_model"]}| {image_model["vision_model"]} | {website} |')
+        label = getattr(provider, "label", provider.__name__)
+        if provider.image_models:
+            image_models = ", ".join([model for model in provider.image_models if model in _all_models])
+        else:
+            image_models = "❌"
+        if hasattr(provider, "vision_models"):
+            vision_models = "✔️"
+        else:
+            vision_models = "❌"
+        lines.append(f'| {label} | `g4f.Provider.{provider.__name__}` | {image_models}| {vision_models} | {website} |')
 
-    print("\n".join(lines))
+    return lines
 
 if __name__ == "__main__":
-    #print_providers()
-    #print("\n", "-" * 50, "\n")
-    #print_models()
-    print("\n", "-" * 50, "\n")
-    print_image_models()
+    with open("docs/providers.md", "w") as f:
+        f.write("\n".join(print_providers()))
+        f.write(f"\n{'-' * 50} \n")
+        #f.write("\n".join(print_models()))
+        #f.write(f"\n{'-' * 50} \n")
+        f.write("\n".join(print_image_models()))
@@ -4,7 +4,7 @@ import asyncio
 
 from g4f.models import __models__
 from g4f.providers.base_provider import BaseProvider, ProviderModelMixin
-from g4f.models import Model
+from g4f.errors import MissingRequirementsError, MissingAuthError
 
 class TestProviderHasModel(unittest.IsolatedAsyncioTestCase):
     cache: dict = {}
@@ -13,11 +13,17 @@ class TestProviderHasModel(unittest.IsolatedAsyncioTestCase):
         for model, providers in __models__.values():
             for provider in providers:
                 if issubclass(provider, ProviderModelMixin):
-                    if model.name not in provider.model_aliases:
-                        await asyncio.wait_for(self.provider_has_model(provider, model), 10)
+                    if model.name in provider.model_aliases:
+                        model_name = provider.model_aliases[model.name]
+                    else:
+                        model_name = model.name
+                    await asyncio.wait_for(self.provider_has_model(provider, model_name), 10)
 
-    async def provider_has_model(self, provider: Type[BaseProvider], model: Model):
+    async def provider_has_model(self, provider: Type[BaseProvider], model: str):
         if provider.__name__ not in self.cache:
             try:
                 self.cache[provider.__name__] = provider.get_models()
             except (MissingRequirementsError, MissingAuthError):
                 return
         if self.cache[provider.__name__]:
-            self.assertIn(model.name, self.cache[provider.__name__], provider.__name__)
+            self.assertIn(model, self.cache[provider.__name__], provider.__name__)
@@ -10,7 +10,7 @@ import aiohttp
 import json
 from pathlib import Path
 
-from ..typing import AsyncResult, Messages, ImageType
+from ..typing import AsyncResult, Messages, ImagesType
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ..image import ImageResponse, to_data_uri
 from ..cookies import get_cookies_dir
@@ -197,8 +197,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         prompt: str = None,
         proxy: str = None,
         web_search: bool = False,
-        image: ImageType = None,
-        image_name: str = None,
+        images: ImagesType = None,
         top_p: float = 0.9,
         temperature: float = 0.5,
         max_tokens: int = 1024,
@@ -212,13 +211,14 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
 
         messages = [{"id": message_id, "content": formatted_message, "role": "user"}]
 
-        if image is not None:
+        if images is not None:
             messages[-1]['data'] = {
                 "imagesData": [
                     {
                         "filePath": f"MultipleFiles/{image_name}",
                         "contents": to_data_uri(image)
                     }
+                    for image, image_name in images
                 ],
                 "fileText": "",
                 "title": ""
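For orientation, the new comprehension above attaches every (image, filename) pair to the last message. A sketch of the resulting `data` field, with hypothetical filenames and shortened contents:

```python
# Illustrative only: messages[-1]["data"] after two images are attached.
data = {
    "imagesData": [
        {"filePath": "MultipleFiles/waterfall.jpeg", "contents": "data:image/jpeg;base64,..."},
        {"filePath": "MultipleFiles/cat.webp", "contents": "data:image/webp;base64,..."},
    ],
    "fileText": "",
    "title": "",
}
```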
@@ -3,7 +3,7 @@ from __future__ import annotations
 import random
 import asyncio
 from aiohttp import ClientSession
-from typing import Union, AsyncGenerator
+from typing import AsyncGenerator
 
 from ..typing import AsyncResult, Messages
 from ..image import ImageResponse
@@ -37,12 +37,15 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
         max_retries: int = 3,
         delay: int = 1,
         **kwargs
-    ) -> AsyncGenerator:
+    ) -> AsyncResult:
         if not model:
             model = cls.default_model
         if model in cls.chat_models:
             async for result in cls._generate_text(model, messages, proxy, max_retries, delay):
                 yield result
         elif model in cls.image_models:
-            async for result in cls._generate_image(model, messages, proxy):
+            prompt = messages[-1]["content"] if prompt is None else prompt
+            async for result in cls._generate_image(model, prompt, proxy):
                 yield result
         else:
             raise ValueError(f"Unsupported model: {model}")
@@ -87,14 +90,13 @@ class Blackbox2(AsyncGeneratorProvider, ProviderModelMixin):
     async def _generate_image(
         cls,
         model: str,
-        messages: Messages,
+        prompt: str,
         proxy: str = None
     ) -> AsyncGenerator:
         headers = cls._get_headers()
         api_endpoint = cls.api_endpoints[model]
 
         async with ClientSession(headers=headers) as session:
-            prompt = messages[-1]["content"]
             data = {
                 "query": prompt
             }
@@ -18,7 +18,7 @@ except ImportError:
 
 from .base_provider import AbstractProvider, ProviderModelMixin, BaseConversation
 from .helper import format_prompt
-from ..typing import CreateResult, Messages, ImageType
+from ..typing import CreateResult, Messages, ImagesType
 from ..errors import MissingRequirementsError, NoValidHarFileError
 from ..requests.raise_for_status import raise_for_status
 from ..providers.asyncio import get_running_loop
@@ -58,7 +58,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
         stream: bool = False,
         proxy: str = None,
         timeout: int = 900,
-        image: ImageType = None,
+        images: ImagesType = None,
         conversation: Conversation = None,
         return_conversation: bool = False,
         web_search: bool = True,
@@ -69,7 +69,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
 
         websocket_url = cls.websocket_url
         headers = None
-        if cls.needs_auth or image is not None:
+        if cls.needs_auth or images is not None:
             if cls._access_token is None:
                 try:
                     cls._access_token, cls._cookies = readHAR(cls.url)
@@ -112,8 +112,9 @@ class Copilot(AbstractProvider, ProviderModelMixin):
             prompt = messages[-1]["content"]
             debug.log(f"Copilot: Use conversation: {conversation_id}")
 
-        images = []
-        if image is not None:
+        uploaded_images = []
+        if images is not None:
+            for image, _ in images:
                 data = to_bytes(image)
                 response = session.post(
                     "https://copilot.microsoft.com/c/api/attachments",
@@ -121,13 +122,13 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                     data=data
                 )
                 raise_for_status(response)
-                images.append({"type":"image", "url": response.json().get("url")})
+                uploaded_images.append({"type":"image", "url": response.json().get("url")})
 
         wss = session.ws_connect(cls.websocket_url)
         wss.send(json.dumps({
             "event": "send",
             "conversationId": conversation_id,
-            "content": [*images, {
+            "content": [*uploaded_images, {
                 "type": "text",
                 "text": prompt,
             }],
@@ -1,19 +1,12 @@
 from __future__ import annotations
 
-from aiohttp import ClientSession, ClientResponseError
-import json
-from ..typing import AsyncResult, Messages, ImageType
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
 from .needs_auth import OpenaiAPI
 
-
-class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
+class DeepInfraChat(OpenaiAPI):
     label = "DeepInfra Chat"
     url = "https://deepinfra.com/chat"
-    api_endpoint = "https://api.deepinfra.com/v1/openai/chat/completions"
-
     working = True
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True
 
     default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
     models = [
@@ -31,16 +24,17 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
         "qwq-32b": "Qwen/QwQ-32B-Preview",
         "wizardlm-2-8x22b": "microsoft/WizardLM-2-8x22B",
         "qwen-2-72b": "Qwen/Qwen2.5-72B-Instruct",
-        "qwen-2.5-coder-32b": "Qwen2.5-Coder-32B-Instruct",
+        "qwen-2.5-coder-32b": "Qwen/Qwen2.5-Coder-32B-Instruct",
         "nemotron-70b": "nvidia/Llama-3.1-Nemotron-70B-Instruct",
     }
 
     @classmethod
-    async def create_async_generator(
+    def create_async_generator(
         cls,
         model: str,
         messages: Messages,
         proxy: str = None,
+        api_base: str = "https://api.deepinfra.com/v1/openai",
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -52,30 +46,4 @@ class DeepInfraChat(AsyncGeneratorProvider, ProviderModelMixin):
             'X-Deepinfra-Source': 'web-page',
             'accept': 'text/event-stream',
         }
-
-        data = {
-            'model': model,
-            'messages': messages,
-            'stream': True
-        }
-
-        async with ClientSession(headers=headers) as session:
-            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
-                response.raise_for_status()
-                async for line in response.content:
-                    if line:
-                        decoded_line = line.decode('utf-8').strip()
-                        if decoded_line.startswith('data:'):
-                            json_part = decoded_line[5:].strip()
-                            if json_part == '[DONE]':
-                                break
-                            try:
-                                data = json.loads(json_part)
-                                choices = data.get('choices', [])
-                                if choices:
-                                    delta = choices[0].get('delta', {})
-                                    content = delta.get('content', '')
-                                    if content:
-                                        yield content
-                            except json.JSONDecodeError:
-                                print(f"JSON decode error: {json_part}")
+        return super().create_async_generator(model, messages, proxy, api_base=api_base, headers=headers, **kwargs)
@@ -8,14 +8,13 @@ from ..image import ImageResponse, ImagePreview
 from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
 
 class Flux(AsyncGeneratorProvider, ProviderModelMixin):
-    label = "HuggingSpace (black-forest-labs-flux-1-dev)"
+    label = "Flux (HuggingSpace)"
     url = "https://black-forest-labs-flux-1-dev.hf.space"
     api_endpoint = "/gradio_api/call/infer"
     working = True
     default_model = 'flux-dev'
-    models = [default_model]
     image_models = [default_model]
+    model_aliases = {"flux-dev": "flux-1-dev"}
 
     @classmethod
     async def create_async_generator(
@@ -21,14 +21,13 @@ class PerplexityLabs(AsyncGeneratorProvider, ProviderModelMixin):
         "llama-3.1-sonar-small-128k-chat",
         "llama-3.1-8b-instruct",
         "llama-3.1-70b-instruct",
+        "llama-3.3-70b-instruct",
         "/models/LiquidCloud",
     ]
 
     model_aliases = {
-        "sonar-online": "llama-3.1-sonar-large-128k-online",
-        "sonar-chat": "llama-3.1-sonar-large-128k-chat",
+        "sonar-online": "sonar-small-128k-online",
+        "sonar-chat": "llama-3.1-sonar-small-128k-chat",
+        "llama-3.3-70b": "llama-3.3-70b-instruct",
         "llama-3.1-8b": "llama-3.1-8b-instruct",
         "llama-3.1-70b": "llama-3.1-70b-instruct",
@@ -23,7 +23,7 @@ class RubiksAI(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
 
     default_model = 'gpt-4o-mini'
-    models = [default_model, 'gpt-4o', 'o1-mini', 'claude-3.5-sonnet', 'grok-beta', 'gemini-1.5-pro', 'nova-pro']
+    models = [default_model, 'gpt-4o', 'o1-mini', 'claude-3.5-sonnet', 'grok-beta', 'gemini-1.5-pro', 'nova-pro', "llama-3.1-70b-versatile"]
     model_aliases = {
+        "llama-3.1-70b": "llama-3.1-70b-versatile",
     }
@@ -5,6 +5,7 @@ import json
 import random
 import re
 import base64
+import asyncio
 
 from aiohttp import ClientSession, BaseConnector
@@ -15,7 +16,7 @@ except ImportError:
     has_nodriver = False
 
 from ... import debug
-from ...typing import Messages, Cookies, ImageType, AsyncResult, AsyncIterator
+from ...typing import Messages, Cookies, ImagesType, AsyncResult, AsyncIterator
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, BaseConversation, SynthesizeData
 from ..helper import format_prompt, get_cookies
 from ...requests.raise_for_status import raise_for_status
@@ -97,8 +98,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
         proxy: str = None,
         cookies: Cookies = None,
         connector: BaseConnector = None,
-        image: ImageType = None,
-        image_name: str = None,
+        images: ImagesType = None,
         return_conversation: bool = False,
         conversation: Conversation = None,
         language: str = "en",
@@ -128,8 +128,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
             raise RuntimeError("Invalid cookies. SNlM0e not found")
 
         yield SynthesizeData(cls.__name__, {"text": messages[-1]["content"]})
-        image_url = await cls.upload_image(base_connector, to_bytes(image), image_name) if image else None
-
+        images = await cls.upload_images(base_connector, images) if images else None
         async with ClientSession(
             cookies=cls._cookies,
             headers=REQUEST_HEADERS,
@@ -148,8 +147,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                     prompt,
                     language=language,
                     conversation=conversation,
-                    image_url=image_url,
-                    image_name=image_name
+                    images=images
                 ))])
             }
             async with client.post(
@@ -195,7 +193,7 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                         images = [image[0][3][3] for image in response_part[4][0][12][7][0]]
                         image_prompt = image_prompt.replace("a fake image", "")
                         yield ImageResponse(images, image_prompt, {"cookies": cls._cookies})
-                    except TypeError:
+                    except (TypeError, IndexError, KeyError):
                         pass
 
     @classmethod
@@ -235,11 +233,10 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
         prompt: str,
         language: str,
         conversation: Conversation = None,
-        image_url: str = None,
-        image_name: str = None,
+        images: list[list[str, str]] = None,
         tools: list[list[str]] = []
     ) -> list:
-        image_list = [[[image_url, 1], image_name]] if image_url else []
+        image_list = [[[image_url, 1], image_name] for image_url, image_name in images] if images else []
         return [
             [prompt, 0, None, image_list, None, None, 0],
             [language],
@@ -262,11 +259,14 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
         0,
     ]
 
-async def upload_image(connector: BaseConnector, image: bytes, image_name: str = None):
+async def upload_images(connector: BaseConnector, images: ImagesType) -> list:
+    async def upload_image(image: bytes, image_name: str = None):
         async with ClientSession(
             headers=UPLOAD_IMAGE_HEADERS,
             connector=connector
         ) as session:
+            image = to_bytes(image)
+
             async with session.options(UPLOAD_IMAGE_URL) as response:
                 await raise_for_status(response)
@@ -290,7 +290,8 @@ class Gemini(AsyncGeneratorProvider, ProviderModelMixin):
                 upload_url, headers=headers, data=image
             ) as response:
                 await raise_for_status(response)
-                return await response.text()
+                return [await response.text(), image_name]
+    return await asyncio.gather(*[upload_image(image, image_name) for image, image_name in images])
 
     @classmethod
     async def fetch_snlm0e(cls, session: ClientSession, cookies: Cookies):
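As a rough sketch of the data flow introduced above (shapes inferred from the diff, values hypothetical): `upload_images` now returns one `[upload_response_text, image_name]` pair per input image, which `build_request` folds into its `image_list`:

```python
# Illustrative only: what upload_images returns and build_request consumes.
images = [
    ["<upload-token-for-waterfall>", "waterfall.jpeg"],
    ["<upload-token-for-cat>", "cat.webp"],
]
image_list = [[[image_url, 1], image_name] for image_url, image_name in images]
```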
@@ -4,7 +4,7 @@ import base64
 import json
 from aiohttp import ClientSession, BaseConnector
 
-from ...typing import AsyncResult, Messages, ImageType
+from ...typing import AsyncResult, Messages, ImagesType
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
 from ...image import to_bytes, is_accepted_format
 from ...errors import MissingAuthError
@@ -36,7 +36,7 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
         api_key: str = None,
         api_base: str = "https://generativelanguage.googleapis.com/v1beta",
         use_auth_header: bool = False,
-        image: ImageType = None,
+        images: ImagesType = None,
         connector: BaseConnector = None,
         **kwargs
     ) -> AsyncResult:
@@ -62,7 +62,8 @@ class GeminiPro(AsyncGeneratorProvider, ProviderModelMixin):
             for message in messages
             if message["role"] != "system"
         ]
-        if image is not None:
+        if images is not None:
+            for image, _ in images:
                 image = to_bytes(image)
                 contents[-1]["parts"].append({
                     "inline_data": {
@@ -18,7 +18,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
     default_model = HuggingChat.default_model
     default_image_model = HuggingChat.default_image_model
-    models = [*HuggingChat.models, default_image_model]
+    models = HuggingChat.models
     image_models = [default_image_model]
     model_aliases = HuggingChat.model_aliases
@@ -4,7 +4,7 @@ import json
 
 from ..helper import filter_none
 from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
-from ...typing import Union, Optional, AsyncResult, Messages, ImageType
+from ...typing import Union, Optional, AsyncResult, Messages, ImagesType
 from ...requests import StreamSession, raise_for_status
 from ...errors import MissingAuthError, ResponseError
 from ...image import to_data_uri
@@ -25,7 +25,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         proxy: str = None,
         timeout: int = 120,
-        image: ImageType = None,
+        images: ImagesType = None,
         api_key: str = None,
         api_base: str = "https://api.openai.com/v1",
         temperature: float = None,
@@ -40,14 +40,14 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
     ) -> AsyncResult:
         if cls.needs_auth and api_key is None:
             raise MissingAuthError('Add a "api_key"')
-        if image is not None:
+        if images is not None:
             if not model and hasattr(cls, "default_vision_model"):
                 model = cls.default_vision_model
             messages[-1]["content"] = [
-                {
+                *[{
                     "type": "image_url",
                     "image_url": {"url": to_data_uri(image)}
-                },
+                } for image, image_name in images],
                 {
                     "type": "text",
                     "text": messages[-1]["content"]
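For reference, the rewrite above expands each image into an OpenAI-style `image_url` part ahead of the text part. A sketch of the final user message (Data URIs shortened, prompt hypothetical):

```python
# Illustrative only: messages[-1] after the images are merged in.
message = {
    "role": "user",
    "content": [
        {"type": "image_url", "image_url": {"url": "data:image/jpeg;base64,..."}},
        {"type": "image_url", "image_url": {"url": "data:image/webp;base64,..."}},
        {"type": "text", "text": "What can you see in these images?"},
    ],
}
```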
@@ -18,7 +18,7 @@ except ImportError:
|
||||
has_nodriver = False
|
||||
|
||||
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
|
||||
from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
|
||||
from ...typing import AsyncResult, Messages, Cookies, ImagesType, AsyncIterator
|
||||
from ...requests.raise_for_status import raise_for_status
|
||||
from ...requests import StreamSession
|
||||
from ...requests import get_nodriver
|
||||
@@ -37,14 +37,14 @@ DEFAULT_HEADERS = {
|
||||
"accept-encoding": "gzip, deflate, br, zstd",
|
||||
'accept-language': 'en-US,en;q=0.8',
|
||||
"referer": "https://chatgpt.com/",
|
||||
"sec-ch-ua": "\"Brave\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
|
||||
"sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
"sec-ch-ua-platform": "\"Windows\"",
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "same-origin",
|
||||
"sec-gpc": "1",
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
|
||||
}
|
||||
|
||||
INIT_HEADERS = {
|
||||
@@ -53,19 +53,35 @@ INIT_HEADERS = {
|
||||
'cache-control': 'no-cache',
|
||||
'pragma': 'no-cache',
|
||||
'priority': 'u=0, i',
|
||||
'sec-ch-ua': '"Not)A;Brand";v="99", "Google Chrome";v="127", "Chromium";v="127"',
|
||||
"sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
|
||||
'sec-ch-ua-arch': '"arm"',
|
||||
'sec-ch-ua-bitness': '"64"',
|
||||
'sec-ch-ua-mobile': '?0',
|
||||
'sec-ch-ua-model': '""',
|
||||
'sec-ch-ua-platform': '"macOS"',
|
||||
"sec-ch-ua-platform": "\"Windows\"",
|
||||
'sec-ch-ua-platform-version': '"14.4.0"',
|
||||
'sec-fetch-dest': 'document',
|
||||
'sec-fetch-mode': 'navigate',
|
||||
'sec-fetch-site': 'none',
|
||||
'sec-fetch-user': '?1',
|
||||
'upgrade-insecure-requests': '1',
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
|
||||
}
|
||||
|
||||
UPLOAD_HEADERS = {
|
||||
"accept": "application/json, text/plain, */*",
|
||||
'accept-language': 'en-US,en;q=0.8',
|
||||
"referer": "https://chatgpt.com/",
|
||||
"priority": "u=1, i",
|
||||
"sec-ch-ua": "\"Google Chrome\";v=\"131\", \"Chromium\";v=\"131\", \"Not_A Brand\";v=\"24\"",
|
||||
"sec-ch-ua-mobile": "?0",
|
||||
'sec-ch-ua-platform': '"macOS"',
|
||||
"sec-fetch-dest": "empty",
|
||||
"sec-fetch-mode": "cors",
|
||||
"sec-fetch-site": "cross-site",
|
||||
"x-ms-blob-type": "BlockBlob",
|
||||
"x-ms-version": "2020-04-08",
|
||||
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
|
||||
}
|
||||
|
||||
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
@@ -78,7 +94,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     supports_message_history = True
     supports_system_message = True
     default_model = "auto"
-    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+    fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1", "o1-preview", "o1-mini"]
     vision_models = fallback_models
     synthesize_content_type = "audio/mpeg"

@@ -100,12 +116,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         return cls.models

     @classmethod
-    async def upload_image(
+    async def upload_images(
         cls,
         session: StreamSession,
         headers: dict,
-        image: ImageType,
-        image_name: str = None
+        images: ImagesType,
     ) -> ImageRequest:
         """
         Upload an image to the service and get the download URL
@@ -113,11 +128,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):

         Args:
             session: The StreamSession object to use for requests
             headers: The headers to include in the requests
-            image: The image to upload, either a PIL Image object or a bytes object
+            images: The images to upload, either a PIL Image object or a bytes object

         Returns:
             An ImageRequest object that contains the download URL, file name, and other data
         """
+        async def upload_image(image, image_name):
             # Convert the image to a PIL Image object and get the extension
             data_bytes = to_bytes(image)
             image = to_image(data_bytes)
@@ -140,11 +156,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "width": image.width
             }
             # Put the image bytes to the upload URL and check the status
+            await asyncio.sleep(1)
             async with session.put(
                 image_data["upload_url"],
                 data=data_bytes,
                 headers={
-                    **DEFAULT_HEADERS,
+                    **UPLOAD_HEADERS,
                     "Content-Type": image_data["mime_type"],
                     "x-ms-blob-type": "BlockBlob",
                     "x-ms-version": "2020-04-08",
@@ -162,9 +179,12 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             await raise_for_status(response, "Get download url failed")
             image_data["download_url"] = (await response.json())["download_url"]
             return ImageRequest(image_data)
+        if not images:
+            return
+        return [await upload_image(image, image_name) for image, image_name in images]
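The helper now nests the original single-image logic in `upload_image` and maps it over every `(image, name)` pair, returning one `ImageRequest` per upload. A rough usage sketch, assuming it runs inside a coroutine that already holds the provider's prepared `StreamSession` and auth headers:

```python
# Sketch only: `session` and `headers` are assumed to come from the
# provider's existing request setup shown elsewhere in this file.
images = [
    (open("photo.jpg", "rb").read(), "photo.jpg"),
    (open("chart.png", "rb").read(), "chart.png"),
]
# Returns a list with one ImageRequest per (image, name) pair.
image_requests = await OpenaiChat.upload_images(session, headers, images)
```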

     @classmethod
-    def create_messages(cls, messages: Messages, image_request: ImageRequest = None, system_hints: list = None):
+    def create_messages(cls, messages: Messages, image_requests: ImageRequest = None, system_hints: list = None):
         """
         Create a list of messages for the user input

@@ -185,16 +205,18 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         } for message in messages]

         # Check if there is an image response
-        if image_request is not None:
+        if image_requests:
             # Change content in last user message
             messages[-1]["content"] = {
                 "content_type": "multimodal_text",
-                "parts": [{
+                "parts": [*[{
                     "asset_pointer": f"file-service://{image_request.get('file_id')}",
                     "height": image_request.get("height"),
                     "size_bytes": image_request.get("file_size"),
                     "width": image_request.get("width"),
-                }, messages[-1]["content"]["parts"][0]]
+                }
+                for image_request in image_requests],
+                messages[-1]["content"]["parts"][0]]
             }
             # Add the metadata object with the attachments
             messages[-1]["metadata"] = {
@@ -205,7 +227,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 "name": image_request.get("file_name"),
                 "size": image_request.get("file_size"),
                 "width": image_request.get("width"),
-            }]
+            }
+            for image_request in image_requests]
         }
         return messages
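After this rewrite the last user message carries one multimodal part per uploaded image, followed by the original text part. For two images the result looks roughly like this (all field values are illustrative):

```python
# Illustrative shape of the rewritten last user message (made-up values):
example_message = {
    "content": {
        "content_type": "multimodal_text",
        "parts": [
            {"asset_pointer": "file-service://file-aaa", "height": 512, "size_bytes": 4096, "width": 512},
            {"asset_pointer": "file-service://file-bbb", "height": 256, "size_bytes": 2048, "width": 256},
            "Describe both images",
        ],
    },
    "metadata": {"attachments": []},  # one attachment entry per image
}
```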

@@ -259,8 +282,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         conversation_id: str = None,
         conversation: Conversation = None,
         parent_id: str = None,
-        image: ImageType = None,
-        image_name: str = None,
+        images: ImagesType = None,
         return_conversation: bool = False,
         max_retries: int = 3,
         web_search: bool = False,
@@ -281,7 +303,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             action (str): Type of action ('next', 'continue', 'variant').
             conversation_id (str): ID of the conversation.
             parent_id (str): ID of the parent message.
-            image (ImageType): Image to include in the conversation.
+            images (ImagesType): Images to include in the conversation.
             return_conversation (bool): Flag to include response fields in the output.
             **kwargs: Additional keyword arguments.

@@ -298,7 +320,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             impersonate="chrome",
             timeout=timeout
         ) as session:
-            image_request = None
+            image_requests = None
             if not cls.needs_auth:
                 if cls._headers is None:
                     cls._create_request_args(cookies)
@@ -310,7 +332,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     cls._update_request_args(session)
                     await raise_for_status(response)
                 try:
-                    image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
+                    image_requests = await cls.upload_images(session, cls._headers, images) if images else None
                 except Exception as e:
                     debug.log("OpenaiChat: Upload image failed")
                     debug.log(f"{e.__class__.__name__}: {e}")
@@ -384,7 +406,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
                 if action != "continue":
                     messages = messages if conversation_id is None else [messages[-1]]
-                    data["messages"] = cls.create_messages(messages, image_request, ["search"] if web_search else None)
+                    data["messages"] = cls.create_messages(messages, image_requests, ["search"] if web_search else None)
                 headers = {
                     **cls._headers,
                     "accept": "text/event-stream",

@@ -2,11 +2,12 @@ from __future__ import annotations

 import os
 import logging
+from typing import Union, Optional

 from . import debug, version
 from .models import Model
 from .client import Client, AsyncClient
-from .typing import Messages, CreateResult, AsyncResult, Union
+from .typing import Messages, CreateResult, AsyncResult, ImageType
 from .errors import StreamNotSupportedError, ModelNotAllowedError
 from .cookies import get_cookies, set_cookies
 from .providers.types import ProviderType
@@ -27,32 +28,27 @@ class ChatCompletion:
                messages : Messages,
                provider : Union[ProviderType, str, None] = None,
                stream   : bool = False,
                auth     : Union[str, None] = None,
+               image    : ImageType = None,
+               image_name: Optional[str] = None,
                ignored  : list[str] = None,
                ignore_working: bool = False,
                ignore_stream: bool = False,
-               patch_provider: callable = None,
                **kwargs) -> Union[CreateResult, str]:
         model, provider = get_model_and_provider(
             model, provider, stream,
             ignored, ignore_working,
             ignore_stream or kwargs.get("ignore_stream_and_auth")
         )

         if auth is not None:
             kwargs['auth'] = auth

+        if image is not None:
+            kwargs["images"] = [(image, image_name)]
         if "proxy" not in kwargs:
             proxy = os.environ.get("G4F_PROXY")
             if proxy:
                 kwargs['proxy'] = proxy

-        if patch_provider:
-            provider = patch_provider(provider)
-
         result = provider.create_completion(model, messages, stream=stream, **kwargs)

-        return result if stream else ''.join([str(chunk) for chunk in result])
+        return result if stream else ''.join([str(chunk) for chunk in result if chunk])
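The legacy interface therefore keeps its single-image surface and simply wraps the arguments into the new list shape. A minimal sketch of a call that still works unchanged (model and file names are placeholders):

```python
import g4f

# Forwarded internally as kwargs["images"] = [(image, image_name)].
response = g4f.ChatCompletion.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What is shown in this picture?"}],
    image=open("picture.jpg", "rb"),
    image_name="picture.jpg",
)
print(response)
```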

     @staticmethod
     def create_async(model    : Union[Model, str],
@@ -61,40 +57,12 @@ class ChatCompletion:
                      stream   : bool = False,
                      ignored  : list[str] = None,
                      ignore_working: bool = False,
-                     patch_provider: callable = None,
                      **kwargs) -> Union[AsyncResult, str]:
         model, provider = get_model_and_provider(model, provider, False, ignored, ignore_working)

         if stream:
-            if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+            if hasattr(provider, "create_async_generator"):
                 return provider.create_async_generator(model, messages, **kwargs)
             raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument in "create_async"')

-        if patch_provider:
-            provider = patch_provider(provider)
-
         return provider.create_async(model, messages, **kwargs)

-class Completion:
-    @staticmethod
-    def create(model    : Union[Model, str],
-               prompt   : str,
-               provider : Union[ProviderType, None] = None,
-               stream   : bool = False,
-               ignored  : list[str] = None, **kwargs) -> Union[CreateResult, str]:
-        allowed_models = [
-            'code-davinci-002',
-            'text-ada-001',
-            'text-babbage-001',
-            'text-curie-001',
-            'text-davinci-002',
-            'text-davinci-003'
-        ]
-        if model not in allowed_models:
-            raise ModelNotAllowedError(f'Can\'t use {model} with Completion.create()')
-
-        model, provider = get_model_and_provider(model, provider, stream, ignored)
-
-        result = provider.create_completion(model, [{"role": "user", "content": prompt}], stream=stream, **kwargs)
-
-        return result if stream else ''.join(result)

@@ -102,6 +102,7 @@ class ChatCompletionsConfig(BaseModel):
     stream: bool = False
     image: Optional[str] = None
     image_name: Optional[str] = None
+    images: Optional[list[tuple[str, str]]] = None
     temperature: Optional[float] = None
     max_tokens: Optional[int] = None
     stop: Union[list[str], str, None] = None
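API clients can then submit several images per request through the new field; each entry is a `[data_uri, filename]` pair, matching the validation example added below. A hypothetical request body:

```python
# Hypothetical payload for the chat completions endpoint; the base64
# strings are truncated placeholders.
payload = {
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Compare these two images"}],
    "images": [
        ["data:image/jpeg;base64,...", "photo.jpg"],
        ["data:image/png;base64,...", "chart.png"],
    ],
}
```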
@@ -171,7 +172,7 @@ class AppConfig:
     ignored_providers: Optional[list[str]] = None
     g4f_api_key: Optional[str] = None
     ignore_cookie_files: bool = False
-    model: str = None,
+    model: str = None
     provider: str = None
     image_provider: str = None
     proxy: str = None
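The dropped trailing comma fixes a real bug: in a class body, an annotated assignment with a trailing comma binds a one-element tuple rather than the value itself.

```python
class Example:
    model: str = None,   # binds (None,), a 1-tuple (the old bug)
    fixed: str = None    # binds None, as intended

assert Example.model == (None,)
assert Example.fixed is None
```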
@@ -328,8 +329,14 @@ class Api:
             try:
                 is_data_uri_an_image(config.image)
             except ValueError as e:
-                return ErrorResponse.from_message(f"The image you send must be a data URI. Example: data:image/webp;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY)
-
+                return ErrorResponse.from_message(f"The image you send must be a data URI. Example: data:image/jpeg;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY)
+        if config.images is not None:
+            for image in config.images:
+                try:
+                    is_data_uri_an_image(image[0])
+                except ValueError as e:
+                    example = json.dumps({"images": [["data:image/jpeg;base64,...", "filename"]]})
+                    return ErrorResponse.from_message(f'The image you send must be a data URI. Example: {example}', status_code=HTTP_422_UNPROCESSABLE_ENTITY)
         # Create the completion response
         response = self.client.chat.completions.create(
             **filter_none(
@@ -522,8 +529,8 @@ def format_exception(e: Union[Exception, str], config: Union[ChatCompletionsConf
     message = f"{e.__class__.__name__}: {e}"
     return json.dumps({
         "error": {"message": message},
-        "model": last_provider.get("model") if model is None else model,
+        **filter_none(
+            model=last_provider.get("model") if model is None else model,
+            provider=last_provider.get("name") if provider is None else provider
+        )
     })
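`filter_none`, used here and in the completion call above, drops entries whose value is `None`, so `model` and `provider` only appear in the error payload when they are actually known. A rough equivalent, not the library's exact source:

```python
def filter_none(**kwargs) -> dict:
    # Keep only the keyword arguments that carry a real value.
    return {key: value for key, value in kwargs.items() if value is not None}

assert filter_none(model="gpt-4o", provider=None) == {"model": "gpt-4o"}
```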

@@ -12,7 +12,7 @@ from ..image import ImageResponse, copy_images, images_dir
 from ..typing import Messages, ImageType
 from ..providers.types import ProviderType
 from ..providers.response import ResponseType, FinishReason, BaseConversation, SynthesizeData
-from ..errors import NoImageResponseError, MissingAuthError, NoValidHarFileError
+from ..errors import NoImageResponseError
 from ..providers.retry_provider import IterListProvider
 from ..providers.asyncio import to_sync_generator, async_generator_to_list
 from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
@@ -192,6 +192,8 @@ class Completions:
         provider: Optional[ProviderType] = None,
         stream: Optional[bool] = False,
         proxy: Optional[str] = None,
+        image: Optional[ImageType] = None,
+        image_name: Optional[str] = None,
         response_format: Optional[dict] = None,
         max_tokens: Optional[int] = None,
         stop: Optional[Union[list[str], str]] = None,
@@ -210,7 +212,8 @@ class Completions:
             ignore_stream,
         )
         stop = [stop] if isinstance(stop, str) else stop
-
+        if image is not None:
+            kwargs["images"] = [(image, image_name)]
         response = provider.create_completion(
             model,
             messages,
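The newer client gets the same shim, and because extra keyword arguments are forwarded to the provider, callers can also pass several images at once where the selected provider supports them. A sketch, assuming a vision-capable provider is picked; the multi-image call relies on that kwargs pass-through:

```python
from g4f.client import Client

client = Client()

# Single image through the convenience arguments added above.
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "What is in this image?"}],
    image=open("photo.jpg", "rb"),
    image_name="photo.jpg",
)

# Several images at once (assumed kwargs pass-through to the provider).
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Compare these images"}],
    images=[(open("a.jpg", "rb"), "a.jpg"), (open("b.jpg", "rb"), "b.jpg")],
)
```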
@@ -390,8 +393,6 @@ class Images:
         e = None
         response = None
         if isinstance(provider_handler, IterListProvider):
-            # File pointer can be read only once, so we need to convert it to bytes
-            image = to_bytes(image)
             for provider in provider_handler.providers:
                 try:
                     response = await self._generate_image_response(provider, provider.__name__, model, prompt, image=image, **kwargs)
@@ -471,6 +472,8 @@ class AsyncCompletions:
         provider: Optional[ProviderType] = None,
         stream: Optional[bool] = False,
         proxy: Optional[str] = None,
+        image: Optional[ImageType] = None,
+        image_name: Optional[str] = None,
         response_format: Optional[dict] = None,
         max_tokens: Optional[int] = None,
         stop: Optional[Union[list[str], str]] = None,
@@ -489,7 +492,8 @@ class AsyncCompletions:
             ignore_stream,
         )
         stop = [stop] if isinstance(stop, str) else stop
-
+        if image is not None:
+            kwargs["images"] = [(image, image_name)]
         if hasattr(provider, "create_async_generator"):
             create_handler = provider.create_async_generator
         else:

@@ -240,7 +240,7 @@
             <textarea id="message-input" placeholder="Ask a question" cols="30" rows="10"
                 style="white-space: pre-wrap;resize: none;"></textarea>
             <label class="file-label image-label" for="image" title="">
-                <input type="file" id="image" name="image" accept="image/*" required/>
+                <input type="file" id="image" name="image" accept="image/*" required multiple/>
                 <i class="fa-regular fa-image"></i>
             </label>
             <label class="file-label image-label" for="camera">

@@ -343,11 +343,18 @@ const handle_ask = async () => {
     let message_index = await add_message(window.conversation_id, "user", message);
     let message_id = get_message_id();

-    if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
+    if (imageInput.dataset.objects) {
+        imageInput.dataset.objects.split(" ").forEach((object)=>URL.revokeObjectURL(object))
+        delete imageInput.dataset.objects;
+    }
     const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
-    if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
-    else delete imageInput.dataset.src
+    images = [];
+    if (input.files.length > 0) {
+        for (const file of input.files) {
+            images.push(URL.createObjectURL(file));
+        }
+        imageInput.dataset.objects = images.join(" ");
+    }
     message_box.innerHTML += `
         <div class="message" data-index="${message_index}">
             <div class="user">
@@ -358,10 +365,7 @@ const handle_ask = async () => {
             <div class="content" id="user_${message_id}">
                 <div class="content_inner">
                     ${markdown_render(message)}
-                    ${imageInput.dataset.src
-                        ? '<img src="' + imageInput.dataset.src + '" alt="Image upload">'
-                        : ''
-                    }
+                    ${images.map((object)=>'<img src="' + object + '" alt="Image upload">').join("")}
                 </div>
                 <div class="count">
                     ${count_words_and_tokens(message, get_selected_model()?.value)}
@@ -602,7 +606,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
     }
     try {
         const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
-        const file = input && input.files.length > 0 ? input.files[0] : null;
+        const files = input && input.files.length > 0 ? input.files : null;
         const auto_continue = document.getElementById("auto_continue")?.checked;
         const download_images = document.getElementById("download_images")?.checked;
         let api_key = get_api_key_by_provider(provider);
@@ -616,7 +620,7 @@ const ask_gpt = async (message_id, message_index = -1, regenerate = false, provi
             auto_continue: auto_continue,
             download_images: download_images,
             api_key: api_key,
-        }, file, message_id);
+        }, files, message_id);
         if (!error_storage[message_id]) {
             html = markdown_render(message_storage[message_id]);
             content_map.inner.innerHTML = html;
@@ -771,6 +775,7 @@ const set_conversation = async (conversation_id) => {
 const new_conversation = async () => {
     history.pushState({}, null, `/chat/`);
     window.conversation_id = uuid();
+    document.title = window.title || document.title;

     await clear_conversation();
     if (systemPrompt) {
@@ -790,6 +795,8 @@ const load_conversation = async (conversation_id, scroll=true) => {
         return;
     }

+    document.title = conversation.new_title ? `g4f - ${conversation.new_title}` : document.title;
+
     if (systemPrompt) {
         systemPrompt.value = conversation.system || "";
     }
@@ -1422,7 +1429,10 @@ async function load_version() {
     let new_version = document.querySelector(".new_version");
     if (new_version) return;
     const versions = await api("version");
-    document.title = 'g4f - ' + versions["version"];
+    window.title = 'g4f - ' + versions["version"];
+    if (document.title == "g4f - gui") {
+        document.title = window.title;
+    }
     let text = "version ~ "
     if (versions["version"] != versions["latest_version"]) {
         let release_url = 'https://github.com/xtekky/gpt4free/releases/latest';
@@ -1445,9 +1455,9 @@ setTimeout(load_version, 100);
 [imageInput, cameraInput].forEach((el) => {
     el.addEventListener('click', async () => {
         el.value = '';
-        if (imageInput.dataset.src) {
-            URL.revokeObjectURL(imageInput.dataset.src);
-            delete imageInput.dataset.src
+        if (imageInput.dataset.objects) {
+            imageInput.dataset.objects.split(" ").forEach((object) => URL.revokeObjectURL(object));
+            delete imageInput.dataset.objects
         }
     });
 });
@@ -1521,7 +1531,7 @@ function get_selected_model() {
         }
     }

-async function api(ressource, args=null, file=null, message_id=null) {
+async function api(ressource, args=null, files=null, message_id=null) {
     let api_key;
     if (ressource == "models" && args) {
         api_key = get_api_key_by_provider(args);
@@ -1535,9 +1545,11 @@ async function api(ressource, args=null, file=null, message_id=null) {
     if (ressource == "conversation") {
         let body = JSON.stringify(args);
         headers.accept = 'text/event-stream';
-        if (file !== null) {
+        if (files !== null) {
             const formData = new FormData();
-            formData.append('file', file);
+            for (const file of files) {
+                formData.append('files[]', file)
+            }
             formData.append('json', body);
             body = formData;
         } else {

@@ -8,7 +8,7 @@ from flask import send_from_directory
 from inspect import signature

 from g4f import version, models
-from g4f import get_last_provider, ChatCompletion, get_model_and_provider
+from g4f import ChatCompletion, get_model_and_provider
 from g4f.errors import VersionNotFoundError
 from g4f.image import ImagePreview, ImageResponse, copy_images, ensure_images_dir, images_dir
 from g4f.Provider import ProviderType, __providers__, __map__

@@ -117,11 +117,12 @@ class Backend_Api(Api):
         """

         kwargs = {}
-        if "file" in request.files:
-            file = request.files['file']
+        if "files[]" in request.files:
+            images = []
+            for file in request.files.getlist('files[]'):
                 if file.filename != '' and is_allowed_extension(file.filename):
-                kwargs['image'] = to_image(file.stream, file.filename.endswith('.svg'))
-                kwargs['image_name'] = file.filename
+                    images.append((to_image(file.stream, file.filename.endswith('.svg')), file.filename))
+            kwargs['images'] = images
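This mirrors the multipart format the reworked GUI sends: every selected file travels in a `files[]` field next to the JSON payload. A hypothetical client-side equivalent (the endpoint path is an assumption based on the GUI backend's routes):

```python
import json
import requests

# Each file goes into its own "files[]" part, as the Flask handler expects.
files = [
    ("files[]", open("photo.jpg", "rb")),
    ("files[]", open("chart.png", "rb")),
]
data = {"json": json.dumps({
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Describe these images"}],
})}
requests.post("http://localhost:8080/backend-api/v2/conversation",
              files=files, data=data, stream=True)
```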
         if "json" in request.form:
             json_data = json.loads(request.form['json'])
         else:

@@ -266,6 +266,7 @@ def to_bytes(image: ImageType) -> bytes:
         image.seek(0)
         return bytes_io.getvalue()
     else:
+        image.seek(0)
         return image.read()
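The added `image.seek(0)` rewinds file-like objects before reading, so the same upload can be consumed more than once; this is likely why the explicit `to_bytes` conversion in the `Images` retry loop above could be dropped. A small illustration:

```python
from io import BytesIO

buf = BytesIO(b"\x89PNG placeholder")
buf.read()     # an earlier consumer leaves the pointer at EOF
buf.seek(0)    # rewind, as to_bytes now does
assert buf.read() == b"\x89PNG placeholder"
```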

 def to_data_uri(image: ImageType) -> str:
@@ -283,7 +284,7 @@ async def copy_images(
     images: list[str],
     cookies: Optional[Cookies] = None,
     proxy: Optional[str] = None
-):
+) -> list[str]:
     ensure_images_dir()
     async with ClientSession(
         connector=get_connector(proxy=proxy),

@@ -91,6 +91,7 @@ class StreamSession(AsyncSession):
     put = partialmethod(request, "PUT")
     patch = partialmethod(request, "PATCH")
     delete = partialmethod(request, "DELETE")
+    options = partialmethod(request, "OPTIONS")

 if has_curl_mime:
     class FormData(CurlMime):

@@ -19,7 +19,8 @@ CreateResult = Iterator[Union[str, ResponseType]]
 AsyncResult = AsyncIterator[Union[str, ResponseType]]
 Messages = List[Dict[str, Union[str, List[Dict[str, Union[str, Dict[str, str]]]]]]]
 Cookies = Dict[str, str]
-ImageType = Union[str, bytes, IO, Image, None]
+ImageType = Union[str, bytes, IO, Image]
+ImagesType = List[Tuple[ImageType, Optional[str]]]

 __all__ = [
     'Any',
@@ -41,5 +42,6 @@ __all__ = [
     'Messages',
     'Cookies',
     'Image',
-    'ImageType'
+    'ImageType',
+    'ImagesType'
 ]
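`ImagesType` is the shape every entry point above converges on: a list of `(image, optional filename)` pairs, where each image may be a string (for example a data URI), raw bytes, an open file object, or a PIL image. For example:

```python
# Example values conforming to ImagesType (contents are placeholders):
images = [
    ("data:image/jpeg;base64,...", "photo.jpg"),  # data URI with a name
    (b"\x89PNG placeholder", None),               # raw bytes, no name
    (open("chart.png", "rb"), "chart.png"),       # file object
]
```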