Mirror of https://github.com/xtekky/gpt4free.git (synced 2025-10-23 00:09:34 +08:00)

* refactor(g4f/Provider/Airforce.py): improve model handling and filtering
  - Add hidden_models set to exclude specific models
  - Add evil alias for uncensored model handling
  - Extend filtering for model-specific response tokens
  - Add response buffering for streamed content
  - Update model fetching with error handling
* refactor(g4f/Provider/Blackbox.py): improve caching and model handling
  - Add caching system for validated values with file-based storage
  - Rename 'flux' model to 'ImageGeneration' and update references
  - Add temperature, top_p and max_tokens parameters to generator
  - Simplify HTTP headers and remove redundant options
  - Add model alias mapping for ImageGeneration
  - Add file system utilities for cache management
* feat(g4f/Provider/RobocodersAPI.py): add caching and error handling
  - Add file-based caching system for access tokens and sessions
  - Add robust error handling with specific error messages
  - Add automatic dialog continuation on resource limits
  - Add HTML parsing with BeautifulSoup for token extraction
  - Add debug logging for error tracking
  - Add timeout configuration for API requests
* refactor(g4f/Provider/DarkAI.py): update DarkAI default model and aliases
  - Change default model from llama-3-405b to llama-3-70b
  - Remove llama-3-405b from supported models list
  - Remove llama-3.1-405b from model aliases
* feat(g4f/Provider/Blackbox2.py): add image generation support
  - Add image model 'flux' with dedicated API endpoint
  - Refactor generator to support both text and image outputs
  - Extract headers into reusable static method
  - Add type hints for AsyncGenerator return type
  - Split generation logic into _generate_text and _generate_image methods
  - Add ImageResponse handling for image generation results
  BREAKING CHANGE: create_async_generator now returns AsyncGenerator instead of AsyncResult
* refactor(g4f/Provider/ChatGptEs.py): update ChatGptEs model configuration
  - Update models list to include gpt-3.5-turbo
  - Remove chatgpt-4o-latest from supported models
  - Remove model_aliases mapping for gpt-4o
* feat(g4f/Provider/DeepInfraChat.py): add Accept-Language header support
  - Add Accept-Language header for internationalization
  - Maintain existing header configuration
  - Improve request compatibility with language preferences
* refactor(g4f/Provider/needs_auth/Gemini.py): add ProviderModelMixin inheritance
  - Add ProviderModelMixin to class inheritance
  - Import ProviderModelMixin from base_provider
  - Move BaseConversation import to base_provider imports
* refactor(g4f/Provider/Liaobots.py): update model details and aliases
  - Add version suffix to o1 model IDs
  - Update model aliases for o1-preview and o1-mini
  - Standardize version format across model definitions
* refactor(g4f/Provider/PollinationsAI.py): enhance model support and generation
  - Split generation logic into dedicated image/text methods
  - Add additional text models including sur and claude
  - Add width/height parameters for image generation
  - Add model existence validation
  - Add hasattr checks for model lists initialization
* chore(gitignore): add provider cache directory
  - Add g4f/Provider/.cache to gitignore patterns
* refactor(g4f/Provider/ReplicateHome.py): update model configuration
  - Update default model to gemma-2b-it
  - Add default_image_model configuration
  - Remove llava-13b from supported models
  - Simplify request headers
* feat(g4f/models.py): expand provider and model support
  - Add new providers DarkAI and PollinationsAI
  - Add new models for Mistral, Flux and image generation
  - Update provider lists for existing models
  - Add P1 and Evil models with experimental providers
  BREAKING CHANGE: Remove llava-13b model support
* refactor(g4f/Provider/Airforce.py): update type hint for split_message return
  - Change return type of 'split_message' from 'list[str]' to 'List[str]' for consistency with the 'List' import
  - Maintain overall functionality and structure of the 'Airforce' class
  - Ensure compatibility with type hinting standards in Python
* feat(g4f/Provider/RobocodersAPI.py): add support for optional BeautifulSoup dependency
  - Introduce a check for the BeautifulSoup library and handle its absence gracefully
  - Raise an error if BeautifulSoup is not installed, prompting the user to install it
  - Remove direct import of BeautifulSoup to avoid import errors when the library is missing

---------
Co-authored-by: kqlio67 <>
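
A minimal sketch of the optional-dependency check described in the RobocodersAPI bullets above. The function name, the error type, and the `<pre>` selector are illustrative assumptions, not the provider's exact code:

    # Hypothetical illustration of the optional BeautifulSoup import pattern.
    try:
        from bs4 import BeautifulSoup
        HAS_BEAUTIFULSOUP = True
    except ImportError:
        HAS_BEAUTIFULSOUP = False

    def parse_token(html: str) -> str:
        # Fail with an actionable message only when the optional feature is used,
        # instead of crashing at import time.
        if not HAS_BEAUTIFULSOUP:
            raise RuntimeError('Install the "beautifulsoup4" package to use this provider')
        soup = BeautifulSoup(html, "html.parser")
        pre = soup.find("pre")
        return pre.get_text(strip=True) if pre else ""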
928 lines · 20 KiB · Python
from __future__ import annotations

from dataclasses import dataclass

from .Provider import IterListProvider, ProviderType
from .Provider import (
    AIChatFree,
    AmigoChat,
    Blackbox,
    Blackbox2,
    BingCreateImages,
    ChatGpt,
    ChatGptEs,
    Cloudflare,
    Copilot,
    CopilotAccount,
    DarkAI,
    DDG,
    DeepInfraChat,
    Free2GPT,
    GigaChat,
    Gemini,
    GeminiPro,
    HuggingChat,
    HuggingFace,
    Liaobots,
    Airforce,
    MagickPen,
    Mhystical,
    MetaAI,
    MicrosoftDesigner,
    OpenaiChat,
    OpenaiAccount,
    PerplexityLabs,
    Pi,
    Pizzagpt,
    PollinationsAI,
    Reka,
    ReplicateHome,
    RubiksAI,
    TeachAnything,
    Upstage,
    Flux,
)

@dataclass(unsafe_hash=True)
class Model:
    """
    Represents a machine learning model configuration.

    Attributes:
        name (str): Name of the model.
        base_provider (str): Default provider for the model.
        best_provider (ProviderType): The preferred provider for the model, typically with retry logic.
    """
    name: str
    base_provider: str
    best_provider: ProviderType = None

    @staticmethod
    def __all__() -> list[str]:
        """Returns a list of all model names."""
        return _all_models

class ImageModel(Model):
    pass

### Default ###
default = Model(
    name = "",
    base_provider = "",
    best_provider = IterListProvider([
        DDG,
        Pizzagpt,
        ReplicateHome,
        Blackbox2,
        Upstage,
        Blackbox,
        Free2GPT,
        DeepInfraChat,
        Airforce,
        ChatGptEs,
        Cloudflare,
        Mhystical,
        AmigoChat,
    ])
)
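
# Descriptive note (added; see g4f/Provider for the authoritative behavior):
# IterListProvider wraps an ordered list of providers and tries them in turn
# until one returns a result, so a `best_provider` built from it encodes a
# fallback chain rather than a single backend.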

############
### Text ###
############

### OpenAI ###
# gpt-3.5
gpt_35_turbo = Model(
    name = 'gpt-3.5-turbo',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Blackbox, ChatGptEs, PollinationsAI, DarkAI])
)

# gpt-4
gpt_4o = Model(
    name = 'gpt-4o',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Blackbox, ChatGptEs, PollinationsAI, DarkAI, ChatGpt, AmigoChat, Airforce, Liaobots, OpenaiChat])
)

gpt_4o_mini = Model(
    name = 'gpt-4o-mini',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([DDG, Blackbox, ChatGptEs, Pizzagpt, ChatGpt, AmigoChat, Airforce, RubiksAI, MagickPen, Liaobots, OpenaiChat])
)

gpt_4_turbo = Model(
    name = 'gpt-4-turbo',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Liaobots, Airforce])
)

gpt_4 = Model(
    name = 'gpt-4',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([DDG, Blackbox, PollinationsAI, Copilot, OpenaiChat, Liaobots, Airforce])
)

# o1
o1_preview = Model(
    name = 'o1-preview',
    base_provider = 'OpenAI',
    best_provider = Liaobots
)

o1_mini = Model(
    name = 'o1-mini',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Liaobots, Airforce])
)

### GigaChat ###
gigachat = Model(
    name = 'GigaChat:latest',
    base_provider = 'gigachat',
    best_provider = GigaChat
)

### Meta ###
meta = Model(
    name = "meta-ai",
    base_provider = "Meta",
    best_provider = MetaAI
)

# llama 2
llama_2_7b = Model(
    name = "llama-2-7b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([Cloudflare, Airforce])
)

# llama 3
llama_3_8b = Model(
    name = "llama-3-8b",
    base_provider = "Meta Llama",
    best_provider = Cloudflare
)

# llama 3.1
llama_3_1_8b = Model(
    name = "llama-3.1-8b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([Blackbox, DeepInfraChat, Cloudflare, Airforce, PerplexityLabs])
)

llama_3_1_70b = Model(
    name = "llama-3.1-70b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([DDG, DeepInfraChat, Blackbox, Blackbox2, TeachAnything, PollinationsAI, DarkAI, Airforce, RubiksAI, HuggingChat, HuggingFace, PerplexityLabs])
)

llama_3_1_405b = Model(
    name = "llama-3.1-405b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([Blackbox, AmigoChat])
)

# llama 3.2
llama_3_2_1b = Model(
    name = "llama-3.2-1b",
    base_provider = "Meta Llama",
    best_provider = Cloudflare
)

llama_3_2_11b = Model(
    name = "llama-3.2-11b",
    base_provider = "Meta Llama",
    best_provider = IterListProvider([HuggingChat, HuggingFace])
)

llama_3_2_90b = Model(
    name = "llama-3.2-90b",
    base_provider = "Meta Llama",
    best_provider = AmigoChat
)

# CodeLlama
codellama_34b = Model(
    name = "codellama-34b",
    base_provider = "Meta Llama",
    best_provider = AmigoChat
)

### Mistral ###
mixtral_7b = Model(
    name = "mixtral-7b",
    base_provider = "Mistral",
    best_provider = AmigoChat
)

mixtral_8x7b = Model(
    name = "mixtral-8x7b",
    base_provider = "Mistral",
    best_provider = DDG
)

mistral_tiny = Model(
    name = "mistral-tiny",
    base_provider = "Mistral",
    best_provider = AmigoChat
)

mistral_nemo = Model(
    name = "mistral-nemo",
    base_provider = "Mistral",
    best_provider = IterListProvider([PollinationsAI, HuggingChat, AmigoChat, HuggingFace])
)

mistral_large = Model(
    name = "mistral-large",
    base_provider = "Mistral",
    best_provider = PollinationsAI
)

### NousResearch ###
hermes_2_dpo = Model(
    name = "hermes-2-dpo",
    base_provider = "NousResearch",
    best_provider = Airforce
)

hermes_2_pro = Model(
    name = "hermes-2-pro",
    base_provider = "NousResearch",
    best_provider = Airforce
)

hermes_3 = Model(
    name = "hermes-3",
    base_provider = "NousResearch",
    best_provider = IterListProvider([HuggingChat, HuggingFace])
)

mixtral_8x7b_dpo = Model(
    name = "mixtral-8x7b-dpo",
    base_provider = "NousResearch",
    best_provider = IterListProvider([AmigoChat, Airforce])
)

### Microsoft ###
phi_2 = Model(
    name = "phi-2",
    base_provider = "Microsoft",
    best_provider = Airforce
)

phi_3_5_mini = Model(
    name = "phi-3.5-mini",
    base_provider = "Microsoft",
    best_provider = IterListProvider([HuggingChat, HuggingFace])
)

### Google DeepMind ###
# gemini
gemini_pro = Model(
    name = 'gemini-pro',
    base_provider = 'Google DeepMind',
    best_provider = IterListProvider([Blackbox, AIChatFree, GeminiPro, Liaobots])
)

gemini_flash = Model(
    name = 'gemini-flash',
    base_provider = 'Google DeepMind',
    best_provider = IterListProvider([Blackbox, AmigoChat, Liaobots])
)

gemini = Model(
    name = 'gemini',
    base_provider = 'Google DeepMind',
    best_provider = Gemini
)

# gemma
gemma_2b = Model(
    name = 'gemma-2b',
    base_provider = 'Google',
    best_provider = IterListProvider([ReplicateHome, AmigoChat])
)

### Anthropic ###
# claude 3
claude_3_opus = Model(
    name = 'claude-3-opus',
    base_provider = 'Anthropic',
    best_provider = Liaobots
)

claude_3_sonnet = Model(
    name = 'claude-3-sonnet',
    base_provider = 'Anthropic',
    best_provider = Liaobots
)

claude_3_haiku = Model(
    name = 'claude-3-haiku',
    base_provider = 'Anthropic',
    best_provider = IterListProvider([DDG, Liaobots])
)

# claude 3.5
claude_3_5_sonnet = Model(
    name = 'claude-3.5-sonnet',
    base_provider = 'Anthropic',
    best_provider = IterListProvider([Blackbox, PollinationsAI, AmigoChat, Liaobots])
)

claude_3_5_haiku = Model(
    name = 'claude-3.5-haiku',
    base_provider = 'Anthropic',
    best_provider = AmigoChat
)

### Reka AI ###
reka_core = Model(
    name = 'reka-core',
    base_provider = 'Reka AI',
    best_provider = Reka
)

### Blackbox AI ###
blackboxai = Model(
    name = 'blackboxai',
    base_provider = 'Blackbox AI',
    best_provider = Blackbox
)

blackboxai_pro = Model(
    name = 'blackboxai-pro',
    base_provider = 'Blackbox AI',
    best_provider = Blackbox
)

### CohereForAI ###
command_r_plus = Model(
    name = 'command-r-plus',
    base_provider = 'CohereForAI',
    best_provider = IterListProvider([PollinationsAI, HuggingChat, AmigoChat])
)

### Qwen ###
# qwen 1.5
qwen_1_5_7b = Model(
    name = 'qwen-1.5-7b',
    base_provider = 'Qwen',
    best_provider = Cloudflare
)

# qwen 2
qwen_2_72b = Model(
    name = 'qwen-2-72b',
    base_provider = 'Qwen',
    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
)

# qwen 2.5
qwen_2_5_72b = Model(
    name = 'qwen-2.5-72b',
    base_provider = 'Qwen',
    best_provider = IterListProvider([AmigoChat, HuggingChat, HuggingFace])
)

qwen_2_5_coder_32b = Model(
    name = 'qwen-2.5-coder-32b',
    base_provider = 'Qwen',
    best_provider = IterListProvider([DeepInfraChat, PollinationsAI, HuggingChat, HuggingFace])
)

qwq_32b = Model(
    name = 'qwq-32b',
    base_provider = 'Qwen',
    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
)

### Upstage ###
solar_mini = Model(
    name = 'solar-mini',
    base_provider = 'Upstage',
    best_provider = Upstage
)

solar_pro = Model(
    name = 'solar-pro',
    base_provider = 'Upstage',
    best_provider = Upstage
)

### Inflection ###
pi = Model(
    name = 'pi',
    base_provider = 'Inflection',
    best_provider = Pi
)

### DeepSeek ###
deepseek_chat = Model(
    name = 'deepseek-chat',
    base_provider = 'DeepSeek',
    best_provider = AmigoChat
)

deepseek_coder = Model(
    name = 'deepseek-coder',
    base_provider = 'DeepSeek',
    best_provider = Airforce
)

### WizardLM ###
wizardlm_2_8x22b = Model(
    name = 'wizardlm-2-8x22b',
    base_provider = 'WizardLM',
    best_provider = DeepInfraChat
)

### OpenChat ###
openchat_3_5 = Model(
    name = 'openchat-3.5',
    base_provider = 'OpenChat',
    best_provider = Airforce
)

### x.ai ###
grok_2 = Model(
    name = 'grok-2',
    base_provider = 'x.ai',
    best_provider = Liaobots
)

grok_2_mini = Model(
    name = 'grok-2-mini',
    base_provider = 'x.ai',
    best_provider = Liaobots
)

grok_beta = Model(
    name = 'grok-beta',
    base_provider = 'x.ai',
    best_provider = IterListProvider([AmigoChat, Liaobots])
)

### Perplexity AI ###
sonar_online = Model(
    name = 'sonar-online',
    base_provider = 'Perplexity AI',
    best_provider = PerplexityLabs
)

sonar_chat = Model(
    name = 'sonar-chat',
    base_provider = 'Perplexity AI',
    best_provider = PerplexityLabs
)

### Nvidia ###
nemotron_70b = Model(
    name = 'nemotron-70b',
    base_provider = 'Nvidia',
    best_provider = IterListProvider([DeepInfraChat, HuggingChat, HuggingFace])
)

### Teknium ###
openhermes_2_5 = Model(
    name = 'openhermes-2.5',
    base_provider = 'Teknium',
    best_provider = Airforce
)

### Liquid ###
lfm_40b = Model(
    name = 'lfm-40b',
    base_provider = 'Liquid',
    best_provider = IterListProvider([Airforce, PerplexityLabs])
)

### DiscoResearch ###
german_7b = Model(
    name = 'german-7b',
    base_provider = 'DiscoResearch',
    best_provider = Airforce
)

### HuggingFaceH4 ###
zephyr_7b = Model(
    name = 'zephyr-7b',
    base_provider = 'HuggingFaceH4',
    best_provider = Airforce
)

### Inferless ###
neural_7b = Model(
    name = 'neural-7b',
    base_provider = 'inferless',
    best_provider = Airforce
)

### Gryphe ###
mythomax_13b = Model(
    name = 'mythomax-13b',
    base_provider = 'Gryphe',
    best_provider = AmigoChat
)

### databricks ###
dbrx_instruct = Model(
    name = 'dbrx-instruct',
    base_provider = 'databricks',
    best_provider = AmigoChat
)

### anthracite-org ###
magnum_72b = Model(
    name = 'magnum-72b',
    base_provider = 'anthracite-org',
    best_provider = AmigoChat
)

### ai21 ###
jamba_mini = Model(
    name = 'jamba-mini',
    base_provider = 'ai21',
    best_provider = AmigoChat
)

### PollinationsAI ###
p1 = Model(
    name = 'p1',
    base_provider = 'PollinationsAI',
    best_provider = PollinationsAI
)

### Uncensored AI ###
evil = Model(
    name = 'evil',
    base_provider = 'Evil Mode - Experimental',
    best_provider = IterListProvider([PollinationsAI, Airforce])
)

#############
### Image ###
#############

### Stability AI ###
sdxl = ImageModel(
    name = 'sdxl',
    base_provider = 'Stability AI',
    best_provider = IterListProvider([ReplicateHome, Airforce])
)

sd_3 = ImageModel(
    name = 'sd-3',
    base_provider = 'Stability AI',
    best_provider = ReplicateHome
)

### Playground ###
playground_v2_5 = ImageModel(
    name = 'playground-v2.5',
    base_provider = 'Playground AI',
    best_provider = ReplicateHome
)

### Flux AI ###
flux = ImageModel(
    name = 'flux',
    base_provider = 'Flux AI',
    best_provider = IterListProvider([Blackbox, Blackbox2, PollinationsAI, Airforce])
)

flux_pro = ImageModel(
    name = 'flux-pro',
    base_provider = 'Flux AI',
    best_provider = IterListProvider([PollinationsAI, Airforce])
)

flux_dev = ImageModel(
    name = 'flux-dev',
    base_provider = 'Flux AI',
    best_provider = IterListProvider([Flux, AmigoChat, HuggingChat, HuggingFace])
)

flux_realism = ImageModel(
    name = 'flux-realism',
    base_provider = 'Flux AI',
    best_provider = IterListProvider([PollinationsAI, Airforce, AmigoChat])
)

flux_cablyai = Model(
    name = 'flux-cablyai',
    base_provider = 'Flux AI',
    best_provider = PollinationsAI
)

flux_anime = ImageModel(
    name = 'flux-anime',
    base_provider = 'Flux AI',
    best_provider = IterListProvider([PollinationsAI, Airforce])
)

flux_3d = ImageModel(
    name = 'flux-3d',
    base_provider = 'Flux AI',
    best_provider = IterListProvider([PollinationsAI, Airforce])
)

flux_disney = ImageModel(
    name = 'flux-disney',
    base_provider = 'Flux AI',
    best_provider = Airforce
)

flux_pixel = ImageModel(
    name = 'flux-pixel',
    base_provider = 'Flux AI',
    best_provider = Airforce
)

flux_4o = ImageModel(
    name = 'flux-4o',
    base_provider = 'Flux AI',
    best_provider = Airforce
)

### OpenAI ###
dall_e_3 = ImageModel(
    name = 'dall-e-3',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([Airforce, CopilotAccount, OpenaiAccount, MicrosoftDesigner, BingCreateImages])
)

### Recraft ###
recraft_v3 = ImageModel(
    name = 'recraft-v3',
    base_provider = 'Recraft',
    best_provider = AmigoChat
)

### Midjourney ###
midijourney = Model(
    name = 'midijourney',
    base_provider = 'Midjourney',
    best_provider = PollinationsAI
)

### Other ###
any_dark = ImageModel(
    name = 'any-dark',
    base_provider = 'Other',
    best_provider = IterListProvider([PollinationsAI, Airforce])
)

turbo = Model(
    name = 'turbo',
    base_provider = 'Other',
    best_provider = PollinationsAI
)

unity = Model(
    name = 'unity',
    base_provider = 'Other',
    best_provider = PollinationsAI
)

rtist = Model(
    name = 'rtist',
    base_provider = 'Other',
    best_provider = PollinationsAI
)

class ModelUtils:
    """
    Utility class for mapping string identifiers to Model instances.

    Attributes:
        convert (dict[str, Model]): Dictionary mapping model string identifiers to Model instances.
    """
    convert: dict[str, Model] = {
        ############
        ### Text ###
        ############

        ### OpenAI ###
        # gpt-3
        'gpt-3': gpt_35_turbo,

        # gpt-3.5
        'gpt-3.5-turbo': gpt_35_turbo,

        # gpt-4
        'gpt-4o': gpt_4o,
        'gpt-4o-mini': gpt_4o_mini,
        'gpt-4': gpt_4,
        'gpt-4-turbo': gpt_4_turbo,

        # o1
        'o1-preview': o1_preview,
        'o1-mini': o1_mini,

        ### Meta ###
        "meta-ai": meta,

        # llama-2
        'llama-2-7b': llama_2_7b,

        # llama-3
        'llama-3-8b': llama_3_8b,

        # llama-3.1
        'llama-3.1-8b': llama_3_1_8b,
        'llama-3.1-70b': llama_3_1_70b,
        'llama-3.1-405b': llama_3_1_405b,

        # llama-3.2
        'llama-3.2-1b': llama_3_2_1b,
        'llama-3.2-11b': llama_3_2_11b,
        'llama-3.2-90b': llama_3_2_90b,

        # CodeLlama
        'codellama-34b': codellama_34b,

        ### Mistral ###
        'mixtral-7b': mixtral_7b,
        'mixtral-8x7b': mixtral_8x7b,
        'mistral-tiny': mistral_tiny,
        'mistral-nemo': mistral_nemo,
        'mistral-large': mistral_large,

        ### NousResearch ###
        'mixtral-8x7b-dpo': mixtral_8x7b_dpo,
        'hermes-2-dpo': hermes_2_dpo,
        'hermes-2-pro': hermes_2_pro,
        'hermes-3': hermes_3,

        ### Microsoft ###
        'phi-2': phi_2,
        'phi-3.5-mini': phi_3_5_mini,

        ### Google ###
        # gemini
        'gemini': gemini,
        'gemini-pro': gemini_pro,
        'gemini-flash': gemini_flash,

        # gemma
        'gemma-2b': gemma_2b,

        ### Anthropic ###
        # claude 3
        'claude-3-opus': claude_3_opus,
        'claude-3-sonnet': claude_3_sonnet,
        'claude-3-haiku': claude_3_haiku,

        # claude 3.5
        'claude-3.5-sonnet': claude_3_5_sonnet,
        'claude-3.5-haiku': claude_3_5_haiku,

        ### Reka AI ###
        'reka-core': reka_core,

        ### Blackbox AI ###
        'blackboxai': blackboxai,
        'blackboxai-pro': blackboxai_pro,

        ### CohereForAI ###
        'command-r+': command_r_plus,

        ### GigaChat ###
        'gigachat': gigachat,

        ### Qwen ###
        # qwen 1.5
        'qwen-1.5-7b': qwen_1_5_7b,

        # qwen 2
        'qwen-2-72b': qwen_2_72b,

        # qwen 2.5
        'qwen-2.5-72b': qwen_2_5_72b,
        'qwen-2.5-coder-32b': qwen_2_5_coder_32b,
        'qwq-32b': qwq_32b,

        ### Upstage ###
        'solar-mini': solar_mini,
        'solar-pro': solar_pro,

        ### Inflection ###
        'pi': pi,

        ### WizardLM ###
        'wizardlm-2-8x22b': wizardlm_2_8x22b,

        ### OpenChat ###
        'openchat-3.5': openchat_3_5,

        ### x.ai ###
        'grok-2': grok_2,
        'grok-2-mini': grok_2_mini,
        'grok-beta': grok_beta,

        ### Perplexity AI ###
        'sonar-online': sonar_online,
        'sonar-chat': sonar_chat,

        ### DeepSeek ###
        'deepseek-chat': deepseek_chat,
        'deepseek-coder': deepseek_coder,

        ### DiscoResearch ###
        'german-7b': german_7b,

        ### Nvidia ###
        'nemotron-70b': nemotron_70b,

        ### Teknium ###
        'openhermes-2.5': openhermes_2_5,

        ### Liquid ###
        'lfm-40b': lfm_40b,

        ### databricks ###
        'dbrx-instruct': dbrx_instruct,

        ### anthracite-org ###
        'magnum-72b': magnum_72b,

        ### ai21 ###
        'jamba-mini': jamba_mini,

        ### HuggingFaceH4 ###
        'zephyr-7b': zephyr_7b,

        ### Inferless ###
        'neural-7b': neural_7b,

        ### Gryphe ###
        'mythomax-13b': mythomax_13b,

        ### PollinationsAI ###
        'p1': p1,

        ### Uncensored AI ###
        'evil': evil,

        #############
        ### Image ###
        #############

        ### Stability AI ###
        'sdxl': sdxl,
        'sd-3': sd_3,

        ### Playground ###
        'playground-v2.5': playground_v2_5,

        ### Flux AI ###
        'flux': flux,
        'flux-pro': flux_pro,
        'flux-dev': flux_dev,
        'flux-realism': flux_realism,
        'flux-cablyai': flux_cablyai,
        'flux-anime': flux_anime,
        'flux-3d': flux_3d,
        'flux-disney': flux_disney,
        'flux-pixel': flux_pixel,
        'flux-4o': flux_4o,

        ### OpenAI ###
        'dall-e-3': dall_e_3,

        ### Recraft ###
        'recraft-v3': recraft_v3,

        ### Midjourney ###
        'midijourney': midijourney,

        ### Other ###
        'any-dark': any_dark,
        'turbo': turbo,
        'unity': unity,
        'rtist': rtist,
    }
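
# Descriptive note (added for readability): the nested comprehension below
# makes three passes over ModelUtils.convert:
#   1. For each model, gather its candidate providers: the provider list of an
#      IterListProvider, a single provider wrapped in a list, or [] when
#      best_provider is None.
#   2. Keep only candidates whose `working` flag is set.
#   3. Drop models whose filtered provider list is empty.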
# Build a mapping of all working models: model name -> (model, working providers)
__models__ = {
    model.name: (model, providers)
    for model, providers in [
        (model, [provider for provider in providers if provider.working])
        for model, providers in [
            (model, model.best_provider.providers
                if isinstance(model.best_provider, IterListProvider)
                else [model.best_provider]
                if model.best_provider is not None
                else [])
            for model in ModelUtils.convert.values()
        ]
    ]
    if providers
}

# Update ModelUtils.convert with the working models
ModelUtils.convert = {model.name: model for model, _ in __models__.values()}

_all_models = list(ModelUtils.convert.keys())
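
# Example usage (illustrative sketch; assumes this module is imported as
# g4f.models and that the listed model id survived the working-provider filter):
#
#     from g4f.models import ModelUtils, __models__, _all_models
#
#     model = ModelUtils.convert['gpt-4o-mini']
#     print(model.name, model.base_provider)
#
#     _, working_providers = __models__[model.name]
#     print([provider.__name__ for provider in working_providers])
#
#     print(len(_all_models), 'models with at least one working provider')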