Added new provider (g4f/Provider/Editee.py)

kqlio67
2024-10-16 21:45:37 +03:00
parent 9f394f9613
commit bbf41daf37
3 changed files with 90 additions and 3 deletions

g4f/Provider/Editee.py (new file, 78 lines)

@@ -0,0 +1,78 @@
from __future__ import annotations

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class Editee(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Editee"
    url = "https://editee.com"
    api_endpoint = "https://editee.com/submit/chatgptfree"
    working = True
    supports_gpt_4 = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'claude'
    models = ['claude', 'gpt4', 'gemini', 'mistrallarge']

    model_aliases = {
        "claude-3.5-sonnet": "claude",
        "gpt-4o": "gpt4",
        "gemini-pro": "gemini",
        "mistral-large": "mistrallarge",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        # Resolve an alias (e.g. "gpt-4o" -> "gpt4"); fall back to the default model.
        if model in cls.models:
            return model
        elif model in cls.model_aliases:
            return cls.model_aliases[model]
        else:
            return cls.default_model

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)

        headers = {
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US,en;q=0.9",
            "Cache-Control": "no-cache",
            "Content-Type": "application/json",
            "Origin": cls.url,
            "Pragma": "no-cache",
            "Priority": "u=1, i",
            "Referer": f"{cls.url}/chat-gpt",
            "Sec-CH-UA": '"Chromium";v="129", "Not=A?Brand";v="8"',
            "Sec-CH-UA-Mobile": '?0',
            "Sec-CH-UA-Platform": '"Linux"',
            "Sec-Fetch-Dest": 'empty',
            "Sec-Fetch-Mode": 'cors',
            "Sec-Fetch-Site": 'same-origin',
            "User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36',
            "X-Requested-With": 'XMLHttpRequest',
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "user_input": prompt,
                "context": " ",
                "template_id": "",
                "selected_model": model
            }
            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                response.raise_for_status()
                response_data = await response.json()
                # The endpoint returns the whole completion in one JSON payload,
                # so it is yielded as a single chunk.
                yield response_data['text']
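
A minimal usage sketch (not part of the commit), driving the new provider directly with asyncio; the prompt text is illustrative:

import asyncio
from g4f.Provider.Editee import Editee

async def main():
    # "claude-3.5-sonnet" resolves to "claude" through model_aliases above.
    async for chunk in Editee.create_async_generator(
        model="claude-3.5-sonnet",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
    ):
        print(chunk, end="")  # the response arrives as a single text chunk

asyncio.run(main())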

g4f/Provider/__init__.py

@@ -38,6 +38,7 @@ from .DDG import DDG
from .DeepInfra import DeepInfra
from .DeepInfraChat import DeepInfraChat
from .DeepInfraImage import DeepInfraImage
from .Editee import Editee
from .FlowGpt import FlowGpt
from .Free2GPT import Free2GPT
from .FreeChatgpt import FreeChatgpt
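
A quick sketch (not part of the commit) of what this registration enables at the package level:

from g4f.Provider import Editee
print(Editee.label, Editee.working)  # -> Editee True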

g4f/models.py

@@ -24,6 +24,7 @@ from .Provider import (
    DeepInfra,
    DeepInfraChat,
    DeepInfraImage,
    Editee,
    Free2GPT,
    FreeChatgpt,
    FreeGpt,
@@ -128,7 +129,7 @@ gpt_35_turbo = Model(
gpt_4o = Model(
    name = 'gpt-4o',
    base_provider = 'OpenAI',
    best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Liaobots, Airforce, OpenaiChat])
    best_provider = IterListProvider([NexraChatGPT4o, Blackbox, ChatGptEs, AmigoChat, DarkAI, Editee, Liaobots, Airforce, OpenaiChat])
)
gpt_4o_mini = Model(
@@ -288,6 +289,12 @@ mistral_nemo = Model(
    best_provider = IterListProvider([HuggingChat, HuggingFace])
)
mistral_large = Model(
name = "mistral-large",
base_provider = "Mistral",
best_provider = Editee
)
### NousResearch ###
mixtral_8x7b_dpo = Model(
@@ -333,7 +340,7 @@ phi_3_5_mini = Model(
gemini_pro = Model(
    name = 'gemini-pro',
    base_provider = 'Google DeepMind',
    best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Liaobots, Airforce])
    best_provider = IterListProvider([GeminiPro, Blackbox, AIChatFree, GPROChat, AmigoChat, Editee, Liaobots, Airforce])
)
gemini_flash = Model(
@@ -417,7 +424,7 @@ claude_3_haiku = Model(
claude_3_5_sonnet = Model(
    name = 'claude-3.5-sonnet',
    base_provider = 'Anthropic',
    best_provider = IterListProvider([Blackbox, Airforce, AmigoChat, Liaobots])
    best_provider = IterListProvider([Blackbox, Editee, AmigoChat, Airforce, Liaobots])
)
@@ -917,6 +924,7 @@ class ModelUtils:
        'mixtral-8x7b': mixtral_8x7b,
        'mixtral-8x22b': mixtral_8x22b,
        'mistral-nemo': mistral_nemo,
        'mistral-large': mistral_large,

        ### NousResearch ###
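
With the registry entry in place, the model resolves from its string name. A sketch (not part of the commit; it assumes the mapping shown above is exposed as ModelUtils.convert):

from g4f.models import ModelUtils, mistral_large

model = ModelUtils.convert['mistral-large']   # assumed dict name
assert model is mistral_large
print(model.name, model.base_provider)        # -> mistral-large Mistral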