mirror of
https://github.com/xtekky/gpt4free.git
synced 2025-12-24 13:07:53 +08:00
* Update DDG.py: added Llama 3.3 Instruct and o3-mini Duck.ai now supports o3-mini, and previous Llama 3.1 70B is now replaced by Llama 3.3 70B. * Update DDG.py: change Llama 3.3 70B Instruct ID to "meta-llama/Llama-3.3-70B-Instruct-Turbo" Fixed typo in full model name * Update Cerebras.py: add "deepseek-r1-distill-llama-70b" to the models list Cerebras now provides inference for a DeepSeek-R1 distilled to Llama 3.3 70B as well. * Update models.py: reflect changes in DDG provider - Removed DDG from best providers list for Llama 3.1 70B - Added DDG to best providers list for o3-mini and Llama 3.3 70B * A small update in HuggingFaceInference get_models() method Previously, get_models() method was returning "TypeError: string indices must be integers, not 'str' on line 31" from time to time, possibly because of network error so the models list couldn't load and method was trying to parse this data. Now the code is updated in order to check for any potential errors first. * Update BlackboxAPI.py: remove unused imports format_prompt() and JSON library are not being used here, so they may be removed safely. * Update copilot.yml This job is failing due to the error in JavaScript code; this commit is trying to fix it. * Update providers-and-models.md to reflect latest changes Updated models list and removed providers that are currently not working.
53 lines
1.7 KiB
Python
53 lines
1.7 KiB
Python
from __future__ import annotations
|
|
|
|
from aiohttp import ClientSession
|
|
|
|
from .OpenaiAPI import OpenaiAPI
|
|
from ...typing import AsyncResult, Messages, Cookies
|
|
from ...requests.raise_for_status import raise_for_status
|
|
from ...cookies import get_cookies
|
|
|
|
class Cerebras(OpenaiAPI):
    """Cerebras Inference provider (OpenAI-compatible chat-completions API).

    If no API key is supplied, a demo key is pulled from the user's
    ``.cerebras.ai`` session cookies before delegating to the OpenAI-style
    base implementation.
    """

    label = "Cerebras Inference"
    url = "https://inference.cerebras.ai/"
    login_url = "https://cloud.cerebras.ai"
    api_base = "https://api.cerebras.ai/v1"
    working = True

    default_model = "llama3.1-70b"
    models = [
        default_model,
        "llama3.1-8b",
        "llama-3.3-70b",
        "deepseek-r1-distill-llama-70b",
    ]
    # Map publicly advertised alias names onto the model IDs the Cerebras API expects.
    model_aliases = {
        "llama-3.1-70b": default_model,
        "llama-3.1-8b": "llama3.1-8b",
        "deepseek-r1": "deepseek-r1-distill-llama-70b",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        api_key: str = None,
        cookies: Cookies = None,
        **kwargs
    ) -> AsyncResult:
        """Yield completion chunks for *messages* from the Cerebras API.

        Args:
            model: Model ID (or alias) to query.
            messages: Conversation history in OpenAI message format.
            api_key: Explicit API key; when ``None``, a demo key is fetched
                via the browser session cookies.
            cookies: Cookie jar for the session lookup; defaults to the
                cookies stored for ``.cerebras.ai``.
        """
        if api_key is None:
            # No key given: try to recover a demo key from the logged-in
            # browser session on inference.cerebras.ai.
            jar = cookies if cookies is not None else get_cookies(".cerebras.ai")
            async with ClientSession(cookies=jar) as http:
                async with http.get("https://inference.cerebras.ai/api/auth/session") as response:
                    await raise_for_status(response)
                    payload = await response.json()
                    if payload:
                        api_key = payload.get("user", {}).get("demoApiKey")
        async for chunk in super().create_async_generator(
            model, messages,
            impersonate="chrome",
            api_key=api_key,
            headers={
                "User-Agent": "ex/JS 1.5.0",
            },
            **kwargs
        ):
            yield chunk