Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-12-24 13:07:53 +08:00
Update default model to gpt-5-2 and adjust text models accordingly
@@ -350,23 +350,24 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
     @classmethod
     async def create_authed(
         cls,
         model: str,
         messages: Messages,
         auth_result: AuthResult,
         proxy: str = None,
         timeout: int = 360,
         auto_continue: bool = False,
         action: Optional[str] = None,
         conversation: Conversation = None,
         media: MediaListType = None,
         return_conversation: bool = True,
         web_search: bool = False,
         prompt: str = None,
         conversation_mode: Optional[dict] = None,
         temporary: Optional[bool] = None,
         conversation_id: Optional[str] = None,
+        reasoning_effort: Optional[str] = None,
         **kwargs
     ) -> AsyncResult:
         """
         Create an asynchronous generator for the conversation.
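The only change to the signature is the new reasoning_effort keyword. As a hedged sketch of how a caller could pass it through, the snippet below uses g4f's public AsyncClient and forwards reasoning_effort as an extra keyword argument; the model name and prompt are placeholders, and the exact forwarding path is an assumption rather than code from this commit.

import asyncio
from g4f.client import AsyncClient

async def main():
    # Illustrative only: extra keyword arguments are forwarded to the selected
    # provider, so reasoning_effort would reach OpenaiChat.create_authed (assumption).
    client = AsyncClient()
    response = await client.chat.completions.create(
        model="gpt-5-2-thinking",  # placeholder pick from the updated text_models list
        messages=[{"role": "user", "content": "Give a one-paragraph summary of RFC 9110."}],
        reasoning_effort="high",   # maps to thinking_effort="extended" in the request payload
    )
    print(response.choices[0].message.content)

asyncio.run(main())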
@@ -394,9 +395,9 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
         if action is None:
             action = "next"
         async with StreamSession(
             proxy=proxy,
             impersonate="chrome",
             timeout=timeout
         ) as session:
             image_requests = None
             media = merge_media(media, messages)
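For readers unfamiliar with this helper, here is a minimal sketch of how such a session is typically driven, assuming g4f.requests.StreamSession exposes post() as an async context manager with a line iterator; the endpoint and payload below are placeholders, not values taken from this diff.

from g4f.requests import StreamSession

async def stream_chat(payload: dict, proxy: str = None, timeout: int = 360):
    # Chrome-impersonated streaming session, mirroring the parameters in the hunk above.
    async with StreamSession(proxy=proxy, impersonate="chrome", timeout=timeout) as session:
        # Placeholder endpoint; the provider posts its conversation payload similarly (assumption).
        async with session.post("https://chatgpt.com/backend-api/conversation", json=payload) as response:
            async for line in response.iter_lines():
                yield line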
@@ -457,6 +458,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 "system_hints": [
                     "picture_v2"
                 ] if image_model else [],
+                "thinking_effort": "extended" if reasoning_effort == "high" else "standard",
                 "supports_buffering": True,
                 "supported_encodings": ["v1"]
             }
@@ -517,6 +519,7 @@ class OpenaiChat(AsyncAuthedProvider, ProviderModelMixin):
                 "conversation_mode": {"kind": "primary_assistant"},
                 "enable_message_followups": True,
                 "system_hints": ["search"] if web_search else None,
+                "thinking_effort": "extended" if reasoning_effort == "high" else "standard",
                 "supports_buffering": True,
                 "supported_encodings": ["v1"],
                 "client_contextual_info": {"is_dark_mode": False, "time_since_loaded": random.randint(20, 500),
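Both hunks add the same ternary, which maps the public reasoning_effort argument onto the web API's thinking_effort field. Pulled out as a standalone helper purely for illustration (the commit itself keeps it inline):

from typing import Optional

def map_thinking_effort(reasoning_effort: Optional[str]) -> str:
    # Only "high" selects extended thinking; any other value, including None, stays standard.
    return "extended" if reasoning_effort == "high" else "standard"

assert map_thinking_effort("high") == "extended"
assert map_thinking_effort(None) == "standard"
assert map_thinking_effort("medium") == "standard"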
@@ -1,7 +1,7 @@
-default_model = "gpt-5-1"
+default_model = "gpt-5-2"
 default_image_model = "gpt-image"
 image_models = [default_image_model]
-text_models = [default_model, "gpt-5-1-instant", "gpt-5-1-thinking", "gpt-5", "gpt-5-instant", "gpt-5-thinking", "gpt-4", "gpt-4.1", "gpt-4.1-mini", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"]
+text_models = [default_model, "gpt-5-2-instant", "gpt-5-2-thinking", "gpt-5-1", "gpt-5-1-instant", "gpt-5-1-thinking", "gpt-5", "gpt-5-instant", "gpt-5-thinking", "gpt-4", "gpt-4.1", "gpt-4.1-mini", "gpt-4.5", "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "o3-mini", "o3-mini-high", "o4-mini", "o4-mini-high"]
 vision_models = text_models
 models = text_models + image_models
 model_aliases = {
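The net effect of this second hunk is that an unspecified model now resolves to "gpt-5-2" while the gpt-5-1 family stays selectable. The resolve_model helper below is a hypothetical stand-in for the provider's model-selection logic, shown only to make that behaviour concrete:

from typing import Optional

default_model = "gpt-5-2"
text_models = [default_model, "gpt-5-2-instant", "gpt-5-2-thinking",
               "gpt-5-1", "gpt-5-1-instant", "gpt-5-1-thinking"]  # truncated for brevity

def resolve_model(requested: Optional[str]) -> str:
    # Hypothetical fallback: no model requested -> new default "gpt-5-2".
    if not requested:
        return default_model
    if requested in text_models:
        return requested
    raise ValueError(f"Unknown model: {requested}")

assert resolve_model(None) == "gpt-5-2"
assert resolve_model("gpt-5-1-thinking") == "gpt-5-1-thinking"  # older names remain available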