Mirror of https://github.com/xtekky/gpt4free.git, synced 2025-10-30 19:16:27 +08:00
Support "Think Deeper" in Copilot
Improve Documentation
README.md (15 changes)
@@ -97,13 +97,13 @@ Is your site on this repository and you want to take it down? Send an email to t
 1. **Install Docker:** [Download and install Docker](https://docs.docker.com/get-docker/).
 2. **Set Up Directories:** Before running the container, make sure the necessary data directories exist or can be created. For example, you can create and set ownership on these directories by running:
 ```bash
 mkdir -p ${PWD}/har_and_cookies ${PWD}/generated_images
 sudo chown -R 1200:1201 ${PWD}/har_and_cookies ${PWD}/generated_images
 ```
 3. **Run the Docker Container:** Use the following commands to pull the latest image and start the container (x64 only):
 ```bash
 docker pull hlohaus789/g4f
 docker run -p 8080:8080 -p 7900:7900 \
   --shm-size="2g" \
   -v ${PWD}/har_and_cookies:/app/har_and_cookies \
   -v ${PWD}/generated_images:/app/generated_images \
@@ -112,9 +112,9 @@ Is your site on this repository and you want to take it down? Send an email to t
 4. **Running the Slim Docker Image:** Use the following commands to run the Slim Docker image. This command also updates the `g4f` package at startup and installs any additional dependencies (x64 and arm64):
 ```bash
 mkdir -p ${PWD}/har_and_cookies ${PWD}/generated_images
 chown -R 1000:1000 ${PWD}/har_and_cookies ${PWD}/generated_images
 docker run \
   -p 1337:1337 \
   -v ${PWD}/har_and_cookies:/app/har_and_cookies \
   -v ${PWD}/generated_images:/app/generated_images \
@@ -248,6 +248,7 @@ Run the Web UI on your smartphone for easy access on the go. Check out the dedic
 - **File API from G4F:** [/docs/file](docs/file.md)
 - **PydanticAI and LangChain Integration for G4F:** [/docs/pydantic_ai](docs/pydantic_ai.md)
 - **Legacy API with python modules:** [/docs/legacy](docs/legacy.md)
+- **G4F - Media Documentation:** [/docs/media](docs/media.md) *(New)*
 
 ---
@@ -126,7 +126,7 @@ providers = {
 }
 
 for provider_name, api_key in providers.items():
-    client = Client(provider=f"g4f.Provider.{provider_name}", api_key=api_key)
+    client = Client(provider=provider_name, api_key=api_key)
     response = client.chat.completions.create(
         model="claude-3.5-sonnet",
         messages=[{"role": "user", "content": f"Hello from {provider_name}!"}]
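The example now passes the provider by its plain name and lets the client resolve it. For readers who prefer explicit imports, an equivalent form looks like this; a minimal sketch, where the provider choice and API key are illustrative:

```python
from g4f.client import Client
from g4f.Provider import Anthropic

# Equivalent to Client(provider="Anthropic", ...): pass the provider
# class itself instead of its name.
client = Client(provider=Anthropic, api_key="sk-ant-...")  # placeholder key
response = client.chat.completions.create(
    model="claude-3.5-sonnet",
    messages=[{"role": "user", "content": "Hello from Anthropic!"}],
)
print(response.choices[0].message.content)
```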
@@ -144,16 +144,22 @@ for provider_name, api_key in providers.items():
 - Firefox: **Storage** → **Cookies**
 
 ```python
 from g4f.client import Client
 from g4f.Provider import Gemini
 
-# Initialize with cookies
+# Using with cookies
 client = Client(
     provider=Gemini,
 )
 response = client.chat.completions.create(
     model="",  # Default model
     messages="Hello Google",
+    cookies={
+        "__Secure-1PSID": "your_cookie_value_here",
+        "__Secure-1PSIDTS": "timestamp_value_here"
+    }
 )
 print(f"Gemini: {response.choices[0].message.content}")
 ```
 
 ---
@@ -69,9 +69,9 @@ The G4F GUI is a self-contained, user-friendly interface designed for interactin
 - **Basic Authentication**
   You can set a password for Basic Authentication using the `--g4f-api-key` argument when starting the web server.
 
-### 9. **Continue Button (ChatGPT & HuggingChat)**
+### 9. **Continue Button**
 - **Automatic Detection of Truncated Responses**
-  When using **ChatGPT** or **HuggingChat** providers, responses may occasionally be cut off or truncated.
+  With any provider, responses may occasionally be cut off or truncated.
 - **Continue Button**
   If the GUI detects that the response ended abruptly, a **Continue** button appears directly below the truncated message. Clicking this button sends a follow-up request to the same provider and model, retrieving the rest of the message.
 - **Seamless Conversation Flow**
@@ -154,7 +154,7 @@ http://localhost:8080/chat/
 - **Text/Code:** The generated response appears in the conversation window.
 - **Images:** Generated images are displayed as thumbnails. Click on any thumbnail to view it in full size within the lightbox.
 
-5. **Continue Button (ChatGPT & HuggingChat)**
+5. **Continue Button**
    - If a response is truncated, a **Continue** button will appear under the last message. Clicking it asks the same provider to continue the response from where it ended.
 
 6. **Manage Conversations**
@@ -175,7 +175,7 @@ from g4f.Provider import HuggingFaceMedia
 async def main():
     client = AsyncClient(
         provider=HuggingFaceMedia,
-        api_key="hf_***"  # Your API key here
+        api_key=os.getenv("HF_TOKEN")  # Your API key here
     )
 
     video_models = client.models.get_video()
@@ -214,7 +214,7 @@ from g4f.Provider import HuggingFaceMedia
 async def main():
     client = AsyncClient(
         provider=HuggingFaceMedia,
-        api_key=os.getenv("HUGGINGFACE_API_KEY")  # Your API key here
+        api_key=os.getenv("HF_TOKEN")  # Your API key here
     )
 
     video_models = client.models.get_video()
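Both snippets now read the key from the same `HF_TOKEN` environment variable instead of a hard-coded string or an inconsistent variable name. A minimal sketch of providing that variable (the token value is a placeholder):

```python
import os

# Prefer exporting in the shell: export HF_TOKEN=hf_...
# For a quick local test you can set it in-process before creating the client:
os.environ.setdefault("HF_TOKEN", "hf_your_token_here")  # placeholder

api_key = os.getenv("HF_TOKEN")
if not api_key:
    raise RuntimeError("HF_TOKEN is not set; create one at https://huggingface.co/settings/tokens")
```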
@@ -83,9 +83,9 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
             pass
         data = {
             "messages": [{
-                "role": "user",
-                "parts": [{"type":"text", "text":message["content"]}] if isinstance(message["content"], str) else message["content"]} for message in messages],
+                **message,
+                "content": message["content"] if isinstance(message["content"], str) else "",
+                "parts": [{"type":"text", "text":message["content"]}] if isinstance(message["content"], str) else message} for message in messages],
             "lora": None,
             "model": model,
             "max_tokens": max_tokens,
@@ -24,7 +24,7 @@ from .openai.har_file import get_headers, get_har_files
 from ..typing import CreateResult, Messages, MediaListType
 from ..errors import MissingRequirementsError, NoValidHarFileError, MissingAuthError
 from ..requests.raise_for_status import raise_for_status
-from ..providers.response import BaseConversation, JsonConversation, RequestLogin, ImageResponse
+from ..providers.response import BaseConversation, JsonConversation, RequestLogin, ImageResponse, FinishReason, SuggestedFollowups
 from ..providers.asyncio import get_running_loop
 from ..tools.media import merge_media
 from ..requests import get_nodriver
@@ -46,10 +46,13 @@ class Copilot(AbstractProvider, ProviderModelMixin):
     supports_stream = True
 
     default_model = "Copilot"
-    models = [default_model]
+    models = [default_model, "Think Deeper"]
     model_aliases = {
         "gpt-4": default_model,
-        "o1": default_model,
+        "gpt-4o": default_model,
+        "o1": "Think Deeper",
+        "reasoning": "Think Deeper",
         "dall-e-3": default_model
     }
 
     websocket_url = "wss://copilot.microsoft.com/c/api/chat?api-version=2"
@@ -75,10 +78,10 @@ class Copilot(AbstractProvider, ProviderModelMixin):
     ) -> CreateResult:
         if not has_curl_cffi:
             raise MissingRequirementsError('Install or update "curl_cffi" package | pip install -U curl_cffi')
 
         model = cls.get_model(model)
         websocket_url = cls.websocket_url
         headers = None
         if cls.needs_auth or media is not None:
-            if cls._access_token:
+            if api_key is not None:
                 cls._access_token = api_key
             if cls._access_token is None:
@@ -163,6 +166,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
             #     "token": clarity_token,
             #     "method": "clarity"
             # }).encode(), CurlWsFlag.TEXT)
+            wss.send(json.dumps({"event":"setOptions","supportedCards":["weather","local","image","sports","video","ads","finance"],"ads":{"supportedTypes":["multimedia","product","tourActivity","propertyPromotion","text"]}}));
             wss.send(json.dumps({
                 "event": "send",
                 "conversationId": conversation_id,
@@ -170,7 +174,7 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                     "type": "text",
                     "text": prompt,
                 }],
-                "mode": "chat"
+                "mode": "reasoning" if "Think" in model else "chat",
             }).encode(), CurlWsFlag.TEXT)
 
         is_started = False
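With the new aliases above, requesting `o1` or `reasoning` resolves to the `Think Deeper` model, which in turn flips the websocket payload to `"mode": "reasoning"`. A minimal usage sketch, assuming the standard g4f client API:

```python
from g4f.client import Client
from g4f.Provider import Copilot

client = Client(provider=Copilot)

# "o1" resolves via model_aliases to "Think Deeper", so the provider
# sends "mode": "reasoning" over the websocket.
response = client.chat.completions.create(
    model="o1",
    messages=[{"role": "user", "content": "How many primes are below 100?"}],
)
print(response.choices[0].message.content)
```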
@@ -193,6 +197,10 @@ class Copilot(AbstractProvider, ProviderModelMixin):
                 elif msg.get("event") == "imageGenerated":
                     yield ImageResponse(msg.get("url"), image_prompt, {"preview": msg.get("thumbnailUrl")})
                 elif msg.get("event") == "done":
+                    yield FinishReason("stop")
                     break
+                elif msg.get("event") == "suggestedFollowups":
+                    yield SuggestedFollowups(msg.get("suggestions"))
+                    break
                 elif msg.get("event") == "replaceText":
                     yield msg.get("text")
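A sketch of how a caller could surface the new `FinishReason` and `SuggestedFollowups` objects when iterating the provider's raw stream (the prompt is illustrative; these hidden responses are filtered out of plain-text results, so they are checked by type here):

```python
from g4f.Provider import Copilot
from g4f.providers.response import FinishReason, SuggestedFollowups

for chunk in Copilot.create_completion(
    model="Copilot",
    messages=[{"role": "user", "content": "Tell me a fun fact."}],
    stream=True,
):
    if isinstance(chunk, SuggestedFollowups):
        print("\nFollow-ups:", chunk.suggestions)
    elif isinstance(chunk, FinishReason):
        print("\nFinished:", chunk.reason)
    elif isinstance(chunk, str):
        print(chunk, end="")
```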
g4f/Provider/DuckDuckGo.py (new file, 85 lines)
@@ -0,0 +1,85 @@
from __future__ import annotations

import asyncio

try:
    from duckduckgo_search import DDGS
    from duckduckgo_search.exceptions import DuckDuckGoSearchException, RatelimitException, ConversationLimitException
    has_requirements = True
except ImportError:
    has_requirements = False
try:
    import nodriver
    has_nodriver = True
except ImportError:
    has_nodriver = False

from ..typing import AsyncResult, Messages
from ..requests import get_nodriver
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import get_last_user_message

class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
    label = "Duck.ai (duckduckgo_search)"
    url = "https://duckduckgo.com/aichat"
    api_base = "https://duckduckgo.com/duckchat/v1/"

    working = False
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "gpt-4o-mini"
    models = [default_model, "meta-llama/Llama-3.3-70B-Instruct-Turbo", "claude-3-haiku-20240307", "o3-mini", "mistralai/Mistral-Small-24B-Instruct-2501"]

    ddgs: DDGS = None

    model_aliases = {
        "gpt-4": "gpt-4o-mini",
        "llama-3.3-70b": "meta-llama/Llama-3.3-70B-Instruct-Turbo",
        "claude-3-haiku": "claude-3-haiku-20240307",
        "mixtral-small-24b": "mistralai/Mistral-Small-24B-Instruct-2501",
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        timeout: int = 60,
        **kwargs
    ) -> AsyncResult:
        if not has_requirements:
            raise ImportError("duckduckgo_search is not installed. Install it with `pip install duckduckgo-search`.")
        if cls.ddgs is None:
            cls.ddgs = DDGS(proxy=proxy, timeout=timeout)
            if has_nodriver:
                await cls.nodriver_auth(proxy=proxy)
        model = cls.get_model(model)
        for chunk in cls.ddgs.chat_yield(get_last_user_message(messages), model, timeout):
            yield chunk

    @classmethod
    async def nodriver_auth(cls, proxy: str = None):
        browser, stop_browser = await get_nodriver(proxy=proxy)
        try:
            page = browser.main_tab
            def on_request(event: nodriver.cdp.network.RequestWillBeSent, page=None):
                if cls.api_base in event.request.url:
                    if "X-Vqd-4" in event.request.headers:
                        cls.ddgs._chat_vqd = event.request.headers["X-Vqd-4"]
                    if "X-Vqd-Hash-1" in event.request.headers:
                        cls.ddgs._chat_vqd_hash = event.request.headers["X-Vqd-Hash-1"]
                    if "F-Fe-Version" in event.request.headers:
                        cls.ddgs._chat_xfe = event.request.headers["F-Fe-Version"]
            await page.send(nodriver.cdp.network.enable())
            page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
            page = await browser.get(cls.url)
            while True:
                if cls.ddgs._chat_vqd:
                    break
                await asyncio.sleep(1)
            await page.close()
        finally:
            stop_browser()
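A quick usage sketch for the new provider, assuming `duckduckgo_search` (and optionally `nodriver` for header capture) is installed; note the class ships with `working = False`, so treat this as illustrative:

```python
import asyncio

from g4f.client import AsyncClient
from g4f.Provider import DuckDuckGo

async def main():
    client = AsyncClient(provider=DuckDuckGo)
    response = await client.chat.completions.create(
        model="gpt-4o-mini",  # the default; aliases like "claude-3-haiku" also resolve
        messages=[{"role": "user", "content": "Hello from Duck.ai!"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```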
@@ -40,6 +40,7 @@ try:
     from .Copilot import Copilot
     from .DDG import DDG
     from .DeepInfraChat import DeepInfraChat
+    from .DuckDuckGo import DuckDuckGo
     from .Dynaspark import Dynaspark
 except ImportError as e:
     debug.error("Providers not loaded (A-D):", e)
@@ -19,11 +19,6 @@ class CopilotAccount(AsyncAuthedProvider, Copilot):
     parent = "Copilot"
     default_model = "Copilot"
     default_vision_model = default_model
-    models = [default_model]
-    image_models = models
-    model_aliases = {
-        "dall-e-3": default_model
-    }
 
     @classmethod
     async def on_auth_async(cls, proxy: str = None, **kwargs) -> AsyncIterator:
@@ -68,6 +68,7 @@ class ImageGenerationConfig(BaseModel):
     aspect_ratio: Optional[str] = None
     n: Optional[int] = None
     negative_prompt: Optional[str] = None
+    resolution: Optional[str] = None
 
 class ProviderResponseModel(BaseModel):
     id: str
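`ImageGenerationConfig` backs the image-generation endpoint's request body, so the new field should be settable per request. A hedged sketch against a locally running g4f API server; the route and the accepted `resolution` values are assumptions and depend on the provider handling the request:

```python
import requests

# Route assumed from g4f's OpenAI-style API; adjust if your version differs.
resp = requests.post(
    "http://localhost:1337/v1/images/generate",
    json={
        "prompt": "a lighthouse at dawn",
        "model": "flux",            # illustrative model name
        "resolution": "1024x1024",  # the newly added optional field
    },
)
print(resp.json())
```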
@@ -1325,7 +1325,12 @@ const delete_conversation = async (conversation_id) => {
             }
         }
     }
+    if (window.share_id && conversation_id == window.start_id) {
+        const url = `${window.share_url}/backend-api/v2/files/${window.share_id}`;
+        await fetch(url, {
+            method: 'DELETE'
+        });
+    }
     appStorage.removeItem(`conversation:${conversation_id}`);
     const item = document.getElementById(`convo-${conversation_id}`);
     item.remove();
@@ -2622,7 +2627,7 @@ function connectToSSE(url, do_refine, bucket_id) {
         } else if (data.action == "media") {
             inputCount.innerText = `File: ${data.filename}`;
             const url = `/files/${bucket_id}/media/${data.filename}`;
-            const media = [{bucket_id: bucket_id, url: url}];
+            const media = [{bucket_id: bucket_id, url: url, name: data.filename}];
             await handle_ask(false, media);
         } else if (data.action == "load") {
             inputCount.innerText = `Read data: ${formatFileSize(data.size)}`;
@@ -37,7 +37,7 @@ EXTENSIONS_MAP: dict[str, str] = {
     "flac": "audio/flac",
     "opus": "audio/opus",
     "ogg": "audio/ogg",
-    "m4a": "audio/mp4",
+    "m4a": "audio/m4a",
     # Video
     "mkv": "video/x-matroska",
     "webm": "video/webm",
@@ -284,6 +284,10 @@ class SynthesizeData(HiddenResponse, JsonMixin):
         self.provider = provider
         self.data = data
 
+class SuggestedFollowups(HiddenResponse):
+    def __init__(self, suggestions: list[str]):
+        self.suggestions = suggestions
+
 class RequestLogin(HiddenResponse):
     def __init__(self, label: str, login_url: str) -> None:
         """Initialize with label and login URL."""
@@ -13,6 +13,7 @@ try:
     from duckduckgo_search import DDGS
     from duckduckgo_search.exceptions import DuckDuckGoSearchException
    from bs4 import BeautifulSoup
+    ddgs = DDGS()
     has_requirements = True
 except ImportError:
     has_requirements = False
@@ -164,7 +165,7 @@ async def fetch_and_scrape(session: ClientSession, url: str, max_words: int = No
 async def search(query: str, max_results: int = 5, max_words: int = 2500, backend: str = "auto", add_text: bool = True, timeout: int = 5, region: str = "wt-wt") -> SearchResults:
     if not has_requirements:
         raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package | pip install -U g4f[search]')
-    with DDGS() as ddgs:
 
     results = []
     for result in ddgs.text(
         query,
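The helper now reuses one module-level `DDGS` session instead of opening a new one on every call, so repeated searches avoid re-initializing the client. A usage sketch of the tool itself (module path as in the g4f source tree; printing the result assumes `SearchResults` renders itself as text):

```python
import asyncio

from g4f.tools.web_search import search

async def main():
    results = await search("gpt4free documentation", max_results=3)
    print(results)

asyncio.run(main())
```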