Mirror of https://github.com/xtekky/gpt4free.git
Fix model and provider in chat completion response
Add login button to HuggingFace demo
Custom conversation IDs in chat UI
Remove rate limiter in demo mode
Improve YouTube support in Gemini
@@ -14,12 +14,6 @@ from pathlib import Path
 from urllib.parse import quote_plus
 from hashlib import sha256
 from werkzeug.utils import secure_filename
-try:
-    from flask_limiter import Limiter
-    from flask_limiter.util import get_remote_address
-    has_flask_limiter = True
-except ImportError:
-    has_flask_limiter = False
 
 from ...image import is_allowed_extension, to_image
 from ...client.service import convert_to_provider
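The removed import block used the common optional-dependency pattern: try the import once at module load and record the outcome in a flag that later code can branch on. A minimal standalone sketch of that pattern; the helper function is hypothetical and only shows how such a flag is typically consumed (the original code branched on `has_flask_limiter and app.demo`):

# Optional-dependency pattern from the removed block: the import is attempted
# once, and a module-level flag records whether Flask-Limiter is installed.
try:
    from flask_limiter import Limiter
    from flask_limiter.util import get_remote_address
    has_flask_limiter = True
except ImportError:
    has_flask_limiter = False

def rate_limiting_status() -> str:
    # Hypothetical helper, not part of the diff: it only illustrates
    # branching on the flag instead of failing when the package is missing.
    if has_flask_limiter:
        return "Flask-Limiter available: demo rate limiting can be enabled"
    return "Flask-Limiter not installed: rate limiting silently disabled"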
@@ -62,19 +56,8 @@ class Backend_Api(Api):
         """
         self.app: Flask = app
 
-        if has_flask_limiter and app.demo:
-            limiter = Limiter(
-                get_remote_address,
-                app=app,
-                default_limits=["200 per day", "50 per hour"],
-                storage_uri="memory://",
-                auto_check=False,
-                strategy="moving-window",
-            )
-
-        if has_flask_limiter and app.demo:
+        if app.demo:
             @app.route('/', methods=['GET'])
-            @limiter.exempt
             def home():
                 return render_template('demo.html', backend_url=os.environ.get("G4F_BACKEND_URL", ""))
         else:
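After this hunk, the demo landing page is registered whenever `app.demo` is set, and the limiter exemption disappears along with the limiter itself. A minimal sketch of the resulting route in isolation, assuming a bare Flask app with a `demo` flag and an existing templates/demo.html; only the route body and the G4F_BACKEND_URL lookup come from the diff:

import os
from flask import Flask, render_template

app = Flask(__name__)
app.demo = True  # flag the diff uses to switch the UI into demo mode

if app.demo:
    @app.route('/', methods=['GET'])
    def home():
        # The template receives the backend URL from the environment,
        # falling back to an empty string when G4F_BACKEND_URL is unset.
        # Rendering assumes templates/demo.html exists next to this module.
        return render_template('demo.html', backend_url=os.environ.get("G4F_BACKEND_URL", ""))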
@@ -116,7 +99,7 @@ class Backend_Api(Api):
             }
             for model, providers in models.demo_models.values()]
 
-        def handle_conversation(limiter_check: callable = None):
+        def handle_conversation():
             """
             Handles conversation requests and streams responses back.
 
@@ -135,7 +118,7 @@ class Backend_Api(Api):
             else:
                 json_data = request.json
 
-            if app.demo and json_data.get("provider") not in ["Custom", "Feature", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
+            if app.demo and json_data.get("provider") not in ["DeepSeekAPI", "OpenaiChat", "HuggingFace", "HuggingSpace", "HuggingChat", "G4F", "PollinationsAI"]:
                 model = json_data.get("model")
                 if model != "default" and model in models.demo_models:
                     json_data["provider"] = random.choice(models.demo_models[model][1])
@@ -143,8 +126,6 @@ class Backend_Api(Api):
                 if not model or model == "default":
                     json_data["model"] = models.demo_models["default"][0].name
                     json_data["provider"] = random.choice(models.demo_models["default"][1])
-            if limiter_check is not None and json_data.get("provider") in ["Feature"]:
-                limiter_check()
            if "images" in json_data:
                kwargs["images"] = json_data["images"]
            kwargs = self._prepare_conversation_kwargs(json_data, kwargs)
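The demo branch above rewrites the request payload so that a known model always gets a concrete provider, chosen at random from the providers registered for it, with a fall-back to the `default` entry. A standalone sketch of that selection logic; the `demo_models` dict here is an illustrative stand-in for `models.demo_models`, whose first tuple element is a model object with a `.name` attribute rather than a plain string:

import random

# Illustrative stand-in for models.demo_models: name -> (model, providers).
demo_models = {
    "default": ("default-model", ["PollinationsAI", "G4F"]),
    "demo-model": ("demo-model", ["HuggingSpace", "HuggingFace"]),
}

def apply_demo_provider(json_data: dict) -> dict:
    """Mirror of the demo-mode branch: pick a provider for the requested model."""
    model = json_data.get("model")
    if model != "default" and model in demo_models:
        json_data["provider"] = random.choice(demo_models[model][1])
    if not model or model == "default":
        json_data["model"] = demo_models["default"][0]
        json_data["provider"] = random.choice(demo_models["default"][1])
    return json_data

# Example: apply_demo_provider({"model": "demo-model"}) returns the payload
# with "provider" set to one of the providers registered for "demo-model".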
@@ -158,15 +139,9 @@ class Backend_Api(Api):
                 mimetype='text/event-stream'
             )
 
-        if has_flask_limiter and app.demo:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            @limiter.limit("2 per minute")
-            def _handle_conversation():
-                return handle_conversation(limiter.check)
-        else:
-            @app.route('/backend-api/v2/conversation', methods=['POST'])
-            def _handle_conversation():
-                return handle_conversation()
+        @app.route('/backend-api/v2/conversation', methods=['POST'])
+        def _handle_conversation():
+            return handle_conversation()
 
         @app.route('/backend-api/v2/usage', methods=['POST'])
         def add_usage():
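The deleted branch combined a per-route limit with Flask-Limiter's manual-check mode: `auto_check=False` in the constructor defers enforcement until `check()` is called inside the view, which is why the old `handle_conversation` accepted a `limiter_check` callable. A minimal sketch of that removed pattern on its own, with the limiter arguments, limit value, and route path copied from the diff and a placeholder response body:

from flask import Flask
from flask_limiter import Limiter
from flask_limiter.util import get_remote_address

app = Flask(__name__)

limiter = Limiter(
    get_remote_address,
    app=app,
    default_limits=["200 per day", "50 per hour"],
    storage_uri="memory://",
    auto_check=False,           # limits are recorded but not enforced automatically
    strategy="moving-window",
)

@app.route('/backend-api/v2/conversation', methods=['POST'])
@limiter.limit("2 per minute")
def _handle_conversation():
    # With auto_check=False the decorator only attaches the limit;
    # calling check() here is what actually rejects over-limit requests.
    limiter.check()
    return "placeholder response"

Since the commit drops the limiter entirely in demo mode, the simplified registration in the last hunk needs neither the decorator nor the manual check.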