Mirror of https://github.com/xtekky/gpt4free.git
fix: handle RuntimeError during asyncio.run in Cloudflare.py and rename key in PollinationsAI.py
- Added a try/except block around asyncio.run(nodriver_read_models()) in Cloudflare.py that catches RuntimeError and sets cls.models to cls.fallback_models when it occurs.
- Corrected the indentation of the "followups" key in PollinationsAI.py (from 43 to 44), turning it from a nested entry into a proper dictionary key.
- No other code logic changed in these files.
@@ -85,7 +85,11 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin, AuthFileMixin):
                     debug.log(f"Nodriver is not available: {type(e).__name__}: {e}")
                     cls.models = cls.fallback_models
                 get_running_loop(check_nested=True)
-                asyncio.run(nodriver_read_models())
+                try:
+                    asyncio.run(nodriver_read_models())
+                except RuntimeError:
+                    debug.log("Nodriver is not available: RuntimeError")
+                    cls.models = cls.fallback_models
             else:
                 cls.models = cls.fallback_models
                 debug.log(f"Nodriver is not installed: {type(f).__name__}: {f}")
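The guard added above matters because asyncio.run() raises RuntimeError when the calling thread is already running an event loop; the new except clause keeps model discovery from crashing and falls back to the static list instead. A minimal sketch of the same pattern, with illustrative names (fetch_models and FALLBACK_MODELS are stand-ins, not from the repo):

import asyncio

FALLBACK_MODELS = ["fallback-model"]  # illustrative stand-in for cls.fallback_models

async def fetch_models():
    # stand-in for nodriver_read_models(): pretend to fetch the live model list
    await asyncio.sleep(0)
    return ["live-model"]

def load_models():
    try:
        # asyncio.run() raises RuntimeError if this thread already runs an event loop
        return asyncio.run(fetch_models())
    except RuntimeError:
        # same recovery as the patch above: keep the static fallback list
        return FALLBACK_MODELS

print(load_models())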
@@ -24,14 +24,14 @@ except ImportError as e:

 from ...client.service import convert_to_provider
 from ...providers.asyncio import to_sync_generator
-from ...providers.response import FinishReason
+from ...providers.response import FinishReason, AudioResponse, MediaResponse, Reasoning, HiddenResponse
 from ...client.helper import filter_markdown
 from ...tools.files import supports_filename, get_streaming, get_bucket_dir, get_tempfile
 from ...tools.run_tools import iter_run_tools
 from ...errors import ProviderNotFoundError
 from ...image import is_allowed_extension, MEDIA_TYPE_MAP
 from ...cookies import get_cookies_dir
-from ...image.copy_images import secure_filename, get_source_url, get_media_dir
+from ...image.copy_images import secure_filename, get_source_url, get_media_dir, copy_media
 from ... import ChatCompletion
 from ... import models
 from .api import Api
@@ -233,12 +233,31 @@ class Backend_Api(Api):
             parameters = {
                 "model": request.args.get("model"),
                 "messages": [{"role": "user", "content": request.args.get("prompt")}],
-                "provider": request.args.get("provider", None),
+                "provider": request.args.get("provider", request.args.get("audio_provider", "AnyProvider")),
                 "stream": not do_filter and not cache_id,
                 "ignore_stream": not request.args.get("stream"),
                 "tool_calls": tool_calls,
             }
+            if request.args.get("audio_provider") or request.args.get("audio"):
+                parameters["audio"] = {}
+            def cast_str(response):
+                buffer = next(response)
+                while isinstance(buffer, (Reasoning, HiddenResponse)):
+                    buffer = next(response)
+                if isinstance(buffer, MediaResponse):
+                    if len(buffer.get_list()) == 1:
+                        if not cache_id:
+                            return buffer.get_list()[0]
+                        return asyncio.run(copy_media(
+                            buffer.get_list(),
+                            buffer.get("cookies"),
+                            buffer.get("headers"),
+                            request.args.get("prompt")
+                        )).pop()
+                elif isinstance(buffer, AudioResponse):
+                    return buffer.data
                 def iter_response():
+                    yield str(buffer)
                     for chunk in response:
                         if isinstance(chunk, FinishReason):
                             yield f"[{chunk.reason}]" if chunk.reason != "stop" else ""
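The new cast_str helper peeks at the first non-meta chunk of the streamed response so it can special-case media and audio payloads, and iter_response replays that consumed chunk before draining the rest of the generator. A reduced sketch of that peek-then-replay pattern, independent of the g4f response types (cast_first and the bytes check are illustrative):

def cast_first(response):
    # peek at the first chunk so special payloads can be returned whole
    buffer = next(response)
    if isinstance(buffer, bytes):
        return buffer  # e.g. an audio payload: no streaming needed
    def iter_response():
        # replay the chunk we already consumed, then drain the rest
        yield str(buffer)
        for chunk in response:
            yield str(chunk)
    return iter_response()

chunks = iter(["Hello", ", ", "world"])
print("".join(cast_first(chunks)))  # -> Hello, world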
@@ -246,10 +265,12 @@ class Backend_Api(Api):
                         chunk = str(chunk)
                         if chunk:
                             yield chunk
                 return iter_response()
+
             if cache_id:
                 cache_id = sha256(cache_id.encode() + json.dumps(parameters, sort_keys=True).encode()).hexdigest()
                 cache_dir = Path(get_cookies_dir()) / ".scrape_cache" / "create"
-                cache_file = cache_dir / f"{quote_plus(request.args.get('prompt').strip()[:20])}.{cache_id}.txt"
+                cache_file = cache_dir / f"{quote_plus(request.args.get('prompt', '').strip()[:20])}.{cache_id}.txt"
+                response = None
                 if cache_file.exists():
                     with cache_file.open("r") as f:
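The cache path combines a short slug of the prompt with a SHA-256 digest over the caller's cache id and the request parameters serialized with sort_keys=True, so identical parameters always map to the same file. A sketch of that key derivation mirroring the lines above (the function name and arguments are illustrative):

import json
from hashlib import sha256
from pathlib import Path
from urllib.parse import quote_plus

def cache_file_for(cache_id: str, prompt: str, parameters: dict, cache_dir: Path) -> Path:
    # sort_keys makes the serialized parameters, and therefore the digest, deterministic
    digest = sha256(cache_id.encode() + json.dumps(parameters, sort_keys=True).encode()).hexdigest()
    # a short URL-safe slug from the prompt keeps the filename human-readable
    return cache_dir / f"{quote_plus(prompt.strip()[:20])}.{digest}.txt"

print(cache_file_for("demo", "tell me a joke", {"model": "gpt-4"}, Path(".scrape_cache/create")))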
@@ -260,11 +281,22 @@ class Backend_Api(Api):
                     copy_response = cast_str(response)
                     if copy_response:
                         with cache_file.open("w") as f:
-                            for chunk in copy_response:
+                            for chunk in [copy_response] if isinstance(copy_response, str) else copy_response:
                                 f.write(chunk)
                         response = copy_response
                 else:
                     response = cast_str(iter_run_tools(ChatCompletion.create, **parameters))
+            if isinstance(response, str):
+                if response.startswith("/media/"):
+                    media_dir = get_media_dir()
+                    filename = os.path.basename(response.split("?")[0])
+                    try:
+                        return send_from_directory(os.path.abspath(media_dir), filename)
+                    finally:
+                        if not cache_id:
+                            os.remove(os.path.join(media_dir, filename))
+                elif response.startswith("https://") or response.startswith("http://"):
+                    return redirect(response)
             if do_filter:
                 is_true_filter = do_filter.lower() in ["true", "1"]
                 response = "".join(response)
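When the result is a local "/media/..." path, the handler streams the file with Flask's send_from_directory and, for uncached requests, removes it in a finally block once the response object has been built, so the media is served only once. A reduced sketch of that serve-then-delete pattern, assuming a hypothetical route and media directory:

import os
from flask import Flask, send_from_directory

app = Flask(__name__)
MEDIA_DIR = "./media"  # illustrative media directory

@app.route("/media/<path:filename>")
def serve_once(filename: str):
    try:
        # build the file response first
        return send_from_directory(os.path.abspath(MEDIA_DIR), filename)
    finally:
        # then remove the file so uncached results are served only once
        try:
            os.remove(os.path.join(MEDIA_DIR, filename))
        except FileNotFoundError:
            pass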
@@ -320,7 +352,7 @@ class Backend_Api(Api):
                     result = md.convert(copyfile, stream_info=StreamInfo(
                         extension=suffix,
                         mimetype=file.mimetype,
-                    ), language=language).text_content
+                    ),recognition_language=language).text_content
                 except Exception as e:
                     logger.exception(e)
             is_media = is_allowed_extension(filename)
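Uploads are converted to text through markitdown, with a StreamInfo hint for the file type and the speech locale now passed as recognition_language rather than language. A sketch of the same call shape under the assumption that the bundled markitdown build exports MarkItDown and StreamInfo at the top level and forwards extra keywords to its converters (the filename and values are illustrative):

from markitdown import MarkItDown, StreamInfo

md = MarkItDown()
# same shape as the call in the diff: type hints via StreamInfo plus the speech locale
result = md.convert(
    "upload.mp3",
    stream_info=StreamInfo(extension=".mp3", mimetype="audio/mpeg"),
    recognition_language="en-US",
).text_content
print(result)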
@@ -336,7 +368,7 @@ class Backend_Api(Api):
                 os.makedirs(media_dir, exist_ok=True)
                 newfile = os.path.join(media_dir, filename)
                 media.append({"name": filename, "text": result})
-            elif not result and supports_filename(filename):
+            elif not result and is_supported:
                 newfile = os.path.join(bucket_dir, filename)
                 filenames.append(filename)
                 try:
@@ -345,7 +377,8 @@ class Backend_Api(Api):
                     shutil.copyfile(copyfile, newfile)
                     os.remove(copyfile)
         with open(os.path.join(bucket_dir, "files.txt"), 'w') as f:
-            f.write("".join([f"{filename}\n" for filename in filenames]))
+            for filename in filenames:
+                f.write(f"{filename}\n")
         return {"bucket_id": bucket_id, "files": filenames, "media": media}

        @app.route('/files/<bucket_id>/media/<filename>', methods=['GET'])
@@ -50,7 +50,7 @@ class AudioConverter(DocumentConverter):
         self,
         file_stream: BinaryIO,
         stream_info: StreamInfo,
-        language: str = "en-US",
+        recognition_language: str = None,
         **kwargs: Any,  # Options to pass to the converter
     ) -> DocumentConverterResult:
         md_content = ""
@@ -97,7 +97,7 @@ class AudioConverter(DocumentConverter):
         # Transcribe
         if audio_format:
             try:
-                md_content = transcribe_audio(file_stream, audio_format=audio_format, language=language)
+                md_content = transcribe_audio(file_stream, audio_format=audio_format, language=recognition_language)
             except MissingDependencyException:
                 pass

@@ -20,7 +20,7 @@ except ImportError:
     _dependency_exc_info = sys.exc_info()


-def transcribe_audio(file_stream: BinaryIO, *, audio_format: str = "wav", language: str = "en-US") -> str:
+def transcribe_audio(file_stream: BinaryIO, *, audio_format: str = "wav", language: str = None) -> str:
     # Check for installed dependencies
     if _dependency_exc_info is not None:
         raise MissingDependencyException(
@@ -45,5 +45,7 @@ def transcribe_audio(file_stream: BinaryIO, *, audio_format: str = "wav", langua
     recognizer = sr.Recognizer()
     with sr.AudioFile(audio_source) as source:
         audio = recognizer.record(source)
+    if language is None:
+        language = "en-US"
     transcript = recognizer.recognize_google(audio, language=language).strip()
-    return "[No speech detected]" if transcript == "" else transcript
+    return "[No speech detected]" if transcript == "" else transcript.strip()
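transcribe_audio wraps the SpeechRecognition package: a Recognizer records the whole AudioFile and sends it to recognize_google with the requested locale, which now falls back to "en-US" only when the caller passed nothing. A condensed usage sketch, assuming the SpeechRecognition package is installed and a sample.wav file exists (the wrapper function name is illustrative):

import speech_recognition as sr

def transcribe(path: str, language: str = None) -> str:
    if language is None:
        language = "en-US"  # same default the patched helper now applies lazily
    recognizer = sr.Recognizer()
    with sr.AudioFile(path) as source:
        audio = recognizer.record(source)  # read the whole file into an AudioData object
    transcript = recognizer.recognize_google(audio, language=language).strip()
    return "[No speech detected]" if transcript == "" else transcript

print(transcribe("sample.wav"))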