mirror of
				https://github.com/xtekky/gpt4free.git
				synced 2025-10-31 03:26:22 +08:00 
			
		
		
		
	Fix: There is no current event loop in thread
This commit is contained in:
		| @@ -1,24 +1,25 @@ | ||||
| from __future__ import annotations | ||||
|  | ||||
| import json | ||||
| from curl_cffi.requests import AsyncSession | ||||
|  | ||||
| from .base_provider import AsyncProvider, format_prompt | ||||
| from ..typing import AsyncGenerator | ||||
| from ..requests import StreamSession | ||||
| from .base_provider import AsyncGeneratorProvider, format_prompt | ||||
|  | ||||
|  | ||||
| class AItianhu(AsyncProvider): | ||||
| class AItianhu(AsyncGeneratorProvider): | ||||
|     url = "https://www.aitianhu.com" | ||||
|     working = True | ||||
|     supports_gpt_35_turbo = True | ||||
|  | ||||
|     @classmethod | ||||
|     async def create_async( | ||||
|     async def create_async_generator( | ||||
|         cls, | ||||
|         model: str, | ||||
|         messages: list[dict[str, str]], | ||||
|         proxy: str = None, | ||||
|         **kwargs | ||||
|     ) -> str: | ||||
|     ) -> AsyncGenerator: | ||||
|         data = { | ||||
|             "prompt": format_prompt(messages), | ||||
|             "options": {}, | ||||
| @@ -27,12 +28,25 @@ class AItianhu(AsyncProvider): | ||||
|             "top_p": 1, | ||||
|             **kwargs | ||||
|         } | ||||
|         async with AsyncSession(proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: | ||||
|             response = await session.post(cls.url + "/api/chat-process", json=data) | ||||
|             response.raise_for_status() | ||||
|             line = response.text.splitlines()[-1] | ||||
|             line = json.loads(line) | ||||
|             return line["text"] | ||||
|         headers = { | ||||
|             "Authority": cls.url, | ||||
|             "Accept": "application/json, text/plain, */*", | ||||
|             "Origin": cls.url, | ||||
|             "Referer": f"{cls.url}/" | ||||
|         } | ||||
|         async with StreamSession(headers=headers, proxies={"https": proxy}, impersonate="chrome107", verify=False) as session: | ||||
|             async with session.post(f"{cls.url}/api/chat-process", json=data) as response: | ||||
|                 response.raise_for_status() | ||||
|                 async for line in response.iter_lines(): | ||||
|                     if b"platform's risk control" in line: | ||||
|                         raise RuntimeError("Platform's Risk Control") | ||||
|                     line = json.loads(line) | ||||
|                     if "detail" in line: | ||||
|                         content = line["detail"]["choices"][0]["delta"].get("content") | ||||
|                         if content: | ||||
|                             yield content | ||||
|                     else: | ||||
|                         raise RuntimeError(f"Response: {line}") | ||||
|  | ||||
|  | ||||
|     @classmethod | ||||
|   | ||||
| @@ -2,6 +2,7 @@ from __future__ import annotations | ||||
|  | ||||
| import random, json | ||||
|  | ||||
| from ..typing import AsyncGenerator | ||||
| from ..requests import StreamSession | ||||
| from .base_provider import AsyncGeneratorProvider, format_prompt | ||||
|  | ||||
| @@ -22,7 +23,7 @@ class AItianhuSpace(AsyncGeneratorProvider): | ||||
|         messages: list[dict[str, str]], | ||||
|         stream: bool = True, | ||||
|         **kwargs | ||||
|     ) -> str: | ||||
|     ) -> AsyncGenerator: | ||||
|         if not model: | ||||
|             model = "gpt-3.5-turbo" | ||||
|         elif not model in domains: | ||||
|   | ||||
| @@ -4,7 +4,7 @@ import time | ||||
| import hashlib | ||||
|  | ||||
| from ..typing import AsyncGenerator | ||||
| from ..requests import StreamRequest | ||||
| from ..requests import StreamSession | ||||
| from .base_provider import AsyncGeneratorProvider | ||||
|  | ||||
|  | ||||
| @@ -20,7 +20,7 @@ class Aibn(AsyncGeneratorProvider): | ||||
|         messages: list[dict[str, str]], | ||||
|         **kwargs | ||||
|     ) -> AsyncGenerator: | ||||
|         async with StreamRequest(impersonate="chrome107") as session: | ||||
|         async with StreamSession(impersonate="chrome107") as session: | ||||
|             timestamp = int(time.time()) | ||||
|             data = { | ||||
|                 "messages": messages, | ||||
|   | ||||
| @@ -21,7 +21,11 @@ def get_event_loop() -> AbstractEventLoop: | ||||
|     try: | ||||
|         asyncio.get_running_loop() | ||||
|     except RuntimeError: | ||||
|         return asyncio.get_event_loop() | ||||
|         try: | ||||
|             return asyncio.get_event_loop() | ||||
|         except RuntimeError: | ||||
|             asyncio.set_event_loop(asyncio.new_event_loop()) | ||||
|             return asyncio.get_event_loop() | ||||
|     try: | ||||
|         event_loop = asyncio.get_event_loop() | ||||
|         if not hasattr(event_loop.__class__, "_nest_patched"): | ||||
|   | ||||
| @@ -5,31 +5,26 @@ import asyncio | ||||
| sys.path.append(str(Path(__file__).parent.parent)) | ||||
|  | ||||
| import g4f | ||||
| from g4f.Provider import AsyncProvider | ||||
| from testing.test_providers import get_providers | ||||
| from testing.log_time import log_time_async | ||||
|  | ||||
async def create_async(provider):
    """Run one async completion against *provider* and print the outcome.

    The diff residue in this span interleaved the pre- and post-commit
    bodies (duplicate `model=`/`messages=` keyword lines and both a
    `return` and a `print` in the except clause); this is the coherent
    post-commit version: always use the default model, and report
    failures by printing instead of returning an error string, since the
    caller no longer collects return values.

    Args:
        provider: A g4f provider class exposing ``create_async``,
            ``__name__`` and ``working``.

    Returns:
        None — results and errors are written to stdout.
    """
    try:
        response = await log_time_async(
            provider.create_async,
            model=g4f.models.default.name,
            messages=[{"role": "user", "content": "Hello, are you GPT 3.5?"}]
        )
        print(f"{provider.__name__}:", response)
    except Exception as e:
        # Best-effort probe: a failing provider must not abort the whole
        # gather() in run_async, so swallow and report.
        print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
|  | ||||
async def run_async():
    """Probe every working provider concurrently.

    Builds one ``create_async`` coroutine per provider reported as
    ``working`` by ``get_providers()`` and awaits them together. The
    diff residue in this span interleaved the old error-collecting loop
    with the new fire-and-forget version; this is the coherent
    post-commit version — ``create_async`` prints its own errors, so no
    return values are inspected here.
    """
    responses: list = [
        create_async(provider)
        for provider in get_providers()
        if provider.working
    ]
    await asyncio.gather(*responses)

# asyncio.run creates a fresh event loop, which works from any thread —
# this is the call site the "no current event loop in thread" fix targets.
print("Total:", asyncio.run(log_time_async(run_async)))
		Reference in New Issue
	
	Block a user
	Author: Heiner Lohaus