Add OpenaiChat and Hugchat providers
Author: Heiner Lohaus

- Add tests for providers that need authentication
- Improve async support (about 2x faster)
- Add a shared get_cookies-by-domain helper function

g4f/Provider/Bard.py
@@ -2,42 +2,26 @@ import json
 import random
 import re
 
-import browser_cookie3
 from aiohttp import ClientSession
 import asyncio
 
 from ..typing import Any, CreateResult
-from .base_provider import BaseProvider
+from .base_provider import AsyncProvider, get_cookies
 
-class Bard(BaseProvider):
+class Bard(AsyncProvider):
     url = "https://bard.google.com"
     needs_auth = True
     working = True
 
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        stream: bool,
-        proxy: str = None,
-        cookies: dict = {},
-        **kwargs: Any,
-    ) -> CreateResult:
-        yield asyncio.run(cls.create_async(str, messages, proxy, cookies))
-
     @classmethod
     async def create_async(
         cls,
         model: str,
         messages: list[dict[str, str]],
         proxy: str = None,
-        cookies: dict = {},
+        cookies: dict = get_cookies(".google.com"),
         **kwargs: Any,
     ) -> str:
-        if not cookies:
-            for cookie in browser_cookie3.load(domain_name='.google.com'):
-                cookies[cookie.name] = cookie.value
-
         formatted = "\n".join(
             ["%s: %s" % (message["role"], message["content"]) for message in messages]
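
With Bard built on AsyncProvider, a caller awaits one complete reply instead of driving a fake generator; deleting the per-provider create_completion also removes the old bug where the type `str` was passed instead of `model`. A minimal usage sketch (not part of the commit), assuming a local browser is logged in to Google so get_cookies(".google.com") finds a valid session:

import asyncio
import g4f

async def main():
    # create_async resolves to the full reply as a single string
    reply = await g4f.Provider.Bard.create_async(
        model=None,
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(reply)

asyncio.run(main())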

g4f/Provider/Bing.py
@@ -5,48 +5,24 @@ import random
 
 import aiohttp
 import asyncio
-import browser_cookie3
 from aiohttp import ClientSession
 
 from ..typing import Any, AsyncGenerator, CreateResult, Union
-from .base_provider import BaseProvider
+from .base_provider import AsyncGeneratorProvider, get_cookies
 
-class Bing(BaseProvider):
+class Bing(AsyncGeneratorProvider):
     url = "https://bing.com/chat"
+    needs_auth = True
     working = True
     supports_gpt_4 = True
+    supports_stream=True
 
-    @classmethod
-    def create_completion(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        stream: bool,
-        **kwargs: Any
-    ) -> CreateResult:
-        if stream:
-            yield from run(cls.create_async_generator(model, messages, **kwargs))
-        else:
-            yield asyncio.run(cls.create_async(model, messages, **kwargs))
-
-    @classmethod
-    async def create_async(
-        cls,
-        model: str,
-        messages: list[dict[str, str]],
-        **kwargs: Any,
-    ) -> str:
-        result = []
-        async for chunk in cls.create_async_generator(model, messages, **kwargs):
-            result.append(chunk)
-        if result:
-            return "".join(result)
 
     @staticmethod
     def create_async_generator(
             model: str,
             messages: list[dict[str, str]],
-            cookies: dict = {}
+            cookies: dict = get_cookies(".bing.com"),
+            **kwargs
         ) -> AsyncGenerator:
         if len(messages) < 2:
             prompt = messages[0]["content"]
@@ -54,15 +30,11 @@ class Bing(BaseProvider):
 
         else:
             prompt = messages[-1]["content"]
-            context = convert(messages[:-1])
+            context = create_context(messages[:-1])
 
-        if not cookies:
-            for cookie in browser_cookie3.load(domain_name='.bing.com'):
-                cookies[cookie.name] = cookie.value
-
         return stream_generate(prompt, context, cookies)
 
-def convert(messages: list[dict[str, str]]):
+def create_context(messages: list[dict[str, str]]):
     context = ""
 
     for message in messages:
@@ -187,34 +159,32 @@ class Defaults:
         'x-forwarded-for': ip_address,
     }
 
-    optionsSets = {
-        "optionsSets": [
-            'saharasugg',
-            'enablenewsfc',
-            'clgalileo',
-            'gencontentv3',
-            "nlu_direct_response_filter",
-            "deepleo",
-            "disable_emoji_spoken_text",
-            "responsible_ai_policy_235",
-            "enablemm",
-            "h3precise"
-            "dtappid",
-            "cricinfo",
-            "cricinfov2",
-            "dv3sugg",
-            "nojbfedge"
-        ]
-    }
+    optionsSets = [
+        'saharasugg',
+        'enablenewsfc',
+        'clgalileo',
+        'gencontentv3',
+        "nlu_direct_response_filter",
+        "deepleo",
+        "disable_emoji_spoken_text",
+        "responsible_ai_policy_235",
+        "enablemm",
+        "h3precise"
+        "dtappid",
+        "cricinfo",
+        "cricinfov2",
+        "dv3sugg",
+        "nojbfedge"
+    ]
 
-def format_message(msg: dict) -> str:
-    return json.dumps(msg, ensure_ascii=False) + Defaults.delimiter
+def format_message(message: dict) -> str:
+    return json.dumps(message, ensure_ascii=False) + Defaults.delimiter
 
 def create_message(conversation: Conversation, prompt: str, context: str=None) -> str:
     struct = {
         'arguments': [
             {
-                **Defaults.optionsSets,
+                'optionsSets': Defaults.optionsSets,
                 'source': 'cib',
                 'allowedMessageTypes': Defaults.allowedMessageTypes,
                 'sliceIds': Defaults.sliceIds,
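
Bing now implements only the async-generator path; the synchronous and non-streaming entry points come from AsyncGeneratorProvider. Flattening optionsSets from a one-key dict to a plain list also lets create_message reference it explicitly as 'optionsSets': Defaults.optionsSets instead of splatting the dict. A streaming sketch (not part of the commit), again assuming logged-in .bing.com cookies in a local browser:

import asyncio
import g4f

async def main():
    # chunks are yielded as Bing produces them; cookies default to
    # get_cookies(".bing.com")
    async for chunk in g4f.Provider.Bing.create_async_generator(
        model=None,
        messages=[{"role": "user", "content": "Hello"}],
    ):
        print(chunk, end="", flush=True)

asyncio.run(main())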

g4f/Provider/Hugchat.py (new file, 67 lines)
@@ -0,0 +1,67 @@
+has_module = False
+try:
+    from hugchat.hugchat import ChatBot
+except ImportError:
+    has_module = False
+
+from .base_provider import BaseProvider, get_cookies
+from g4f.typing import CreateResult
+
+class Hugchat(BaseProvider):
+    url = "https://huggingface.co/chat/"
+    needs_auth = True
+    working = has_module
+    llms = ['OpenAssistant/oasst-sft-6-llama-30b-xor', 'meta-llama/Llama-2-70b-chat-hf']
+
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool = False,
+        proxy: str = None,
+        cookies: str = get_cookies(".huggingface.co"),
+        **kwargs
+    ) -> CreateResult:
+        bot = ChatBot(
+            cookies=cookies
+        )
+
+        if proxy and "://" not in proxy:
+            proxy = f"http://{proxy}"
+            bot.session.proxies = {"http": proxy, "https": proxy}
+
+        if model:
+            try:
+                if not isinstance(model, int):
+                    model = cls.llms.index(model)
+                bot.switch_llm(model)
+            except:
+                raise RuntimeError(f"Model are not supported: {model}")
+
+        if len(messages) > 1:
+            formatted = "\n".join(
+                ["%s: %s" % (message["role"], message["content"]) for message in messages]
+            )
+            prompt = f"{formatted}\nAssistant:"
+        else:
+            prompt = messages.pop()["content"]
+
+        try:
+            yield bot.chat(prompt, **kwargs)
+        finally:
+            bot.delete_conversation(bot.current_conversation)
+            bot.current_conversation = ""
+            pass
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
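
Hugchat stays a synchronous provider wrapping the hugchat package's ChatBot: it yields the complete answer once, then deletes the throwaway conversation in the finally block. A usage sketch (not part of the commit), assuming hugchat is installed and the local browser holds .huggingface.co login cookies:

import g4f

for response in g4f.Provider.Hugchat.create_completion(
    model="OpenAssistant/oasst-sft-6-llama-30b-xor",  # a name from Hugchat.llms, or an index
    messages=[{"role": "user", "content": "Hello"}],
):
    print(response)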

g4f/Provider/OpenaiChat.py (new file, 74 lines)
@@ -0,0 +1,74 @@
+has_module = True
+try:
+    from revChatGPT.V1 import AsyncChatbot
+except ImportError:
+    has_module = False
+from .base_provider import AsyncGeneratorProvider, get_cookies
+from ..typing import AsyncGenerator
+
+class OpenaiChat(AsyncGeneratorProvider):
+    url = "https://chat.openai.com"
+    needs_auth = True
+    working = has_module
+    supports_gpt_35_turbo = True
+    supports_gpt_4 = True
+    supports_stream = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        proxy: str = None,
+        access_token: str = None,
+        cookies: dict = None,
+        **kwargs
+    ) -> AsyncGenerator:
+
+        config = {"access_token": access_token, "model": model}
+        if proxy:
+            if "://" not in proxy:
+                proxy = f"http://{proxy}"
+            config["proxy"] = proxy
+
+        bot = AsyncChatbot(
+            config=config
+        )
+
+        if not access_token:
+            cookies = cookies if cookies else get_cookies("chat.openai.com")
+            response = await bot.session.get("https://chat.openai.com/api/auth/session", cookies=cookies)
+            access_token = response.json()["accessToken"]
+            bot.set_access_token(access_token)
+
+        if len(messages) > 1:
+            formatted = "\n".join(
+                ["%s: %s" % ((message["role"]).capitalize(), message["content"]) for message in messages]
+            )
+            prompt = f"{formatted}\nAssistant:"
+        else:
+            prompt = messages.pop()["content"]
+
+        returned = None
+        async for message in bot.ask(prompt):
+            message = message["message"]
+            if returned:
+                if message.startswith(returned):
+                    new = message[len(returned):]
+                    if new:
+                        yield new
+            else:
+                yield message
+            returned = message
+
+    @classmethod
+    @property
+    def params(cls):
+        params = [
+            ("model", "str"),
+            ("messages", "list[dict[str, str]]"),
+            ("stream", "bool"),
+            ("proxy", "str"),
+        ]
+        param = ", ".join([": ".join(p) for p in params])
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
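
Because revChatGPT's bot.ask yields the cumulative message so far, the provider keeps the last returned text and yields only the suffix, turning cumulative updates into incremental chunks. A streaming sketch (not part of the commit), assuming revChatGPT is installed and the browser holds a chat.openai.com session (or an access_token is passed):

import asyncio
import g4f

async def main():
    async for token in g4f.Provider.OpenaiChat.create_async_generator(
        model=None,  # as in the tests; a model name can also be passed through
        messages=[{"role": "user", "content": "Hello"}],
        # access_token="...",  # optional; otherwise resolved from session cookies
    ):
        print(token, end="", flush=True)

asyncio.run(main())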

g4f/Provider/__init__.py
@@ -14,9 +14,11 @@ from .EasyChat import EasyChat
 from .Forefront import Forefront
 from .GetGpt import GetGpt
 from .H2o import H2o
+from .Hugchat import Hugchat
 from .Liaobots import Liaobots
 from .Lockchat import Lockchat
 from .Opchatgpts import Opchatgpts
+from .OpenaiChat import OpenaiChat
 from .Raycast import Raycast
 from .Theb import Theb
 from .Vercel import Vercel
@@ -44,10 +46,12 @@ __all__ = [
     "Forefront",
     "GetGpt",
     "H2o",
+    "Hugchat",
     "Liaobots",
     "Lockchat",
     "Opchatgpts",
     "Raycast",
+    "OpenaiChat",
     "Theb",
     "Vercel",
     "Wewordle",
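
Once registered here, the new providers can be routed through the package's top-level API like any other. A sketch (not part of the commit), assuming the g4f.ChatCompletion.create entry point documented in this repo's README:

import g4f

response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=g4f.Provider.OpenaiChat,
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
)
for chunk in response:
    print(chunk, end="")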

g4f/Provider/base_provider.py
@@ -1,7 +1,11 @@
 from abc import ABC, abstractmethod
 
-from ..typing import Any, CreateResult
+from ..typing import Any, CreateResult, AsyncGenerator, Union
 
+import browser_cookie3
+import asyncio
+from time import time
+import math
 
 class BaseProvider(ABC):
     url: str
@@ -30,4 +34,81 @@ class BaseProvider(ABC):
             ("stream", "bool"),
         ]
         param = ", ".join([": ".join(p) for p in params])
-        return f"g4f.provider.{cls.__name__} supports: ({param})"
+        return f"g4f.provider.{cls.__name__} supports: ({param})"
+
+
+_cookies = {}
+
+def get_cookies(cookie_domain: str) -> dict:
+    if cookie_domain not in _cookies:
+        _cookies[cookie_domain] = {}
+        for cookie in browser_cookie3.load(cookie_domain):
+            _cookies[cookie_domain][cookie.name] = cookie.value
+    return _cookies[cookie_domain]
+
+
+class AsyncProvider(BaseProvider):
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool = False,
+        **kwargs: Any
+    ) -> CreateResult:
+        yield asyncio.run(cls.create_async(model, messages, **kwargs))
+
+    @staticmethod
+    @abstractmethod
+    async def create_async(
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs: Any,
+    ) -> str:
+        raise NotImplementedError()
+
+
+class AsyncGeneratorProvider(AsyncProvider):
+    @classmethod
+    def create_completion(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        stream: bool = True,
+        **kwargs: Any
+    ) -> CreateResult:
+        if stream:
+            yield from run_generator(cls.create_async_generator(model, messages, **kwargs))
+        else:
+            yield from AsyncProvider.create_completion(cls=cls, model=model, messages=messages, **kwargs)
+
+    @classmethod
+    async def create_async(
+        cls,
+        model: str,
+        messages: list[dict[str, str]],
+        **kwargs: Any,
+    ) -> str:
+        chunks = [chunk async for chunk in cls.create_async_generator(model, messages, **kwargs)]
+        if chunks:
+            return "".join(chunks)
+
+    @staticmethod
+    @abstractmethod
+    def create_async_generator(
+            model: str,
+            messages: list[dict[str, str]],
+        ) -> AsyncGenerator:
+        raise NotImplementedError()
+
+
+def run_generator(generator: AsyncGenerator[Union[Any, str], Any]):
+    loop = asyncio.new_event_loop()
+    gen = generator.__aiter__()
+
+    while True:
+        try:
+            yield loop.run_until_complete(gen.__anext__())
+
+        except StopAsyncIteration:
+            break
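
The point of the two new base classes is that a provider implements exactly one async method and inherits the rest: AsyncProvider derives the sync create_completion from create_async, and AsyncGeneratorProvider additionally derives both create_completion (via run_generator, which pumps an async generator from a plain one) and create_async (by joining chunks). A self-contained toy sketch; EchoProvider and its messages are illustrative, not part of the commit:

import asyncio
from typing import AsyncGenerator

from g4f.Provider.base_provider import AsyncGeneratorProvider

class EchoProvider(AsyncGeneratorProvider):
    url = "https://example.invalid"  # hypothetical
    working = True
    supports_stream = True

    @staticmethod
    async def create_async_generator(
        model: str,
        messages: list[dict[str, str]],
        **kwargs
    ) -> AsyncGenerator:
        # stand-in for a real streaming backend: echo the last
        # message back word by word
        for word in messages[-1]["content"].split():
            await asyncio.sleep(0)
            yield word + " "

# streaming, synchronous call: run_generator drives the event loop
for chunk in EchoProvider.create_completion(
    model=None, messages=[{"role": "user", "content": "hello async world"}]
):
    print(chunk, end="")
print()

# one-shot async call: create_async joins the streamed chunks
print(asyncio.run(EchoProvider.create_async(
    model=None, messages=[{"role": "user", "content": "hello again"}]
)))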

testing/test_needs_auth.py (new file, 95 lines)
@@ -0,0 +1,95 @@
+import sys
+from pathlib import Path
+import asyncio
+from time import time
+
+sys.path.append(str(Path(__file__).parent.parent))
+
+import g4f
+
+providers = [g4f.Provider.OpenaiChat, g4f.Provider.Bard, g4f.Provider.Bing]
+
+# Async support
+async def log_time_async(method: callable, **kwargs):
+    start = time()
+    result = await method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
+
+def log_time_yield(method: callable, **kwargs):
+    start = time()
+    result = yield from method(**kwargs)
+    yield f" {round(time() - start, 2)} secs"
+
+def log_time(method: callable, **kwargs):
+    start = time()
+    result = method(**kwargs)
+    secs = f"{round(time() - start, 2)} secs"
+    if result:
+        return " ".join([result, secs])
+    return secs
+
+async def run_async():
+    responses = []
+    for provider in providers:
+        responses.append(log_time_async(
+            provider.create_async,
+            model=None,
+            messages=[{"role": "user", "content": "Hello"}],
+            log_time=True
+        ))
+    responses = await asyncio.gather(*responses)
+    for idx, provider in enumerate(providers):
+        print(f"{provider.__name__}:", responses[idx])
+print("Async Total:", asyncio.run(log_time_async(run_async)))
+
+# Streaming support:
+def run_stream():
+    for provider in providers:
+        print(f"{provider.__name__}: ", end="")
+        for response in log_time_yield(
+            provider.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": "Hello"}],
+        ):
+            print(response, end="")
+        print()
+print("Stream Total:", log_time(run_stream))
+
+# No streaming support:
+def create_completion():
+    for provider in providers:
+        print(f"{provider.__name__}:", end=" ")
+        for response in log_time_yield(
+            g4f.Provider.Bard.create_completion,
+            model=None,
+            messages=[{"role": "user", "content": "Hello"}],
+        ):
+            print(response, end="")
+        print()
+print("No Stream Total:", log_time(create_completion))
+
+for response in g4f.Provider.Hugchat.create_completion(
+    model=None,
+    messages=[{"role": "user", "content": "Hello, tell about you."}],
+):
+    print("Hugchat:", response)
+
+"""
+OpenaiChat: Hello! How can I assist you today? 2.0 secs
+Bard: Hello! How can I help you today? 3.44 secs
+Bing: Hello, this is Bing. How can I help? 😊 4.14 secs
+Async Total: 4.25 secs
+
+OpenaiChat: Hello! How can I assist you today? 1.85 secs
+Bard: Hello! How can I help you today? 3.38 secs
+Bing: Hello, this is Bing. How can I help? 😊 6.14 secs
+Stream Total: 11.37 secs
+
+OpenaiChat: Hello! How can I help you today? 3.28 secs
+Bard: Hello there! How can I help you today? 3.58 secs
+Bing: Hello! How can I help you today? 3.28 secs
+No Stream Total: 10.14 secs
+"""