mirror of https://github.com/xtekky/gpt4free.git
synced 2025-10-05 08:16:58 +08:00

Add unit tests for the client

Fix: async generator ignored GeneratorExit
Fix: ResourceWarning: unclosed event loop

g4f/client.py (151 lines changed)
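
The two fixes are related: a bare except: inside an async generator (see the removed aiter_response below) also catches the GeneratorExit that Python throws into the generator when its consumer closes it, and a generator that yields again after swallowing it fails with "RuntimeError: async generator ignored GeneratorExit"; the event loop driving it then never shuts down cleanly. A minimal sketch of the failure mode and the usual remedy (illustrative ticker, not code from this commit):

    import asyncio

    async def ticker():
        try:
            while True:
                yield 1
        except GeneratorExit:
            # Re-raise instead of swallowing: catching GeneratorExit and then
            # yielding again is what raises "async generator ignored GeneratorExit".
            raise

    async def main():
        gen = ticker()
        async for _ in gen:
            break          # abandon the generator early
        await gen.aclose() # throws GeneratorExit into ticker() at its yield

    # asyncio.run() closes its event loop on exit; a loop that is created but
    # never closed is what triggers "ResourceWarning: unclosed event loop".
    asyncio.run(main())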
@@ -2,9 +2,9 @@ from __future__ import annotations
 
 import re
 
-from .typing import Union, Generator, AsyncGenerator, Messages, ImageType
+from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
+from .typing import Union, Generator, Messages, ImageType
 from .base_provider import BaseProvider, ProviderType
 from .Provider.base_provider import AsyncGeneratorProvider
 from .image import ImageResponse as ImageProviderResponse
 from .Provider import BingCreateImages, Gemini, OpenaiChat
 from .errors import NoImageResponseError
@@ -36,14 +36,14 @@ def iter_response(
     stop: list = None
 ) -> Generator:
     content = ""
-    idx = 1
-    chunk = None
-    finish_reason = "stop"
+    finish_reason = None
+    last_chunk = None
     for idx, chunk in enumerate(response):
+        if last_chunk is not None:
+            yield ChatCompletionChunk(last_chunk, finish_reason)
         content += str(chunk)
-        if max_tokens is not None and idx > max_tokens:
+        if max_tokens is not None and idx + 1 >= max_tokens:
             finish_reason = "max_tokens"
-            break
         first = -1
         word = None
         if stop is not None:
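
The hunk above switches iter_response to a one-chunk lookahead: each chunk is held in last_chunk and emitted one iteration late, so the final chunk can be yielded after the loop carrying the real finish_reason. The same pattern in isolation (a generic sketch, not this repo's code):

    def tag_last(items):
        # Yield (item, finish_reason) pairs; only the last item carries a reason.
        last = None
        for item in items:
            if last is not None:
                yield last, None  # the held-back item was not the last one
            last = item           # hold the current item back one step
        if last is not None:
            yield last, "stop"    # the held-back item turned out to be final

    print(list(tag_last(["Hel", "lo", "!"])))
    # [('Hel', None), ('lo', None), ('!', 'stop')]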
@@ -52,98 +52,30 @@ def iter_response(
                 if first != -1:
                     content = content[:first]
                     break
-            if stream:
-                first = chunk.find(word)
-                if first != -1:
-                    chunk = chunk[:first]
-                else:
-                    first = 0
-                yield ChatCompletionChunk([ChatCompletionDeltaChoice(ChatCompletionDelta(chunk))])
+            if stream and first != -1:
+                first = chunk.find(word)
+                if first != -1:
+                    chunk = chunk[:first]
+                else:
+                    first = 0
         if first != -1:
+            finish_reason = "stop"
+        if stream:
+            last_chunk = chunk
+        if finish_reason is not None:
             break
+    if last_chunk is not None:
+        yield ChatCompletionChunk(last_chunk, finish_reason)
     if not stream:
         if response_format is not None and "type" in response_format:
             if response_format["type"] == "json_object":
                 response = read_json(response)
-        yield ChatCompletion([ChatCompletionChoice(ChatCompletionMessage(response, finish_reason))])
-
-async def aiter_response(
-    response: aiter,
-    stream: bool,
-    response_format: dict = None,
-    max_tokens: int = None,
-    stop: list = None
-) -> AsyncGenerator:
-    content = ""
-    try:
-        idx = 0
-        chunk = None
-        async for chunk in response:
-            content += str(chunk)
-            if max_tokens is not None and idx > max_tokens:
-                break
-            first = -1
-            word = None
-            if stop is not None:
-                for word in list(stop):
-                    first = content.find(word)
-                    if first != -1:
-                        content = content[:first]
-                        break
-                if stream:
-                    if first != -1:
-                        first = chunk.find(word)
-                        if first != -1:
-                            chunk = chunk[:first]
-                        else:
-                            first = 0
-                    yield ChatCompletionChunk([ChatCompletionDeltaChoice(ChatCompletionDelta(chunk))])
-            if first != -1:
-                break
-            idx += 1
-    except:
-        ...
-    if not stream:
-        if response_format is not None and "type" in response_format:
-            if response_format["type"] == "json_object":
-                response = read_json(response)
-        yield ChatCompletion([ChatCompletionChoice(ChatCompletionMessage(response))])
-
-class Model():
-    def __getitem__(self, item):
-        return getattr(self, item)
-
-class ChatCompletion(Model):
-    def __init__(self, choices: list):
-        self.choices = choices
-
-class ChatCompletionChunk(Model):
-    def __init__(self, choices: list):
-        self.choices = choices
-
-class ChatCompletionChoice(Model):
-    def __init__(self, message: ChatCompletionMessage):
-        self.message = message
-
-class ChatCompletionMessage(Model):
-    def __init__(self, content: str, finish_reason: str):
-        self.content = content
-        self.finish_reason = finish_reason
-        self.index = 0
-        self.logprobs = None
-
-class ChatCompletionDelta(Model):
-    def __init__(self, content: str):
-        self.content = content
-
-class ChatCompletionDeltaChoice(Model):
-    def __init__(self, delta: ChatCompletionDelta):
-        self.delta = delta
+        yield ChatCompletion(content, finish_reason)
 
 class Client():
     proxies: Proxies = None
     chat: Chat
     images: Images
 
     def __init__(
         self,
@@ -152,9 +84,9 @@ class Client():
         proxies: Proxies = None,
         **kwargs
     ) -> None:
-        self.proxies: Proxies = proxies
-        self.images = Images(self, image_provider)
         self.chat = Chat(self, provider)
+        self.images = Images(self, image_provider)
+        self.proxies: Proxies = proxies
 
     def get_proxy(self) -> Union[str, None]:
         if isinstance(self.proxies, str) or self.proxies is None:
@@ -178,13 +110,13 @@ class Completions():
         stream: bool = False,
         response_format: dict = None,
         max_tokens: int = None,
-        stop: list = None,
+        stop: Union[list, str] = None,
         **kwargs
-    ) -> Union[dict, Generator]:
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk]]:
         if max_tokens is not None:
             kwargs["max_tokens"] = max_tokens
         if stop:
-            kwargs["stop"] = list(stop)
+            kwargs["stop"] = stop
         model, provider = get_model_and_provider(
             model,
             self.provider if provider is None else provider,
@@ -192,10 +124,8 @@ class Completions():
             **kwargs
         )
         response = provider.create_completion(model, messages, stream=stream, **kwargs)
-        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
-            response = iter_response(response, stream, response_format) # max_tokens, stop
-        else:
-            response = iter_response(response, stream, response_format, max_tokens, stop)
+        stop = [stop] if isinstance(stop, str) else stop
+        response = iter_response(response, stream, response_format, max_tokens, stop)
         return response if stream else next(response)
 
 class Chat():
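
create() now accepts stop as Union[list, str] and normalizes a bare string with stop = [stop] if isinstance(stop, str) else stop, so callers no longer wrap a single stop word themselves. A plausible usage sketch (model name illustrative):

    from g4f.client import Client

    client = Client()
    completion = client.chat.completions.create(
        model="gpt-3.5-turbo",  # illustrative model name
        messages=[{"role": "user", "content": "Count to ten."}],
        max_tokens=50,
        stop="5",      # a bare string; normalized to ["5"] before iter_response
        stream=False,  # non-streaming: create() returns next(response)
    )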
@@ -203,7 +133,7 @@ class Chat():
 
     def __init__(self, client: Client, provider: ProviderType = None):
         self.completions = Completions(client, provider)
-
+
 class ImageModels():
     gemini = Gemini
     openai = OpenaiChat
@@ -212,21 +142,9 @@ class ImageModels():
         self.client = client
         self.default = BingCreateImages(proxy=self.client.get_proxy())
 
-    def get(self, name: str) -> ImageProvider:
-        return getattr(self, name) if hasattr(self, name) else self.default
+    def get(self, name: str, default: ImageProvider = None) -> ImageProvider:
+        return getattr(self, name) if hasattr(self, name) else default or self.default
 
-class ImagesResponse(Model):
-    data: list[Image]
-
-    def __init__(self, data: list) -> None:
-        self.data = data
-
-class Image(Model):
-    url: str
-
-    def __init__(self, url: str) -> None:
-        self.url = url
-
 class Images():
     def __init__(self, client: Client, provider: ImageProvider = None):
         self.client: Client = client
@@ -234,7 +152,7 @@ class Images():
         self.models: ImageModels = ImageModels(client)
 
     def generate(self, prompt, model: str = None, **kwargs):
-        provider = self.models.get(model) if model else self.provider or self.models.get(model)
+        provider = self.models.get(model, self.provider)
         if isinstance(provider, BaseProvider) or isinstance(provider, type) and issubclass(provider, BaseProvider):
             prompt = f"create a image: {prompt}"
             response = provider.create_completion(
@@ -246,14 +164,15 @@ class Images():
             )
         else:
             response = provider.create(prompt)
 
         for chunk in response:
             if isinstance(chunk, ImageProviderResponse):
-                return ImagesResponse([Image(image)for image in list(chunk.images)])
+                images = [chunk.images] if isinstance(chunk.images, str) else chunk.images
+                return ImagesResponse([Image(image) for image in images])
         raise NoImageResponseError()
 
     def create_variation(self, image: ImageType, model: str = None, **kwargs):
-        provider = self.models.get(model) if model else self.provider
+        provider = self.models.get(model, self.provider)
         result = None
         if isinstance(provider, type) and issubclass(provider, BaseProvider):
             response = provider.create_completion(
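
With models.get(model, self.provider) resolving the image provider and a single URL string wrapped into a list before building ImagesResponse, the images API can be exercised roughly like this (prompt illustrative):

    from g4f.client import Client

    client = Client()
    result = client.images.generate(prompt="a watercolor fox")
    print(result.data[0].url)  # ImagesResponse.data holds Image objects with a .url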