diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index e0b12bae..98f1e5b0 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1,3 +1,3 @@
 ko_fi: xtekky
-github: [xtekky]
+github: [xtekky, hlohaus]
 patreon: xtekky
diff --git a/README.md b/README.md
index 965847ee..83d5a45e 100644
--- a/README.md
+++ b/README.md
@@ -188,7 +188,8 @@ image_url = response.data[0].url
 
 **Full Documentation for Python API**
 
-- New Client API like the OpenAI Python library: [/docs/client](/docs/client.md)
+- New AsyncClient API from G4F: [/docs/async_client](/docs/async_client.md)
+- Client API like the OpenAI Python library: [/docs/client](/docs/client.md)
 - Legacy API with python modules: [/docs/legacy](/docs/legacy.md)
 
 #### Web UI
diff --git a/docs/async_client.md b/docs/async_client.md
new file mode 100644
index 00000000..4827d11b
--- /dev/null
+++ b/docs/async_client.md
@@ -0,0 +1,124 @@
+# How to Use the G4F AsyncClient API
+
+The AsyncClient API is the asynchronous counterpart to the standard G4F Client API. It offers the same functionality as the synchronous API, with the added benefit of non-blocking execution, which improves performance for concurrent workloads.
+
+Designed to maintain compatibility with the existing OpenAI API, the G4F AsyncClient API ensures a seamless transition for users already familiar with the OpenAI client.
+
+## Key Features
+
+The G4F AsyncClient API offers several key features:
+
+- **Custom Providers:** The G4F Client API allows you to use custom providers. This feature enhances the flexibility of the API, enabling it to cater to a wide range of use cases.
+
+- **ChatCompletion Interface:** The G4F package provides an interface for interacting with chat models through the ChatCompletion class. This class provides methods for creating both streaming and non-streaming responses.
+
+- **Streaming Responses:** The ChatCompletion.create method can stream the response in chunks as they are received when the stream parameter is set to True.
+
+- **Non-Streaming Responses:** The ChatCompletion.create method can also generate non-streaming responses.
+
+- **Image Generation and Vision Models:** The G4F Client API also supports image generation and vision models, expanding its utility beyond text-based interactions.
+
+
+## Using AsyncClient
+
+### Text Completions:
+
+You can use the ChatCompletions endpoint to generate text completions as follows:
+
+```python
+from g4f.client import AsyncClient
+
+client = AsyncClient()
+
+response = await client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[{"role": "user", "content": "Say this is a test"}],
+    # Add any further parameters here ...
+)
+print(response.choices[0].message.content)
+```
+
+Streaming completions are also supported:
+
+```python
+stream = client.chat.completions.create(
+    model="gpt-4",
+    messages=[{"role": "user", "content": "Say this is a test"}],
+    stream=True,
+    # Add any further parameters here ...
+)
+async for chunk in stream:
+    if chunk.choices[0].delta.content:
+        print(chunk.choices[0].delta.content, end="")
+```
+
+### Image Generation:
+
+You can generate images using a specified prompt:
+
+```python
+response = await client.images.generate(
+    model="dall-e-3",
+    prompt="a white siamese cat",
+    # Add any further parameters here ...
+)
+
+image_url = response.data[0].url
+```
+
+### Example usage with asyncio.gather
+
+Start two tasks at the same time:
+
+```python
+import asyncio
+
+from g4f.client import AsyncClient
+from g4f.Provider import BingCreateImages, OpenaiChat, Gemini
+
+async def main():
+    client = AsyncClient(
+        provider=OpenaiChat,
+        image_provider=Gemini,
+        # other parameters...
+    )
+
+    task1 = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Say this is a test"}],
+    )
+    task2 = client.images.generate(
+        model="dall-e-3",
+        prompt="a white siamese cat",
+    )
+    responses = await asyncio.gather(task1, task2)
+
+    print(responses)
+
+asyncio.run(main())
+```
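+
+### Full example with streaming
+
+For reference, here is a minimal, self-contained script that combines the client setup and streaming patterns shown above; the model name and prompt are placeholders you can replace:
+
+```python
+import asyncio
+
+from g4f.client import AsyncClient
+
+async def main():
+    client = AsyncClient()
+    # Request a streaming completion
+    stream = client.chat.completions.create(
+        model="gpt-3.5-turbo",
+        messages=[{"role": "user", "content": "Write a haiku about the sea"}],
+        stream=True,
+    )
+    # Print each text chunk as soon as it arrives
+    async for chunk in stream:
+        if chunk.choices[0].delta.content:
+            print(chunk.choices[0].delta.content, end="")
+
+asyncio.run(main())
+```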
\ No newline at end of file
diff --git a/g4f/Provider/Bing.py b/g4f/Provider/Bing.py
index aa1b37b0..1e462084 100644
--- a/g4f/Provider/Bing.py
+++ b/g4f/Provider/Bing.py
@@ -46,6 +46,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         messages: Messages,
         proxy: str = None,
         timeout: int = 900,
+        api_key: str = None,
         cookies: Cookies = None,
         connector: BaseConnector = None,
         tone: str = None,
@@ -68,6 +69,10 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
         :return: An asynchronous result object.
         """
         prompt = messages[-1]["content"]
+        if api_key is not None:
+            if cookies is None:
+                cookies = {}
+            cookies["_U"] = api_key
         if context is None:
             context = create_context(messages[:-1]) if len(messages) > 1 else None
         if tone is None:
diff --git a/g4f/Provider/DuckDuckGo.py b/g4f/Provider/DuckDuckGo.py
new file mode 100644
index 00000000..5269ced3
--- /dev/null
+++ b/g4f/Provider/DuckDuckGo.py
@@ -0,0 +1,66 @@
+from __future__ import annotations
+
+import json
+import aiohttp
+
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..typing import AsyncResult, Messages
+from ..requests.raise_for_status import raise_for_status
+
+class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
+    url = "https://duckduckgo.com/duckchat"
+    working = True
+    supports_gpt_35_turbo = True
+    supports_message_history = True
+
+    default_model = "gpt-3.5-turbo-0125"
+    models = ["gpt-3.5-turbo-0125", "claude-instant-1.2"]
+    model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}
+
+    status_url = "https://duckduckgo.com/duckchat/v1/status"
+    chat_url = "https://duckduckgo.com/duckchat/v1/chat"
+    user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0'
+    headers = {
+        'User-Agent': user_agent,
+        'Accept': 'text/event-stream',
+        'Accept-Language': 'de,en-US;q=0.7,en;q=0.3',
+        'Accept-Encoding': 'gzip, deflate, br',
+        'Referer': 'https://duckduckgo.com/',
+        'Content-Type': 'application/json',
+        'Origin': 'https://duckduckgo.com',
+        'Connection': 'keep-alive',
+        'Cookie': 'dcm=1',
+        'Sec-Fetch-Dest': 'empty',
+        'Sec-Fetch-Mode': 'cors',
+        'Sec-Fetch-Site': 'same-origin',
+        'Pragma': 'no-cache',
+        'TE': 'trailers'
+    }
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        **kwargs
+    ) -> AsyncResult:
+        async with aiohttp.ClientSession(headers=cls.headers) as session:
+            # Fetch the "x-vqd-4" session token that the chat endpoint requires
+            async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
+                await raise_for_status(response)
+                vqd_4 = response.headers.get("x-vqd-4")
+            payload = {
+                'model': cls.get_model(model),
+                'messages': messages
+            }
+            # The reply is a server-sent event stream; each "data:" line carries a JSON chunk
+            async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
+                await raise_for_status(response)
+                async for line in response.content:
+                    if line.startswith(b"data: "):
+                        chunk = line[6:]
+                        if chunk.startswith(b"[DONE]"):
+                            break
+                        data = json.loads(chunk)
+                        if "message" in data:
+                            yield data["message"]
\ No newline at end of file
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
index 647780fd..6a05c26e 100644
--- a/g4f/Provider/HuggingFace.py
+++ b/g4f/Provider/HuggingFace.py
@@ -13,6 +13,10 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://huggingface.co/chat"
     working = True
     supports_message_history = True
+    models = [
+        "mistralai/Mixtral-8x7B-Instruct-v0.1",
+        "mistralai/Mistral-7B-Instruct-v0.2"
+    ]
     default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 
     @classmethod
@@ -29,7 +33,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
         temperature: float = 0.7,
         **kwargs
     ) -> AsyncResult:
-        model = cls.get_model(model)
+        model = cls.get_model(model) if not model else model
         headers = {}
         if api_key is not None:
             headers["Authorization"] = f"Bearer {api_key}"
diff --git a/g4f/Provider/WhiteRabbitNeo.py b/g4f/Provider/WhiteRabbitNeo.py
new file mode 100644
index 00000000..339434e6
--- /dev/null
+++ b/g4f/Provider/WhiteRabbitNeo.py
@@ -0,0 +1,58 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, BaseConnector
+
+from ..typing import AsyncResult, Messages, Cookies
+from ..requests.raise_for_status import raise_for_status
+from .base_provider import AsyncGeneratorProvider
+from .helper import get_cookies, get_connector, get_random_string
+
+class WhiteRabbitNeo(AsyncGeneratorProvider):
+    url = "https://www.whiterabbitneo.com"
+    working = True
+    supports_message_history = True
+    needs_auth = True
+
+    @classmethod
+    async def create_async_generator(
+        cls,
+        model: str,
+        messages: Messages,
+        cookies: Cookies = None,
+        connector: BaseConnector = None,
+        proxy: str = None,
+        **kwargs
+    ) -> AsyncResult:
+        if cookies is None:
+            cookies = get_cookies("www.whiterabbitneo.com")
+        headers = {
+            "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
+            "Accept": "*/*",
+            "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+            "Accept-Encoding": "gzip, deflate, br",
+            "Referer": f"{cls.url}/",
+            "Content-Type": "text/plain;charset=UTF-8",
+            "Origin": cls.url,
+            "Connection": "keep-alive",
+            "Sec-Fetch-Dest": "empty",
+            "Sec-Fetch-Mode": "cors",
+            "Sec-Fetch-Site": "same-origin",
+            "TE": "trailers"
+        }
+        async with ClientSession(
+            headers=headers,
+            cookies=cookies,
+            connector=get_connector(connector, proxy)
+        ) as session:
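+            # Build the chat payload; "id" is a random 6-character request identifier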
"https://huggingface.co/chat" working = True supports_message_history = True + models = [ + "mistralai/Mixtral-8x7B-Instruct-v0.1", + "mistralai/Mistral-7B-Instruct-v0.2" + ] default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1" @classmethod @@ -29,7 +33,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin): temperature: float = 0.7, **kwargs ) -> AsyncResult: - model = cls.get_model(model) + model = cls.get_model(model) if not model else model headers = {} if api_key is not None: headers["Authorization"] = f"Bearer {api_key}" diff --git a/g4f/Provider/WhiteRabbitNeo.py b/g4f/Provider/WhiteRabbitNeo.py new file mode 100644 index 00000000..339434e6 --- /dev/null +++ b/g4f/Provider/WhiteRabbitNeo.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from aiohttp import ClientSession, BaseConnector + +from ..typing import AsyncResult, Messages, Cookies +from ..requests.raise_for_status import raise_for_status +from .base_provider import AsyncGeneratorProvider +from .helper import get_cookies, get_connector, get_random_string + +class WhiteRabbitNeo(AsyncGeneratorProvider): + url = "https://www.whiterabbitneo.com" + working = True + supports_message_history = True + needs_auth = True + + @classmethod + async def create_async_generator( + cls, + model: str, + messages: Messages, + cookies: Cookies = None, + connector: BaseConnector = None, + proxy: str = None, + **kwargs + ) -> AsyncResult: + if cookies is None: + cookies = get_cookies("www.whiterabbitneo.com") + headers = { + "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0", + "Accept": "*/*", + "Accept-Language": "de,en-US;q=0.7,en;q=0.3", + "Accept-Encoding": "gzip, deflate, br", + "Referer": f"{cls.url}/", + "Content-Type": "text/plain;charset=UTF-8", + "Origin": cls.url, + "Connection": "keep-alive", + "Sec-Fetch-Dest": "empty", + "Sec-Fetch-Mode": "cors", + "Sec-Fetch-Site": "same-origin", + "TE": "trailers" + } + async with ClientSession( + headers=headers, + cookies=cookies, + connector=get_connector(connector, proxy) + ) as session: + data = { + "messages": messages, + "id": get_random_string(6), + "enhancePrompt": False, + "useFunctions": False + } + async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response: + await raise_for_status(response) + async for chunk in response.content.iter_any(): + if chunk: + yield chunk.decode(errors="ignore") \ No newline at end of file diff --git a/g4f/Provider/__init__.py b/g4f/Provider/__init__.py index 1db29e19..b567305c 100644 --- a/g4f/Provider/__init__.py +++ b/g4f/Provider/__init__.py @@ -21,6 +21,7 @@ from .ChatgptFree import ChatgptFree from .ChatgptNext import ChatgptNext from .ChatgptX import ChatgptX from .DeepInfra import DeepInfra +from .DuckDuckGo import DuckDuckGo from .FlowGpt import FlowGpt from .FreeChatgpt import FreeChatgpt from .FreeGpt import FreeGpt @@ -37,6 +38,7 @@ from .Local import Local from .PerplexityLabs import PerplexityLabs from .Pi import Pi from .Vercel import Vercel +from .WhiteRabbitNeo import WhiteRabbitNeo from .You import You import sys diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html index 7103b9c3..d6ad5241 100644 --- a/g4f/gui/client/index.html +++ b/g4f/gui/client/index.html @@ -37,6 +37,7 @@ import llamaTokenizer from "llama-tokenizer-js" +