|
|
|
@ -4,24 +4,22 @@ from colorama import Fore, Style
|
|
|
|
|
|
|
|
|
|
sys.path.append(str(Path(__file__).parent.parent))
|
|
|
|
|
|
|
|
|
|
from g4f import BaseProvider, models, Provider
|
|
|
|
|
|
|
|
|
|
logging = False
|
|
|
|
|
from g4f import Provider, ProviderType, models
|
|
|
|
|
from g4f.Provider import __providers__
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main():
|
|
|
|
|
providers = get_providers()
|
|
|
|
|
failed_providers = []
|
|
|
|
|
|
|
|
|
|
for _provider in providers:
|
|
|
|
|
if _provider.needs_auth:
|
|
|
|
|
for provider in providers:
|
|
|
|
|
if provider.needs_auth:
|
|
|
|
|
continue
|
|
|
|
|
print("Provider:", _provider.__name__)
|
|
|
|
|
result = test(_provider)
|
|
|
|
|
print("Provider:", provider.__name__)
|
|
|
|
|
result = test(provider)
|
|
|
|
|
print("Result:", result)
|
|
|
|
|
if _provider.working and not result:
|
|
|
|
|
failed_providers.append(_provider)
|
|
|
|
|
|
|
|
|
|
if provider.working and not result:
|
|
|
|
|
failed_providers.append(provider)
|
|
|
|
|
print()
|
|
|
|
|
|
|
|
|
|
if failed_providers:
|
|
|
|
@ -32,38 +30,29 @@ def main():
|
|
|
|
|
print(f"{Fore.GREEN + Style.BRIGHT}All providers are working")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_providers() -> list[ProviderType]:
    """Return every registered provider that is neither deprecated nor unfinished."""
    # Build the exclusion set once; membership in dir(...) of either
    # namespace disqualifies a provider.
    excluded = set(dir(Provider.deprecated)) | set(dir(Provider.unfinished))
    return [candidate for candidate in __providers__ if candidate.__name__ not in excluded]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_response(provider: ProviderType) -> str:
    """Send one canned chat request to *provider* and return the full reply.

    Uses the default model with ``stream=False``; the provider may still
    yield the reply in chunks, so the pieces are joined into one string.

    Args:
        provider: The provider class/object to probe; must expose
            ``create_completion``.

    Returns:
        The complete response text.
    """
    response = provider.create_completion(
        model=models.default.name,
        # Grammar of the probe prompt fixed ("Answer in detail much as
        # possible" -> "Answer in as much detail as possible").
        messages=[{"role": "user", "content": "Hello, who are you? Answer in as much detail as possible."}],
        stream=False,
    )
    return "".join(response)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def test(provider: ProviderType) -> bool:
    """Probe *provider* with a single request.

    Returns the non-empty response text on success (truthy, so callers that
    truth-test the result — e.g. ``if not result`` in ``main`` — keep working;
    note the historical ``-> bool`` annotation understates this), and
    ``False`` on any failure.
    """
    try:
        response = create_response(provider)
        # Explicit validation instead of bare `assert`: asserts are stripped
        # under `python -O` (and AssertionError was swallowed by the blanket
        # handler below), which would silently count bad responses as success.
        if not isinstance(response, str) or not response:
            return False
        return response
    except Exception:
        # Best-effort probe: any provider error simply marks it as failing.
        return False
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|