Fix the run_api_args function in cli.py and the get_model function in base_provider.py, and default the model to phi3:latest if available

pull/1926/head
hdsz25 4 weeks ago
parent 9680639389
commit f2dcb9f317

@@ -17,9 +17,10 @@ class Ollama(Openai):
url = 'http://127.0.0.1:11434/api/tags'
models = requests.get(url).json()["models"]
cls.models = [model['name'] for model in models]
cls.default_model = cls.models[0]
cls.default_model = 'phi3:latest' if 'phi3:latest' in cls.models else cls.models[0]
return cls.models
@classmethod
def create_async_generator(
cls,
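For readers skimming the diff: the changed line only prefers phi3:latest when the local Ollama daemon actually lists it, otherwise it keeps the first installed model. A minimal standalone sketch of that selection logic, assuming an Ollama daemon on the default port; the helper name pick_default_model is illustrative and not part of g4f:

import requests

def pick_default_model(preferred: str = "phi3:latest") -> str:
    # Ask the local Ollama daemon which models are installed (same endpoint as the hunk above).
    tags = requests.get("http://127.0.0.1:11434/api/tags").json()["models"]
    models = [entry["name"] for entry in tags]
    # Prefer the requested tag when present, otherwise fall back to the first installed model.
    return preferred if preferred in models else models[0]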

@@ -216,10 +216,10 @@ def run_api(
if bind is not None:
host, port = bind.split(":")
uvicorn.run(
f"g4f.api:create_app{'_debug' if debug else ''}",
f"g4f.api:{'create_app_debug' if debug else 'create_app'}",
host=host, port=int(port),
workers=workers,
use_colors=use_colors,
factory=True,
reload=debug
)
)
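Note that the old and new f-strings resolve to the same import strings, g4f.api:create_app or g4f.api:create_app_debug; the rewrite simply spells out both targets. Passing an import string together with factory=True matters once reload or workers are used, because the reloader and worker subprocesses need to import the app themselves. A minimal sketch of that pattern, using a hypothetical module name main and a plain FastAPI app:

import uvicorn
from fastapi import FastAPI

def create_app() -> FastAPI:
    # App factory: uvicorn calls this itself because factory=True is passed below.
    return FastAPI()

if __name__ == "__main__":
    # The "module:callable" import string is what lets reload/workers re-import
    # the factory in child processes instead of receiving an in-memory object.
    uvicorn.run("main:create_app", host="127.0.0.1", port=1337, factory=True, reload=True)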

@@ -10,14 +10,11 @@ def main():
subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
api_parser = subparsers.add_parser("api")
api_parser.add_argument("--bind", default="localhost:1337", help="The bind string.")
api_parser.add_argument("--provider", default="Bing", help="Provider used with default:Bing.")
api_parser.add_argument("--model", default='', help="Model used with default:''.")
api_parser.add_argument("--proxy", default=None, help="proxy used with default:None")
api_parser.add_argument("--debug", action="store_true", help="Enable verbose logging.")
api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --debug and --workers)")
api_parser.add_argument("--model", default='', help="Default model for chat completion. (incompatible with --debug and --workers)")
api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
default=None, help="Default provider for chat completion. (incompatible with --debug and --workers)")
api_parser.add_argument("--proxy", default=None, help="Default used proxy.")
default="Bing", help="Default provider for chat completion. (incompatible with --debug and --workers)")
api_parser.add_argument("--proxy", default=None, help="proxy used with default:None")
api_parser.add_argument("--workers", type=int, default=None, help="Number of workers.")
api_parser.add_argument("--disable-colors", action="store_true", help="Don't use colors.")
api_parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files.")
@@ -38,15 +35,14 @@ def main():
def run_api_args(args):
from g4f.api import AppConfig, run_api, ChatCompletionsForm
AppConfig.set_config(
ignore_cookie_files=args.ignore_cookie_files,
ignored_providers=args.ignored_providers,
g4f_api_key=args.g4f_api_key,
defaults={
"model": args.model,
"provider": args.provider,
"proxy": args.proxy
}
AppConfig.set_ignore_cookie_files(
args.ignore_cookie_files
)
AppConfig.set_list_ignored_providers(
args.ignored_providers
)
AppConfig.set_g4f_api_key(
args.g4f_api_key
)
ChatCompletionsForm.set_provider(
args.provider
@@ -64,5 +60,6 @@ def run_api_args(args):
use_colors=not args.disable_colors
)
if __name__ == "__main__":
main()
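The hunk above swaps a single keyword-based AppConfig.set_config(...) call for individual classmethod setters. A generic sketch of that setter style, keeping the attribute names used in the diff but simplified; this is an illustration of the pattern, not g4f's actual implementation:

from typing import List, Optional

class AppConfig:
    # Class-level settings shared by the API handlers.
    ignore_cookie_files: bool = False
    list_ignored_providers: List[str] = []
    g4f_api_key: Optional[str] = None

    @classmethod
    def set_ignore_cookie_files(cls, value: bool) -> None:
        cls.ignore_cookie_files = value

    @classmethod
    def set_list_ignored_providers(cls, providers: Optional[List[str]]) -> None:
        cls.list_ignored_providers = providers or []

    @classmethod
    def set_g4f_api_key(cls, key: Optional[str]) -> None:
        cls.g4f_api_key = key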

@@ -283,6 +283,11 @@ class ProviderModelMixin:
@classmethod
def get_model(cls, model: str) -> str:
# Run get_models() first so class-level defaults are initialized (at least for Ollama).
cls.get_models()
# For example, if the user passes "phi3", it will match "phi3:latest" as listed by cls.get_models().
if model and model.find(':') == -1:
model = model + ':latest'
if not model and cls.default_model is not None:
model = cls.default_model
elif model in cls.model_aliases:
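Pulled out of the class for illustration, the added tag normalization reads roughly as below; the helper name normalize_model is hypothetical, but the handling mirrors the lines above:

from typing import Optional

def normalize_model(model: str, default_model: Optional[str]) -> str:
    # A bare name like "phi3" gets the ":latest" tag so it matches Ollama's listed "phi3:latest".
    if model and ':' not in model:
        model = f"{model}:latest"
    # Fall back to the provider's default when no model was requested.
    if not model and default_model is not None:
        model = default_model
    return model

print(normalize_model("phi3", "phi3:latest"))  # -> phi3:latest
print(normalize_model("", "phi3:latest"))      # -> phi3:latest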

@@ -87,7 +87,7 @@ DESCRIPTION = (
# Setting up
setup(
name='g4f',
version="0.3.0.8.dev1",
version="0.3.0.9.dev0",
author='Tekky',
author_email='<support@g4f.ai>',
description=DESCRIPTION,
