# aichat/config.example.yaml
model: openai:gpt-3.5-turbo # Specify the language model to use
temperature: null # Set default temperature parameter
top_p: null # Set default top-p parameter
save: true # Indicates whether to persist the message
save_session: null # Controls the persistence of the session, if null, asking the user
highlight: true # Controls syntax highlighting
light_theme: false # Activates a light color theme when true. ENV: AICHAT_LIGHT_THEME
# Quoted: an unquoted `no` is parsed as boolean false by YAML 1.1 parsers;
# the intended value here is the literal string "no" (no, auto, <max-width>)
wrap: 'no' # Controls text wrapping (no, auto, <max-width>)
wrap_code: false # Enables or disables wrapping of code blocks
auto_copy: false # Enables or disables automatic copying the last LLM response to the clipboard
keybindings: emacs # Choose keybinding style (emacs, vi)
prelude: null # Set a default role or session to start with (role:<name>, session:<name>)
# Command that will be used to edit the current line buffer with ctrl+o
# if unset fallback to $EDITOR and $VISUAL
buffer_editor: null
# Compress session when token count reaches or exceeds this threshold (must be at least 1000)
compress_threshold: 1000
# Text prompt used for creating a concise summary of session message
summarize_prompt: 'Summarize the discussion briefly in 200 words or less to use as a prompt for future context.'
# Text prompt used for including the summary of the entire session
summary_prompt: 'This is a summary of the chat history as a recap: '
# Custom REPL prompt, see https://github.com/sigoden/aichat/wiki/Custom-REPL-Prompt
left_prompt: '{color.green}{?session {session}{?role /}}{role}{color.cyan}{?session )}{!session >}{color.reset} '
right_prompt: '{color.purple}{?session {?consume_tokens {consume_tokens}({consume_percent}%)}{!consume_tokens {consume_tokens}}}{color.reset}'
clients:
  # All clients have the following configuration:
  # - type: xxxx
  #   name: xxxx # Only use it to distinguish clients with the same client type. Optional
  #   models:
  #     - name: xxxx # The model name
  #       max_input_tokens: 100000
  #       max_output_tokens: 4096
  #       supports_vision: true
  #       extra_fields: # Set custom parameters, will merge with the body json
  #         key: value
  #   extra:
  #     proxy: socks5://127.0.0.1:1080 # Specify https/socks5 proxy server. ENV: HTTPS_PROXY/ALL_PROXY
  #     connect_timeout: 10 # Set a timeout in seconds for connect to server

  # See https://platform.openai.com/docs/quickstart
  - type: openai
    api_key: sk-xxx # ENV: {client_name}_API_KEY
    api_base: https://api.openai.com/v1 # ENV: {client_name}_API_BASE
    organization_id: org-xxx # Optional

  # See https://ai.google.dev/docs
  - type: gemini
    api_key: xxx # ENV: {client_name}_API_KEY
    # possible values: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
    block_threshold: BLOCK_NONE # Optional

  # See https://docs.anthropic.com/claude/reference/getting-started-with-the-api
  - type: claude
    api_key: sk-ant-xxx # ENV: {client_name}_API_KEY

  # See https://docs.mistral.ai/
  - type: mistral
    api_key: xxx # ENV: {client_name}_API_KEY

  # See https://docs.cohere.com/docs/the-cohere-platform
  - type: cohere
    api_key: xxx # ENV: {client_name}_API_KEY

  # See https://docs.perplexity.ai/docs/getting-started
  - type: perplexity
    api_key: pplx-xxx # ENV: {client_name}_API_KEY

  # See https://console.groq.com/docs/quickstart
  - type: groq
    api_key: gsk_xxx # ENV: {client_name}_API_KEY

  # See https://github.com/jmorganca/ollama
  - type: ollama
    api_base: http://localhost:11434 # ENV: {client_name}_API_BASE
    api_auth: Basic xxx # ENV: {client_name}_API_AUTH
    chat_endpoint: /api/chat # Optional
    models: # Required
      - name: llama3
        max_input_tokens: 8192

  # See https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart
  - type: azure-openai
    api_base: https://{RESOURCE}.openai.azure.com # ENV: {client_name}_API_BASE
    api_key: xxx # ENV: {client_name}_API_KEY
    models: # Required
      - name: gpt-35-turbo # Model deployment name
        max_input_tokens: 8192

  # See https://cloud.google.com/vertex-ai
  - type: vertexai
    project_id: xxx # ENV: {client_name}_PROJECT_ID
    location: xxx # ENV: {client_name}_LOCATION
    # Specifies a application-default-credentials (adc) file, Optional field
    # Run `gcloud auth application-default login` to init the adc file
    # see https://cloud.google.com/docs/authentication/external/set-up-adc
    adc_file: <path-to/gcloud/application_default_credentials.json>
    # Optional field, possible values: BLOCK_NONE, BLOCK_ONLY_HIGH, BLOCK_MEDIUM_AND_ABOVE, BLOCK_LOW_AND_ABOVE
    block_threshold: BLOCK_ONLY_HIGH

  - type: cloudflare
    account_id: xxx # ENV: {client_name}_ACCOUNT_ID
    api_key: xxx # ENV: {client_name}_API_KEY

  # See https://docs.aws.amazon.com/bedrock/latest/userguide/
  - type: bedrock
    access_key_id: xxx # ENV: {client_name}_ACCESS_KEY_ID
    secret_access_key: xxx # ENV: {client_name}_SECRET_ACCESS_KEY
    region: xxx # ENV: {client_name}_REGION

  # See https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html
  - type: ernie
    api_key: xxx # ENV: {client_name}_API_KEY
    secret_key: xxxx # ENV: {client_name}_SECRET_KEY

  # See https://help.aliyun.com/zh/dashscope/
  - type: qianwen
    api_key: sk-xxx # ENV: {client_name}_API_KEY

  # See https://platform.moonshot.cn/docs/intro
  - type: moonshot
    api_key: sk-xxx # ENV: {client_name}_API_KEY

  # For any platform compatible with OpenAI's API
  - type: openai-compatible
    name: localai
    api_base: http://localhost:8080/v1 # ENV: {client_name}_API_BASE
    api_key: sk-xxx # ENV: {client_name}_API_KEY
    chat_endpoint: /chat/completions # Optional
    models: # Required
      - name: llama3
        max_input_tokens: 8192

  # See https://docs.endpoints.anyscale.com/
  - type: openai-compatible
    name: anyscale
    api_base: https://api.endpoints.anyscale.com/v1
    api_key: xxx
    models:
      # https://docs.endpoints.anyscale.com/text-generation/query-a-model#select-a-model
      - name: meta-llama/Meta-Llama-3-70B-Instruct
        max_input_tokens: 8192
        input_price: 1
        output_price: 1

  # See https://deepinfra.com/docs
  - type: openai-compatible
    name: deepinfra
    api_base: https://api.deepinfra.com/v1/openai
    api_key: xxx
    models:
      # https://deepinfra.com/models
      - name: meta-llama/Meta-Llama-3-70B-Instruct
        max_input_tokens: 8192
        input_price: 0.59
        output_price: 0.79

  # See https://readme.fireworks.ai/docs/quickstart
  - type: openai-compatible
    name: fireworks
    api_base: https://api.fireworks.ai/inference/v1
    api_key: xxx
    models:
      # https://fireworks.ai/models
      - name: accounts/fireworks/models/llama-v3-70b-instruct
        max_input_tokens: 8192
        input_price: 0.9
        output_price: 0.9

  # See https://octo.ai/docs/getting-started/quickstart
  - type: openai-compatible
    name: octoai
    api_base: https://text.octoai.run/v1
    api_key: xxx
    models:
      # https://octo.ai/docs/getting-started/inference-models
      - name: meta-llama-3-70b-instruct
        max_input_tokens: 8192
        input_price: 0.86
        output_price: 0.86

  # See https://docs.together.ai/docs/quickstart
  - type: openai-compatible
    name: together
    api_base: https://api.together.xyz/v1
    api_key: xxx
    models:
      # https://docs.together.ai/docs/inference-models
      - name: meta-llama/Llama-3-70b-chat-hf
        max_input_tokens: 8192
        input_price: 0.9
        output_price: 0.9