From ebad4bb88842812c9a2633cb9e06071114519314 Mon Sep 17 00:00:00 2001
From: Tatsuya Kyushima <49891479+kyu08@users.noreply.github.com>
Date: Thu, 2 May 2024 18:17:50 +0900
Subject: [PATCH] chore: fix typos (#476)

---
 Argcfile.sh            | 12 ++++++------
 src/client/bedrock.rs  |  4 ++--
 src/client/qianwen.rs  |  2 +-
 src/config/session.rs  |  4 ++--
 src/render/markdown.rs |  4 ++--
 src/utils/tiktoken.rs  |  2 +-
 6 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/Argcfile.sh b/Argcfile.sh
index 68f222c..e187489 100755
--- a/Argcfile.sh
+++ b/Argcfile.sh
@@ -92,7 +92,7 @@ chat() {
         model_env="${env_prefix}_MODEL"
         model="${!model_env}"
     fi
-    argc chat-openai-comptabile \
+    argc chat-openai-compatible \
         --api-base "$api_base" \
         --api-key "$api_key" \
         --model "$model" \
@@ -102,7 +102,7 @@ chat() {
     fi
 }
 
-# @cmd List models by openai-comptabile api
+# @cmd List models by openai-compatible api
 # @arg platform![`_choice_platform`]
 models() {
     for platform_config in "${OPENAI_COMPATIBLE_PLATFORMS[@]}"; do
@@ -121,20 +121,20 @@ models() {
     fi
 }
 
-# @cmd Chat with openai-comptabile api
+# @cmd Chat with openai-compatible api
 # @option --api-base! $$
 # @option --api-key! $$
 # @option -m --model! $$
 # @flag -S --no-stream
 # @arg text~
-chat-openai-comptabile() {
+chat-openai-compatible() {
     _openai_chat "$@"
 }
 
-# @cmd List models by openai-comptabile api
+# @cmd List models by openai-compatible api
 # @option --api-base! $$
 # @option --api-key! $$
-models-openai-comptabile() {
+models-openai-compatible() {
     _openai_models
 }
 
diff --git a/src/client/bedrock.rs b/src/client/bedrock.rs
index d6ab696..6abd939 100644
--- a/src/client/bedrock.rs
+++ b/src/client/bedrock.rs
@@ -141,7 +141,7 @@ async fn send_message(
     match model_category {
         ModelCategory::Anthropic => claude_extract_completion(&data),
         ModelCategory::MetaLlama3 => llama_extract_completion(&data),
-        ModelCategory::Mistral => mistral_extrat_completion(&data),
+        ModelCategory::Mistral => mistral_extract_completion(&data),
     }
 }
 
@@ -283,7 +283,7 @@ fn llama_extract_completion(data: &Value) -> Result<(String, CompletionDetails)>
     Ok((text.to_string(), details))
 }
 
-fn mistral_extrat_completion(data: &Value) -> Result<(String, CompletionDetails)> {
+fn mistral_extract_completion(data: &Value) -> Result<(String, CompletionDetails)> {
     let text = data["outputs"][0]["text"]
         .as_str()
         .ok_or_else(|| anyhow!("Invalid response data: {data}"))?;
diff --git a/src/client/qianwen.rs b/src/client/qianwen.rs
index 76d7436..3fa17e6 100644
--- a/src/client/qianwen.rs
+++ b/src/client/qianwen.rs
@@ -210,7 +210,7 @@ fn extract_completion_text(data: &Value, is_vl: bool) -> Result<(String, Complet
     Ok((text.to_string(), details))
 }
 
-/// Patch messsages, upload embedded images to oss
+/// Patch messages, upload embedded images to oss
 async fn patch_messages(model: &str, api_key: &str, messages: &mut Vec<Message>) -> Result<()> {
     for message in messages {
         if let MessageContent::Array(list) = message.content.borrow_mut() {
diff --git a/src/config/session.rs b/src/config/session.rs
index 4ebf0d3..e6ad647 100644
--- a/src/config/session.rs
+++ b/src/config/session.rs
@@ -124,8 +124,8 @@ impl Session {
             data["save_session"] = save_session.into();
         }
         data["total_tokens"] = tokens.into();
-        if let Some(conext_window) = self.model.max_input_tokens {
-            data["max_input_tokens"] = conext_window.into();
+        if let Some(context_window) = self.model.max_input_tokens {
+            data["max_input_tokens"] = context_window.into();
         }
         if percent != 0.0 {
             data["total/max"] = format!("{}%", percent).into();
diff --git a/src/render/markdown.rs b/src/render/markdown.rs
index 1fb2f5b..3f68ea2 100644
--- a/src/render/markdown.rs
+++ b/src/render/markdown.rs
@@ -140,11 +140,11 @@ impl MarkdownRender {
 
     fn highlight_line(&self, line: &str, syntax: &SyntaxReference, is_code: bool) -> String {
         let ws: String = line.chars().take_while(|c| c.is_whitespace()).collect();
-        let trimed_line: &str = &line[ws.len()..];
+        let trimmed_line: &str = &line[ws.len()..];
         let mut line_highlighted = None;
         if let Some(theme) = &self.options.theme {
             let mut highlighter = HighlightLines::new(syntax, theme);
-            if let Ok(ranges) = highlighter.highlight_line(trimed_line, &self.syntax_set) {
+            if let Ok(ranges) = highlighter.highlight_line(trimmed_line, &self.syntax_set) {
                 line_highlighted = Some(format!(
                     "{ws}{}",
                     as_terminal_escaped(&ranges, self.options.truecolor)
diff --git a/src/utils/tiktoken.rs b/src/utils/tiktoken.rs
index 8b7e712..cd83ed3 100644
--- a/src/utils/tiktoken.rs
+++ b/src/utils/tiktoken.rs
@@ -128,7 +128,7 @@ pub fn byte_pair_split<'a>(piece: &'a [u8], ranks: &HashMap<Vec<u8>, usize>) ->
 // Originally, we had one too! Without it, we were only vaguely faster than Python.
 // I used an RWLock to protect the cache. This didn't seem to hurt single threaded performance
 // noticeably, but it did affect multi-threaded performance. Weirdly, it seemed to affect
-// multi-threaded performance even when I only had readers (maybed I messed something up?).
+// multi-threaded performance even when I only had readers (maybe I messed something up?).
 // Anyway, I realised that we could get rid of the cache, if we treat the set of tokens as a cache!
 // These are exactly the set or merges that are likely to be hot. And now we don't have to think
 // about interior mutability, memory use, or cloning.