From 3b0adb20bfa4acb739106b9456dfda657c2af283 Mon Sep 17 00:00:00 2001
From: RainRat
Date: Thu, 2 May 2024 12:44:41 -0700
Subject: [PATCH] fix typos

---
 README.md                                 | 2 +-
 docs/Installation/ollama.md               | 4 ++--
 ui/src/lib/components/MessageInput.svelte | 6 +++---
 3 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/README.md b/README.md
index 63c8961d..e4c06b36 100644
--- a/README.md
+++ b/README.md
@@ -65,7 +65,7 @@ Version's requirements
 
 - Install uv - Python Package manager [download](https://github.com/astral-sh/uv)
 - Install bun - JavaScript runtime [download](https://bun.sh/docs/installation)
-- For ollama [ollama setup guide](docs/Installation/ollama.md) (optinal: if you don't want to use the local models then you can skip this step)
+- For ollama [ollama setup guide](docs/Installation/ollama.md) (optional: if you don't want to use the local models then you can skip this step)
 - For API models, configure the API keys via setting page in UI.
 
 
diff --git a/docs/Installation/ollama.md b/docs/Installation/ollama.md
index b2fc1572..530bd47e 100644
--- a/docs/Installation/ollama.md
+++ b/docs/Installation/ollama.md
@@ -1,6 +1,6 @@
 # Ollama Installation Guide
 
-This guide will help you set up Ollama for Devika. Ollama is a tool that allows you to run open-source large language models (LLMs) locally on your machine. It supports varity of models like Llama-2, mistral, code-llama and many more.
+This guide will help you set up Ollama for Devika. Ollama is a tool that allows you to run open-source large language models (LLMs) locally on your machine. It supports a variety of models like Llama-2, mistral, code-llama and many more.
 
 ## Installation
 
@@ -17,4 +17,4 @@ This guide will help you set up Ollama for Devika. Ollama is a tool that allows
 ## Devika Configuration
 
 - if you serve the Ollama on a different address, you can change the port in the `config.toml` file or you can change it via UI.
-- if you are using the default address, devika will automatically detect the server and and fetch the models list.
+- if you are using the default address, devika will automatically detect the server and fetch the models list.
diff --git a/ui/src/lib/components/MessageInput.svelte b/ui/src/lib/components/MessageInput.svelte
index 6eab3ed1..0b8c1c51 100644
--- a/ui/src/lib/components/MessageInput.svelte
+++ b/ui/src/lib/components/MessageInput.svelte
@@ -20,7 +20,7 @@
   async function handleSendMessage() {
     const projectName = localStorage.getItem("selectedProject");
     const selectedModel = localStorage.getItem("selectedModel");
-    const serachEngine = localStorage.getItem("selectedSearchEngine");
+    const searchEngine = localStorage.getItem("selectedSearchEngine");
 
     if (!projectName) {
       alert("Please select a project first!");
@@ -37,13 +37,13 @@
       message: messageInput,
       base_model: selectedModel,
       project_name: projectName,
-      search_engine: serachEngine,
+      search_engine: searchEngine,
     });
     console.log({
       message: messageInput,
       base_model: selectedModel,
       project_name: projectName,
-      search_engine: serachEngine,
+      search_engine: searchEngine,
     });
 
     messageInput = "";