From f45d666439f1df3f2d09a6cd965b01a50a6214ca Mon Sep 17 00:00:00 2001
From: "0xThresh.eth" <0xthresh@protonmail.com>
Date: Sun, 11 Aug 2024 22:36:51 -0600
Subject: [PATCH] Made env vars more generic, added Ollama port to
 dev-docker.sh

---
 dev-docker.sh                              |  2 +-
 .../pipelines/rag/text_to_sql_pipeline.py  | 24 +++++++++----------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/dev-docker.sh b/dev-docker.sh
index a502b050..c9d256aa 100755
--- a/dev-docker.sh
+++ b/dev-docker.sh
@@ -6,4 +6,4 @@
 # Runs the containers with Ollama image for Open WebUI and the Pipelines endpoint in place
 docker run -d -p 9099:9099 --add-host=host.docker.internal:host-gateway -v pipelines:/app/pipelines --name pipelines --restart always --env-file .env ghcr.io/open-webui/pipelines:latest
 
-docker run -d -p 3000:8080 -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! ghcr.io/open-webui/open-webui:ollama
\ No newline at end of file
+docker run -d -p 3000:8080 -p 11434:11434 --add-host=host.docker.internal:host-gateway -v ~/.ollama:/root/.ollama -v open-webui:/app/backend/data --name open-webui --restart always -e OPENAI_API_BASE_URL=http://host.docker.internal:9099 -e OPENAI_API_KEY=0p3n-w3bu! -e OLLAMA_HOST=0.0.0.0 ghcr.io/open-webui/open-webui:ollama
\ No newline at end of file

diff --git a/examples/pipelines/rag/text_to_sql_pipeline.py b/examples/pipelines/rag/text_to_sql_pipeline.py
index 22f936fb..31471ad4 100644
--- a/examples/pipelines/rag/text_to_sql_pipeline.py
+++ b/examples/pipelines/rag/text_to_sql_pipeline.py
@@ -1,8 +1,8 @@
 """
 title: Llama Index DB Pipeline
 author: 0xThresh
-date: 2024-07-01
-version: 1.0
+date: 2024-08-11
+version: 1.1
 license: MIT
 description: A pipeline for using text-to-SQL for retrieving relevant information from a database using the Llama Index library.
 requirements: llama_index, sqlalchemy, psycopg2-binary
@@ -24,7 +24,7 @@ class Valves(BaseModel):
         DB_USER: str
         DB_PASSWORD: str
         DB_DATABASE: str
-        DB_TABLES: list[str]
+        DB_TABLE: str
 
         OLLAMA_HOST: str
         TEXT_TO_SQL_MODEL: str
@@ -39,14 +39,14 @@ def __init__(self):
         self.valves = self.Valves(
             **{
                 "pipelines": ["*"],  # Connect to all pipelines
-                "DB_HOST": os.getenv("PG_HOST", "http://localhost:5432"),  # Database hostname
-                "DB_PORT": os.getenv("PG_PORT", 5432),  # Database port
-                "DB_USER": os.getenv("PG_USER", "postgres"),  # User to connect to the database with
-                "DB_PASSWORD": os.getenv("PG_PASSWORD", "password"),  # Password to connect to the database with
-                "DB_DATABASE": os.getenv("PG_DB", "postgres"),  # Database to select on the DB instance
-                "DB_TABLES": ["albums"],  # Table(s) to run queries against
+                "DB_HOST": os.getenv("DB_HOST", "localhost"),  # Database hostname (hostname only, no scheme)
+                "DB_PORT": os.getenv("DB_PORT", 5432),  # Database port
+                "DB_USER": os.getenv("DB_USER", "postgres"),  # User to connect to the database with
+                "DB_PASSWORD": os.getenv("DB_PASSWORD", "password"),  # Password to connect to the database with
+                "DB_DATABASE": os.getenv("DB_DATABASE", "postgres"),  # Database to select on the DB instance
+                "DB_TABLE": os.getenv("DB_TABLE", "table_name"),  # Table to run queries against
                 "OLLAMA_HOST": os.getenv("OLLAMA_HOST", "http://host.docker.internal:11434"),  # Make sure to update with the URL of your Ollama host, such as http://localhost:11434 or remote server address
-                "TEXT_TO_SQL_MODEL": "phi3:latest"  # Model to use for text-to-SQL generation
+                "TEXT_TO_SQL_MODEL": os.getenv("TEXT_TO_SQL_MODEL", "llama3.1:latest")  # Model to use for text-to-SQL generation
             }
         )
 
@@ -69,7 +69,7 @@ def pipe(
         # Debug logging is required to see what SQL query is generated by the LlamaIndex library; enable on Pipelines server if needed
 
         # Create database reader for Postgres
-        sql_database = SQLDatabase(self.engine, include_tables=self.valves.DB_TABLES)
+        sql_database = SQLDatabase(self.engine, include_tables=[self.valves.DB_TABLE])
 
         # Set up LLM connection; uses phi3 model with 128k context limit since some queries have returned 20k+ tokens
         llm = Ollama(model=self.valves.TEXT_TO_SQL_MODEL, base_url=self.valves.OLLAMA_HOST, request_timeout=180.0, context_window=30000)
@@ -99,7 +99,7 @@ def pipe(
         query_engine = NLSQLTableQueryEngine(
             sql_database=sql_database,
-            tables=self.valves.DB_TABLES,
+            tables=[self.valves.DB_TABLE],
             llm=llm,
             embed_model="local",
             text_to_sql_prompt=text_to_sql_template,
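
A minimal example .env to pair with the renamed variables (a sketch only:
every value below is a placeholder, and "albums" is just the sample table
name from the previous default). The pipelines container loads this file
through the --env-file .env flag in dev-docker.sh:

    # Placeholder values; replace with your own connection details.
    # Note: docker run --env-file treats everything after '=' as the value,
    # so keep comments on their own lines like this.
    DB_HOST=localhost
    DB_PORT=5432
    DB_USER=postgres
    DB_PASSWORD=password
    DB_DATABASE=postgres
    # Single table exposed to text-to-SQL ("albums" was the old sample default)
    DB_TABLE=albums
    OLLAMA_HOST=http://host.docker.internal:11434
    TEXT_TO_SQL_MODEL=llama3.1:latest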