Query engine chat bot. #15157
Replies: 1 comment
-
To resolve the connection error with Azure OpenAI, you can use a helper that refreshes the Microsoft Entra ID (Azure AD) token before it expires. Here is the relevant code snippet:

import time
from azure.core.exceptions import ClientAuthenticationError
from azure.identity import DefaultAzureCredential
def refresh_openai_azuread_token(azure_ad_token: Any = None) -> Any:
    """
    Return a valid Microsoft Entra ID (former Azure AD) token, refreshing it
    when it is missing or about to expire.

    Checks the validity of the associated token, if any, and tries to refresh it
    using the credentials available in the current context. Different
    authentication methods are tried, in order, until a successful one is found,
    as defined by the package `azure-identity`.

    Args:
        azure_ad_token: A previously acquired token object (expected to expose
            an ``expires_on`` timestamp), or None to force acquisition.

    Returns:
        A token for the Azure Cognitive Services scope.

    Raises:
        ValueError: If no available authentication method succeeds.
    """
    # Refresh when there is no token or it expires within the next 60 seconds.
    if not azure_ad_token or azure_ad_token.expires_on < time.time() + 60:
        try:
            credential = DefaultAzureCredential()
            azure_ad_token = credential.get_token(
                "https://cognitiveservices.azure.com/.default"
            )
        except ClientAuthenticationError as err:
            raise ValueError(
                "Unable to acquire a valid Microsoft Entra ID (former Azure AD) token for "
                f"the resource due to the following error: {err.message}"
            ) from err
    return azure_ad_token

# To use this function, call it and handle any exceptions that might be raised
# due to authentication errors. This ensures that you have a valid token for
# connecting to Azure OpenAI services. Additionally, ensure that you have set
# the necessary credentials and endpoint configurations correctly when
# initializing the client:

from llama_index.legacy.llms.azure_openai import AzureOpenAI
azure_openai = AzureOpenAI(
model="gpt-35-turbo",
engine="YOUR_DEPLOYMENT_NAME",
azure_endpoint="https://YOUR_RESOURCE_NAME.openai.azure.com/",
api_key="YOUR_API_KEY",
api_version="2023-05-15"
) Make sure to validate the environment variables and credentials as shown in the @root_validator(pre=True)
def validate_env(cls, values: Dict[str, Any]) -> Dict[str, Any]:
    """Validate that the credentials required for Azure OpenAI are set.

    Args:
        values: Raw field values collected by pydantic before model creation.

    Returns:
        The ``values`` mapping, unchanged, when validation passes.

    Raises:
        ValueError: If no Azure endpoint is configured, or if the API version
            is missing.
    """
    # The default OpenAI base URL means the client was never pointed at an
    # Azure resource, so an explicit azure_endpoint is mandatory.
    if (
        values["api_base"] == "https://api.openai.com/v1"
        and values["azure_endpoint"] is None
    ):
        raise ValueError(
            "You must set OPENAI_API_BASE to your Azure endpoint. "
            "It should look like https://YOUR_RESOURCE_NAME.openai.azure.com/"
        )
    if values["api_version"] is None:
        raise ValueError("You must set OPENAI_API_VERSION for Azure OpenAI.")
    return values

# By ensuring that your token is valid and your environment variables are
# correctly set, you should be able to resolve the connection error with
# Azure OpenAI [1][2][3].
Beta Was this translation helpful? Give feedback.
-
@dosu
from llama_index.core.query_pipeline import InputComponent
from llama_index.core.prompts import PromptTemplate

# Define the input component: the entry point that receives the user's
# query string and chat history.
input_component = InputComponent()
# Define the rewrite template: condenses the running conversation plus the
# latest user message into a standalone query for a semantic search engine.
rewrite = (
    "Please write a query to a semantic search engine using the current conversation.\n"
    "\n"
    "\n"
    "{chat_history_str}"
    "\n"
    "\n"
    "Latest message: {query_str}\n"
    'Query:"""\n'
)
rewrite_template = PromptTemplate(rewrite)
# Step 4: Define ArgPack Component
# Packs multiple incoming values into a single list for the next module.
from llama_index.core.query_pipeline import ArgPackComponent

argpack_component = ArgPackComponent()
# Step 5: Define Response Component
from typing import Any, Dict, List, Optional

from llama_index.core.bridge.pydantic import Field
from llama_index.core.llms import ChatMessage
from llama_index.core.query_pipeline import CustomQueryComponent
from llama_index.core.schema import NodeWithScore

# Prompt used to inject retrieved node context ahead of the user's question.
DEFAULT_CONTEXT_PROMPT = (
    "Here is some context that may be relevant:\n"
    "-----\n"
    "{node_context}\n"
    "-----\n"
    "Please write a response to the following question, using the above context:\n"
    "{query_str}\n"
)
class ResponseWithChatHistory(CustomQueryComponent):
    """Query component that answers a user message using chat history and
    retrieved context nodes."""

    # NOTE(review): AzureOpenAIWithRetries is not defined in this snippet —
    # presumably a retry-wrapped Azure OpenAI LLM; confirm its origin.
    llm: AzureOpenAIWithRetries = Field(..., description="Azure OpenAI LLM with retries")
    system_prompt: Optional[str] = Field(
        default=None, description="System prompt to use for the LLM"
    )
    context_prompt: str = Field(
        default=DEFAULT_CONTEXT_PROMPT,
        description="Context prompt to use for the LLM",
    )


response_component = ResponseWithChatHistory(
    llm=llm,
    system_prompt=(
        "You are a Q&A system. You will be provided with the previous chat history, "
        "as well as possibly relevant context, to assist in answering a user message."
    ),
)
# Step 6: Define the Query Pipeline
from llama_index.core.query_pipeline import QueryPipeline

pipeline = QueryPipeline(
    modules={
        "input": input_component,
        "rewrite_template": rewrite_template,
        "llm": llm,
        "join": argpack_component,
        "response_component": response_component,
    },
    verbose=False,
)

# Add links to the pipeline: input feeds the rewrite prompt, the LLM produces
# the rewritten query, results are packed by "join" and handed to the response
# component together with the original query and chat history.
# NOTE(review): both "llm" -> "join" links feed the same source into two dest
# keys; the full example routes these through retrievers — confirm upstream.
pipeline.add_link("input", "rewrite_template", src_key="query_str", dest_key="query_str")
pipeline.add_link("input", "rewrite_template", src_key="chat_history_str", dest_key="chat_history_str")
pipeline.add_link("rewrite_template", "llm")
pipeline.add_link("llm", "join", dest_key="rewrite_nodes")
pipeline.add_link("llm", "join", dest_key="query_nodes")
pipeline.add_link("join", "response_component", dest_key="nodes")
pipeline.add_link("input", "response_component", src_key="query_str", dest_key="query_str")
pipeline.add_link("input", "response_component", src_key="chat_history", dest_key="chat_history")
# Step 7: Initialize Memory Buffer
from llama_index.core.memory import ChatMemoryBuffer

# Keep roughly the most recent 8000 tokens of conversation.
pipeline_memory = ChatMemoryBuffer.from_defaults(token_limit=8000)
# Step 8: Simulate a Chat Session
# Simulated user messages, reproduced verbatim from the example (including
# the typos in the third-party sample), fed through the pipeline in order.
user_inputs = [
    "Hello!",
    "How does tool-use work with Claude-3 work?",
    "What models support it?",
    "Thanks, that what I needed to know!",
]
# Drive the pipeline once per simulated user message, pulling the running
# chat history from the memory buffer on each turn.
# NOTE(review): the loop body is truncated in this snippet — the full example
# presumably runs the pipeline with the message and updates memory; confirm
# against the original tutorial.
for msg in user_inputs:
    # Get memory
    chat_history = pipeline_memory.get()
Retrying llama_index.llms.openai.base.OpenAI._chat in 1.5795427245300968 seconds as it raised APIConnectionError: Connection error..
Beta Was this translation helpful? Give feedback.
All reactions