Skip to content

Commit

Permalink
feat: split up endpoint tests (#1432)
Browse files Browse the repository at this point in the history
Co-authored-by: cpacker <[email protected]>
  • Loading branch information
sarahwooders and cpacker authored Jun 5, 2024
1 parent 786b7c6 commit 8fb2b61
Show file tree
Hide file tree
Showing 14 changed files with 244 additions and 204 deletions.
28 changes: 28 additions & 0 deletions .github/workflows/test_local.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# CI workflow: runs the local-embedding endpoint test on every push/PR to main.
name: Endpoint (Local)

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          # "local" extra pulls in the local embedding backend (BAAI/bge-small).
          install-args: "-E dev -E local"

      - name: Test embedding endpoint
        env:
          # NOTE(review): an OpenAI key for a *local* embedding test looks odd —
          # presumably other fixtures in tests/test_endpoints.py need it; confirm.
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_local
30 changes: 30 additions & 0 deletions .github/workflows/test_memgpt_hosted.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# CI workflow: exercises the MemGPT-hosted LLM and embedding endpoints
# on every push/PR to main. No API key needed — the hosted service is public.
name: Endpoint (MemGPT)

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev"

      - name: Test LLM endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_llm_endpoint_memgpt_hosted

      - name: Test embedding endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_memgpt_hosted
43 changes: 43 additions & 0 deletions .github/workflows/test_openai.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
# CI workflow: exercises the OpenAI LLM and embedding endpoints on every
# push/PR to main. Requires the OPENAI_API_KEY repository secret.
name: Endpoint (OpenAI)

# Workflow-level env makes the key visible to every step below; the original
# also repeated it per-step, which is redundant — declared once here instead.
env:
  OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: "Setup Python, Poetry and Dependencies"
        uses: packetcoders/action-setup-cache-python-poetry@main
        with:
          python-version: "3.12"
          poetry-version: "1.8.2"
          install-args: "-E dev"

      - name: Initialize credentials
        run: |
          poetry run memgpt quickstart --backend openai

      - name: Test LLM endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_llm_endpoint_openai

      - name: Test embedding endpoint
        run: |
          poetry run pytest -s -vv tests/test_endpoints.py::test_embedding_endpoint_openai
16 changes: 3 additions & 13 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ name: Run All pytest Tests

env:
MEMGPT_PGURI: ${{ secrets.MEMGPT_PGURI }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

on:
push:
Expand All @@ -26,17 +25,11 @@ jobs:
with:
python-version: "3.12"
poetry-version: "1.8.2"
install-args: "--all-extras"
install-args: "-E dev -E postgres -E milvus"

- name: Initialize credentials
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
run: |
if [ -z "$OPENAI_API_KEY" ]; then
poetry run memgpt quickstart --backend openai
else
poetry run memgpt quickstart --backend memgpt
fi
poetry run memgpt quickstart --backend memgpt
#- name: Run docker compose server
# env:
Expand All @@ -55,7 +48,6 @@ jobs:
MEMGPT_PG_PASSWORD: memgpt
MEMGPT_PG_DB: memgpt
MEMGPT_PG_HOST: localhost
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
run: |
poetry run pytest -s -vv tests/test_server.py
Expand All @@ -67,11 +59,10 @@ jobs:
MEMGPT_PG_PASSWORD: memgpt
MEMGPT_PG_HOST: localhost
MEMGPT_PG_DB: memgpt
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
run: |
poetry run pytest -s -vv -k "not test_storage and not test_server and not test_openai_client" tests
poetry run pytest -s -vv -k "not test_quickstart and not test_endpoints and not test_storage and not test_server and not test_openai_client" tests
- name: Run storage tests
env:
Expand All @@ -80,7 +71,6 @@ jobs:
MEMGPT_PG_PASSWORD: memgpt
MEMGPT_PG_HOST: localhost
MEMGPT_PG_DB: memgpt
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
MEMGPT_SERVER_PASS: test_server_token
run: |
poetry run pytest -s -vv tests/test_storage.py
7 changes: 7 additions & 0 deletions configs/embedding_model_configs/local.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
{
"embedding_endpoint": null,
"embedding_model": "BAAI/bge-small-en-v1.5",
"embedding_dim": 384,
"embedding_chunk_size": 300,
"embedding_endpoint_type": "local"
}
3 changes: 2 additions & 1 deletion configs/embedding_model_configs/memgpt-hosted.json
Original file line number Diff line number Diff line change
Expand Up @@ -2,5 +2,6 @@
"embedding_endpoint": "https://embeddings.memgpt.ai",
"embedding_model": "BAAI/bge-large-en-v1.5",
"embedding_dim": 1024,
"embedding_chunk_size": 300
"embedding_chunk_size": 300,
"embedding_endpoint_type": "hugging-face"
}
4 changes: 4 additions & 0 deletions memgpt/client/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -476,11 +476,15 @@ def get_messages(
) -> GetAgentMessagesResponse:
params = {"before": before, "after": after, "limit": limit}
response = requests.get(f"{self.base_url}/api/agents/{agent_id}/messages-cursor", params=params, headers=self.headers)
if response.status_code != 200:
raise ValueError(f"Failed to get messages: {response.text}")
return GetAgentMessagesResponse(**response.json())

def send_message(self, agent_id: uuid.UUID, message: str, role: str, stream: Optional[bool] = False) -> UserMessageResponse:
data = {"message": message, "role": role, "stream": stream}
response = requests.post(f"{self.base_url}/api/agents/{agent_id}/messages", json=data, headers=self.headers)
if response.status_code != 200:
raise ValueError(f"Failed to send message: {response.text}")
return UserMessageResponse(**response.json())

# humans / personas
Expand Down
1 change: 0 additions & 1 deletion memgpt/embeddings.py
Original file line number Diff line number Diff line change
Expand Up @@ -162,7 +162,6 @@ def embedding_model(config: EmbeddingConfig, user_id: Optional[uuid.UUID] = None
credentials = MemGPTCredentials.load()

if endpoint_type == "openai":
assert credentials.openai_key is not None
from llama_index.embeddings.openai import OpenAIEmbedding

additional_kwargs = {"user_id": user_id} if user_id else {}
Expand Down
8 changes: 0 additions & 8 deletions memgpt/server/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -223,14 +223,6 @@ def __init__(
# TODO figure out how to handle credentials for the server
self.credentials = MemGPTCredentials.load()

# check credentials
# TODO: add checks for other providers
if (
self.config.default_embedding_config.embedding_endpoint_type == "openai"
or self.config.default_llm_config.model_endpoint_type == "openai"
):
assert self.credentials.openai_key is not None, "OpenAI key must be set in the credentials file"

# Ensure valid database configuration
# TODO: add back once tests are matched
# assert (
Expand Down
58 changes: 14 additions & 44 deletions tests/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,12 +7,12 @@
from dotenv import load_dotenv

from memgpt import Admin, create_client
from memgpt.config import MemGPTConfig
from memgpt.constants import DEFAULT_PRESET
from memgpt.credentials import MemGPTCredentials
from memgpt.data_types import Preset # TODO move to PresetModel
from memgpt.data_types import EmbeddingConfig, LLMConfig
from memgpt.settings import settings
from tests.config import TestMGPTConfig
from tests.utils import create_config

test_agent_name = f"test_client_{str(uuid.uuid4())}"
# test_preset_name = "test_preset"
Expand All @@ -34,54 +34,24 @@ def _reset_config():
db_url = settings.memgpt_pg_uri

if os.getenv("OPENAI_API_KEY"):
config = TestMGPTConfig(
archival_storage_uri=db_url,
recall_storage_uri=db_url,
metadata_storage_uri=db_url,
archival_storage_type="postgres",
recall_storage_type="postgres",
metadata_storage_type="postgres",
# embeddings
default_embedding_config=EmbeddingConfig(
embedding_endpoint_type="openai",
embedding_endpoint="https://api.openai.com/v1",
embedding_model="text-embedding-ada-002",
embedding_dim=1536,
),
# llms
default_llm_config=LLMConfig(
model_endpoint_type="openai",
model_endpoint="https://api.openai.com/v1",
model="gpt-4",
),
)
create_config("openai")
credentials = MemGPTCredentials(
openai_key=os.getenv("OPENAI_API_KEY"),
)
else: # hosted
config = TestMGPTConfig(
archival_storage_uri=db_url,
recall_storage_uri=db_url,
metadata_storage_uri=db_url,
archival_storage_type="postgres",
recall_storage_type="postgres",
metadata_storage_type="postgres",
# embeddings
default_embedding_config=EmbeddingConfig(
embedding_endpoint_type="hugging-face",
embedding_endpoint="https://embeddings.memgpt.ai",
embedding_model="BAAI/bge-large-en-v1.5",
embedding_dim=1024,
),
# llms
default_llm_config=LLMConfig(
model_endpoint_type="vllm",
model_endpoint="https://api.memgpt.ai",
model="ehartford/dolphin-2.5-mixtral-8x7b",
),
)
create_config("memgpt_hosted")
credentials = MemGPTCredentials()

config = MemGPTConfig.load()

# set to use postgres
config.archival_storage_uri = db_url
config.recall_storage_uri = db_url
config.metadata_storage_uri = db_url
config.archival_storage_type = "postgres"
config.recall_storage_type = "postgres"
config.metadata_storage_type = "postgres"

config.save()
credentials.save()
print("_reset_config :: ", config.config_path)
Expand Down
Loading

0 comments on commit 8fb2b61

Please sign in to comment.