
Commit

add weave Model as evaluator
ayulockin committed May 9, 2024
1 parent 90c5c10 commit 17884a7
Showing 1 changed file with 12 additions and 2 deletions.
14 changes: 12 additions & 2 deletions src/wandbot/evaluation/weave_eval/main.py
@@ -6,6 +6,7 @@
 import weave
 import asyncio
 from weave import Evaluation
+from weave import Model
 from llama_index.llms.openai import OpenAI
 
 from wandbot.evaluation.config import EvalConfig
@@ -47,7 +48,6 @@ async def get_answer(question: str, application: str = "api-eval") -> str:
 @weave.op()
 async def get_eval_record(
     question: str,
-    ground_truth: str,
 ) -> dict:
     response = await get_answer(question)
     response = json.loads(response)
@@ -63,6 +63,16 @@ async def get_eval_record(
     }
 
 
+class EvaluatorModel(Model):
+    eval_judge_model: str = config.eval_judge_model
+
+    @weave.op()
+    async def predict(self, question: str) -> dict:
+        # Model logic goes here
+        prediction = await get_eval_record(question)
+        return prediction
+
+
 @weave.op()
 async def get_answer_correctness(
     question: str,
@@ -100,4 +110,4 @@ async def get_answer_correctness(
 )
 
 if __name__ == "__main__":
-    asyncio.run(evaluation.evaluate(get_eval_record))
+    asyncio.run(evaluation.evaluate(EvaluatorModel()))
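
For context on the pattern this commit adopts: a weave.Model subclass declares its configuration as typed fields and exposes a @weave.op()-decorated predict method, and weave.Evaluation calls that method once per dataset row. The sketch below is a minimal, self-contained illustration under assumptions, not the wandbot code itself: the dataset, the has_answer scorer, the default judge string, and the project name are hypothetical stand-ins for the diff's get_eval_record, get_answer_correctness, and config.eval_judge_model.

import asyncio

import weave
from weave import Evaluation, Model


class EvaluatorModel(Model):
    # Hypothetical default; the commit reads this from config.eval_judge_model.
    eval_judge_model: str = "gpt-4"

    @weave.op()
    async def predict(self, question: str) -> dict:
        # The commit delegates to get_eval_record(question); a stub answer is returned here.
        return {"answer": f"placeholder answer for: {question}"}


# Hypothetical two-row dataset; row keys are matched to predict()'s parameters by name.
dataset = [
    {"question": "How do I log metrics with wandb?"},
    {"question": "What does weave.op() record?"},
]


@weave.op()
def has_answer(question: str, model_output: dict) -> dict:
    # Hypothetical scorer; the real pipeline judges answer correctness with an LLM.
    # Note: newer weave releases pass the prediction as `output` rather than `model_output`.
    return {"has_answer": bool(model_output.get("answer"))}


evaluation = Evaluation(dataset=dataset, scorers=[has_answer])

if __name__ == "__main__":
    weave.init("wandbot-weave-eval-sketch")  # hypothetical project name
    asyncio.run(evaluation.evaluate(EvaluatorModel()))

Passing a Model instance instead of a bare op, as the diff does, lets weave version the evaluator and attach its attributes (such as eval_judge_model) to the evaluation run.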
