Skip to content

Commit

Permalink
fix: lint issues
Browse files Browse the repository at this point in the history
  • Loading branch information
galkleinman committed Nov 29, 2023
1 parent a38e30c commit 2b10d74
Show file tree
Hide file tree
Showing 4 changed files with 26 additions and 14 deletions.
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
"""OpenTelemetry Bedrock instrumentation"""
from functools import wraps
from itertools import tee
import json
import logging
import os
Expand All @@ -10,7 +9,6 @@

from opentelemetry import context as context_api
from opentelemetry.trace import get_tracer, SpanKind
from opentelemetry.trace.status import Status, StatusCode

from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import (
Expand Down Expand Up @@ -64,7 +62,7 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
"""Instruments and calls every function defined in TO_WRAP."""
if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
return wrapped(*args, **kwargs)

if kwargs.get("service_name") == "bedrock-runtime":
client = wrapped(*args, **kwargs)
client.invoke_model = _instrumented_model_invoke(client.invoke_model, tracer)
Expand All @@ -88,7 +86,7 @@ def with_instrumentation(*args, **kwargs):

if span.is_recording():
(vendor, model) = kwargs.get("modelId").split(".")

_set_span_attribute(span, SpanAttributes.LLM_VENDOR, vendor)
_set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, model)

Expand All @@ -100,11 +98,12 @@ def with_instrumentation(*args, **kwargs):
_set_ai21_span_attributes(span, request_body, response_body)
elif vendor == "meta":
_set_llama_span_attributes(span, request_body, response_body)

return response

return with_instrumentation


def _set_cohere_span_attributes(span, request_body, response_body):
_set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.COMPLETION.value)
_set_span_attribute(span, SpanAttributes.LLM_TOP_P, request_body.get("p"))
Expand All @@ -117,6 +116,7 @@ def _set_cohere_span_attributes(span, request_body, response_body):
for i, generation in enumerate(response_body.get("generations")):
_set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.{i}.content", generation.get("text"))


def _set_anthropic_span_attributes(span, request_body, response_body):
_set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.COMPLETION.value)
_set_span_attribute(span, SpanAttributes.LLM_TOP_P, request_body.get("top_p"))
Expand All @@ -127,17 +127,25 @@ def _set_anthropic_span_attributes(span, request_body, response_body):
_set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.0.user", request_body.get("prompt"))
_set_span_attribute(span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response_body.get("completion"))


def _set_ai21_span_attributes(span, request_body, response_body):
    """Record AI21 (Jurassic) request/response attributes on *span*.

    Reads AI21's camelCase request keys (``topP``, ``maxTokens``) and, when
    prompt capture is enabled via should_send_prompts(), also records the
    prompt and every returned completion's text.

    Args:
        span: the OpenTelemetry span to annotate.
        request_body: parsed JSON body sent to the model (dict).
        response_body: parsed JSON body returned by the model (dict);
            expected to contain a ``completions`` list — TODO confirm against
            the Bedrock AI21 response schema.
    """
    _set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.COMPLETION.value)
    _set_span_attribute(span, SpanAttributes.LLM_TOP_P, request_body.get("topP"))
    _set_span_attribute(span, SpanAttributes.LLM_TEMPERATURE, request_body.get("temperature"))
    _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, request_body.get("maxTokens"))

    # Prompts/completions may contain user data, so they are gated behind
    # the privacy toggle.
    if should_send_prompts():
        _set_span_attribute(
            span,
            f"{SpanAttributes.LLM_PROMPTS}.0.user", request_body.get("prompt")
        )

        for i, completion in enumerate(response_body.get("completions")):
            _set_span_attribute(
                span,
                f"{SpanAttributes.LLM_COMPLETIONS}.{i}.content", completion.get("data").get("text")
            )


def _set_llama_span_attributes(span, request_body, response_body):
_set_span_attribute(span, SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.COMPLETION.value)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def __init__(self, raw_stream, content_length):
super().__init__(raw_stream, content_length)
self._buffer = None
self._buffer_cursor = 0

def read(self, amt=None):
"""Read at most amt bytes from the stream.
Expand All @@ -28,7 +28,7 @@ def read(self, amt=None):
raise ReadTimeoutError(endpoint_url=e.url, error=e)
except URLLib3ProtocolError as e:
raise ResponseStreamingError(error=e)

self._amount_read += len(self._buffer)
if amt is None or (not self._buffer and amt > 0):
# If the server sends empty contents or
Expand Down
8 changes: 5 additions & 3 deletions packages/sample-app/sample_app/bedrock_example_app.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,18 +19,20 @@ def create_joke():
})

response = brt.invoke_model(
body=body,
modelId='cohere.command-text-v14',
accept='application/json',
body=body,
modelId='cohere.command-text-v14',
accept='application/json',
contentType='application/json'
)

response_body = json.loads(response.get('body').read())

return response_body.get('generations')[0].get('text')


@workflow(name="pirate_joke_generator")
def joke_workflow():
    """Traced workflow entry point: generate a pirate joke and print it."""
    joke = create_joke()
    print(joke)


joke_workflow()
4 changes: 3 additions & 1 deletion packages/traceloop-sdk/traceloop/sdk/tracing/tracing.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@
from typing import Dict

TRACER_NAME = "traceloop.tracer"
EXCLUDED_URLS = "api.openai.com,openai.azure.com,api.anthropic.com,api.cohere.ai,pinecone.io,traceloop.com,posthog.com,bedrock-runtime"
EXCLUDED_URLS = ("api.openai.com,openai.azure.com,api.anthropic.com,api.cohere.ai,pinecone.io,traceloop.com,"
"posthog.com,bedrock-runtime")


class TracerWrapper(object):
Expand Down Expand Up @@ -369,6 +370,7 @@ def init_pymysql_instrumentor():
if not instrumentor.is_instrumented_by_opentelemetry:
instrumentor.instrument()


def init_bedrock_instrumentor():
if importlib.util.find_spec("boto3") is not None:
from opentelemetry.instrumentation.bedrock import BedrockInstrumentor
Expand Down

0 comments on commit 2b10d74

Please sign in to comment.