openlit 1.34.18.tar.gz → 1.34.20.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {openlit-1.34.18 → openlit-1.34.20}/PKG-INFO +1 -1
- {openlit-1.34.18 → openlit-1.34.20}/pyproject.toml +1 -1
- openlit-1.34.20/src/openlit/instrumentation/anthropic/__init__.py +48 -0
- openlit-1.34.20/src/openlit/instrumentation/anthropic/anthropic.py +142 -0
- openlit-1.34.20/src/openlit/instrumentation/anthropic/async_anthropic.py +142 -0
- openlit-1.34.20/src/openlit/instrumentation/anthropic/utils.py +225 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/bedrock/__init__.py +19 -14
- openlit-1.34.20/src/openlit/instrumentation/bedrock/bedrock.py +211 -0
- openlit-1.34.20/src/openlit/instrumentation/bedrock/utils.py +223 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/litellm/async_litellm.py +2 -2
- openlit-1.34.18/src/openlit/instrumentation/anthropic/__init__.py +0 -50
- openlit-1.34.18/src/openlit/instrumentation/anthropic/anthropic.py +0 -149
- openlit-1.34.18/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -149
- openlit-1.34.18/src/openlit/instrumentation/anthropic/utils.py +0 -251
- openlit-1.34.18/src/openlit/instrumentation/bedrock/bedrock.py +0 -77
- openlit-1.34.18/src/openlit/instrumentation/bedrock/utils.py +0 -252
- {openlit-1.34.18 → openlit-1.34.20}/LICENSE +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/README.md +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/__helpers.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/evals/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/evals/all.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/evals/bias_detection.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/evals/hallucination.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/evals/toxicity.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/evals/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/guard/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/guard/all.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/guard/prompt_injection.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/guard/restrict_topic.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/guard/sensitive_topic.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/guard/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ag2/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ag2/ag2.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ai21/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ai21/ai21.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ai21/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/assemblyai/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/astra/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/astra/astra.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/astra/async_astra.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/astra/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/chroma/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/chroma/chroma.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/cohere/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/cohere/cohere.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/cohere/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/crewai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/crewai/crewai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/elevenlabs/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/google_ai_studio/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/gpt4all/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/gpu/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/groq/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/groq/async_groq.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/groq/groq.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/groq/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/haystack/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/haystack/haystack.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/julep/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/julep/async_julep.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/julep/julep.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/langchain/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/langchain/async_langchain.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/langchain/langchain.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/letta/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/letta/letta.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/litellm/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/litellm/litellm.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/litellm/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/mem0/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/mem0/mem0.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/milvus/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/milvus/milvus.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/mistral/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/mistral/mistral.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/mistral/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/multion/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/multion/async_multion.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/multion/multion.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ollama/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ollama/ollama.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/ollama/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/openai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/openai/async_openai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/openai/openai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/phidata/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/phidata/phidata.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/premai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/premai/premai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/premai/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/pydantic_ai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/pydantic_ai/pydantic_ai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/pydantic_ai/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/reka/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/reka/async_reka.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/reka/reka.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/reka/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/together/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/together/async_together.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/together/together.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/together/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/transformers/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/transformers/transformers.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/transformers/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/vllm/__init__.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/vllm/utils.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/vllm/vllm.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/otel/events.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/otel/metrics.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/otel/tracing.py +0 -0
- {openlit-1.34.18 → openlit-1.34.20}/src/openlit/semcov/__init__.py +0 -0
{openlit-1.34.18 → openlit-1.34.20}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.18
+Version: 1.34.20
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu

{openlit-1.34.18 → openlit-1.34.20}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.34.18"
+version = "1.34.20"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 license = "Apache-2.0"

openlit-1.34.20/src/openlit/instrumentation/anthropic/__init__.py
@@ -0,0 +1,48 @@
+"""Initializer of Auto Instrumentation of Anthropic Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.anthropic.anthropic import messages
+from openlit.instrumentation.anthropic.async_anthropic import async_messages
+
+_instruments = ("anthropic >= 0.21.0",)
+
+class AnthropicInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for Anthropic's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        version = importlib.metadata.version("anthropic")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
+        tracer = kwargs.get("tracer")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
+        metrics = kwargs.get("metrics_dict")
+        disable_metrics = kwargs.get("disable_metrics")
+
+        # sync
+        wrap_function_wrapper(
+            "anthropic.resources.messages",
+            "Messages.create",
+            messages(version, environment, application_name, tracer, pricing_info,
+                    capture_message_content, metrics, disable_metrics),
+        )
+
+        # async
+        wrap_function_wrapper(
+            "anthropic.resources.messages",
+            "AsyncMessages.create",
+            async_messages(version, environment, application_name, tracer, pricing_info,
+                    capture_message_content, metrics, disable_metrics),
+        )
+
+    def _uninstrument(self, **kwargs):
+        pass

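The instrumentor above is plain wrapt monkey-patching. As an illustrative sketch of what `wrap_function_wrapper` does here (the print stands in for the telemetry the real wrapper records; requires the anthropic package to be importable):

# Minimal sketch of the wrapt pattern used by _instrument() above.
# The (wrapped, instance, args, kwargs) signature is wrapt's convention.
from wrapt import wrap_function_wrapper

def logging_wrapper(wrapped, instance, args, kwargs):
    print(f"intercepted {wrapped.__name__}")  # telemetry would be recorded here
    return wrapped(*args, **kwargs)           # delegate to the original method

# Patches the method in place; every later Messages.create call on any
# sync client instance now flows through logging_wrapper first.
wrap_function_wrapper("anthropic.resources.messages", "Messages.create", logging_wrapper)
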
openlit-1.34.20/src/openlit/instrumentation/anthropic/anthropic.py
@@ -0,0 +1,142 @@
+"""
+Module for monitoring Anthropic API calls.
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port
+)
+from openlit.instrumentation.anthropic.utils import (
+    process_chunk,
+    process_chat_response,
+    process_streaming_chat_response,
+)
+from openlit.semcov import SemanticConvention
+
+def messages(version, environment, application_name, tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for Anthropic Messages.create calls.
+    """
+
+    class TracedSyncStream:
+        """
+        Wrapper for streaming responses to collect telemetry.
+        """
+
+        def __init__(
+                self,
+                wrapped,
+                span,
+                span_name,
+                kwargs,
+                server_address,
+                server_port,
+            ):
+            self.__wrapped__ = wrapped
+            self._span = span
+            self._span_name = span_name
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._finish_reason = ""
+            self._input_tokens = 0
+            self._output_tokens = 0
+            self._tool_arguments = ""
+            self._tool_id = ""
+            self._tool_name = ""
+            self._tool_calls = None
+            self._response_role = ""
+            self._kwargs = kwargs
+            self._start_time = time.time()
+            self._end_time = None
+            self._timestamps = []
+            self._ttft = 0
+            self._tbt = 0
+            self._server_address = server_address
+            self._server_port = server_port
+
+        def __enter__(self):
+            self.__wrapped__.__enter__()
+            return self
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            self.__wrapped__.__exit__(exc_type, exc_value, traceback)
+
+        def __iter__(self):
+            return self
+
+        def __getattr__(self, name):
+            """Delegate attribute access to the wrapped object."""
+            return getattr(self.__wrapped__, name)
+
+        def __next__(self):
+            try:
+                chunk = self.__wrapped__.__next__()
+                process_chunk(self, chunk)
+                return chunk
+            except StopIteration:
+                try:
+                    with self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
+                        )
+                except Exception as e:
+                    handle_exception(self._span, e)
+                raise
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the Anthropic Messages.create call.
+        """
+
+        streaming = kwargs.get("stream", False)
+        server_address, server_port = set_server_address_and_port(instance, "api.anthropic.com", 443)
+        request_model = kwargs.get("model", "claude-3-5-sonnet-latest")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        # pylint: disable=no-else-return
+        if streaming:
+            awaited_wrapped = wrapped(*args, **kwargs)
+            span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+
+            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+        else:
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.time()
+                response = wrapped(*args, **kwargs)
+
+                try:
+                    response = process_chat_response(
+                        response=response,
+                        request_model=request_model,
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper

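TracedSyncStream is a transparent proxy: `__getattr__` forwards anything it does not override to the SDK's stream object, `__next__` records each chunk, and the span is closed when the underlying iterator raises StopIteration. A stripped-down version of the pattern (hypothetical names, no OpenTelemetry) behaves like this:

# Stripped-down sketch of the stream-proxy pattern used above.
class CountingStream:
    def __init__(self, wrapped):
        self.__wrapped__ = wrapped
        self._chunks = 0

    def __iter__(self):
        return self

    def __getattr__(self, name):
        # Anything not defined here (e.g. .close()) falls through to the wrapped stream.
        return getattr(self.__wrapped__, name)

    def __next__(self):
        try:
            chunk = next(self.__wrapped__)
            self._chunks += 1  # per-chunk bookkeeping (process_chunk's role)
            return chunk
        except StopIteration:
            # the real wrapper ends its span here, then re-raises
            print(f"stream finished after {self._chunks} chunks")
            raise

list(CountingStream(iter(["Hel", "lo"])))  # prints: stream finished after 2 chunks
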
openlit-1.34.20/src/openlit/instrumentation/anthropic/async_anthropic.py
@@ -0,0 +1,142 @@
+"""
+Module for monitoring Anthropic API calls.
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port
+)
+from openlit.instrumentation.anthropic.utils import (
+    process_chunk,
+    process_chat_response,
+    process_streaming_chat_response,
+)
+from openlit.semcov import SemanticConvention
+
+def async_messages(version, environment, application_name, tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for Anthropic AsyncMessages.create calls.
+    """
+
+    class TracedAsyncStream:
+        """
+        Wrapper for async streaming responses to collect telemetry.
+        """
+
+        def __init__(
+                self,
+                wrapped,
+                span,
+                span_name,
+                kwargs,
+                server_address,
+                server_port,
+            ):
+            self.__wrapped__ = wrapped
+            self._span = span
+            self._span_name = span_name
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._finish_reason = ""
+            self._input_tokens = 0
+            self._output_tokens = 0
+            self._tool_arguments = ""
+            self._tool_id = ""
+            self._tool_name = ""
+            self._tool_calls = None
+            self._response_role = ""
+            self._kwargs = kwargs
+            self._start_time = time.time()
+            self._end_time = None
+            self._timestamps = []
+            self._ttft = 0
+            self._tbt = 0
+            self._server_address = server_address
+            self._server_port = server_port
+
+        async def __aenter__(self):
+            await self.__wrapped__.__aenter__()
+            return self
+
+        async def __aexit__(self, exc_type, exc_value, traceback):
+            await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
+
+        def __aiter__(self):
+            return self
+
+        async def __getattr__(self, name):
+            """Delegate attribute access to the wrapped object."""
+            return getattr(await self.__wrapped__, name)
+
+        async def __anext__(self):
+            try:
+                chunk = await self.__wrapped__.__anext__()
+                process_chunk(self, chunk)
+                return chunk
+            except StopAsyncIteration:
+                try:
+                    with self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
+                        )
+                except Exception as e:
+                    handle_exception(self._span, e)
+                raise
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the Anthropic AsyncMessages.create call.
+        """
+
+        streaming = kwargs.get("stream", False)
+        server_address, server_port = set_server_address_and_port(instance, "api.anthropic.com", 443)
+        request_model = kwargs.get("model", "claude-3-5-sonnet-latest")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        # pylint: disable=no-else-return
+        if streaming:
+            awaited_wrapped = await wrapped(*args, **kwargs)
+            span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+
+            return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+        else:
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.time()
+                response = await wrapped(*args, **kwargs)
+
+                try:
+                    response = process_chat_response(
+                        response=response,
+                        request_model=request_model,
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper
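On the async side the traced object is a drop-in for the SDK's own stream, which is why the wrapper awaits the original call first and then hands the result to TracedAsyncStream. A hypothetical caller (assumes ANTHROPIC_API_KEY is set) would not notice the proxy:

# Hypothetical caller: the traced stream is consumed like the SDK stream.
import asyncio
from anthropic import AsyncAnthropic

async def main():
    client = AsyncAnthropic()
    stream = await client.messages.create(
        model="claude-3-5-sonnet-latest",
        max_tokens=64,
        messages=[{"role": "user", "content": "Hi"}],
        stream=True,
    )
    async for event in stream:  # each event passes through process_chunk()
        pass                    # span ends when StopAsyncIteration is raised

asyncio.run(main())
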
openlit-1.34.20/src/openlit/instrumentation/anthropic/utils.py
@@ -0,0 +1,225 @@
+"""
+Anthropic OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    calculate_ttft,
+    response_as_dict,
+    calculate_tbt,
+    get_chat_model_cost,
+    record_completion_metrics,
+    common_span_attributes,
+)
+from openlit.semcov import SemanticConvention
+
+def format_content(messages):
+    """
+    Format the messages into a string for span events.
+    """
+
+    if not messages:
+        return ""
+
+    formatted_messages = []
+    for message in messages:
+        if isinstance(message, dict):
+            role = message.get("role", "user")
+            content = message.get("content", "")
+        else:
+            # Handle Anthropic object format
+            role = getattr(message, "role", "user")
+            content = getattr(message, "content", "")
+
+        if isinstance(content, list):
+            # Handle structured content (e.g., text + images)
+            text_parts = []
+            for part in content:
+                if isinstance(part, dict) and part.get("type") == "text":
+                    text_parts.append(part.get("text", ""))
+            content = " ".join(text_parts)
+        elif not isinstance(content, str):
+            content = str(content)
+
+        formatted_messages.append(f"{role}: {content}")
+
+    return "\n".join(formatted_messages)
+
+def process_chunk(scope, chunk):
+    """
+    Process a chunk of response data and update state.
+    """
+
+    end_time = time.time()
+    # Record the timestamp for the current chunk
+    scope._timestamps.append(end_time)
+
+    if len(scope._timestamps) == 1:
+        # Calculate time to first chunk
+        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)
+
+    chunked = response_as_dict(chunk)
+
+    # Collect message IDs and input token from events
+    if chunked.get("type") == "message_start":
+        scope._response_id = chunked.get("message").get("id")
+        scope._input_tokens = chunked.get("message").get("usage").get("input_tokens")
+        scope._response_model = chunked.get("message").get("model")
+        scope._response_role = chunked.get("message").get("role")
+
+    # Collect message IDs and aggregated response from events
+    if chunked.get("type") == "content_block_delta":
+        if chunked.get("delta").get("text"):
+            scope._llmresponse += chunked.get("delta").get("text")
+        elif chunked.get("delta").get("partial_json"):
+            scope._tool_arguments += chunked.get("delta").get("partial_json")
+
+    if chunked.get("type") == "content_block_start":
+        if chunked.get("content_block").get("id"):
+            scope._tool_id = chunked.get("content_block").get("id")
+        if chunked.get("content_block").get("name"):
+            scope._tool_name = chunked.get("content_block").get("name")
+
+    # Collect output tokens and stop reason from events
+    if chunked.get("type") == "message_delta":
+        scope._output_tokens = chunked.get("usage").get("output_tokens")
+        scope._finish_reason = chunked.get("delta").get("stop_reason")
+
+def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version, is_stream):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    scope._end_time = time.time()
+    if len(scope._timestamps) > 1:
+        scope._tbt = calculate_tbt(scope._timestamps)
+
+    formatted_messages = format_content(scope._kwargs.get("messages", []))
+    request_model = scope._kwargs.get("model", "claude-3-5-sonnet-latest")
+
+    cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_ANTHROPIC,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Span Attributes for Request parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_tokens", -1))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get("stop_sequences", []))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_K, scope._kwargs.get("top_k", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 1.0))
+
+    # Span Attributes for Response parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+    # Span Attributes for Cost and Tokens
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Handle tool calls if present
+    if scope._tool_calls:
+        # Optimized tool handling - extract name, id, and arguments
+        tool_name = scope._tool_calls.get("name", "")
+        tool_id = scope._tool_calls.get("id", "")
+        tool_args = scope._tool_calls.get("input", "")
+
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, tool_name)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, tool_id)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(tool_args))
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, formatted_messages)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+        # To be removed once the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: formatted_messages,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Record metrics
+    if not disable_metrics:
+        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_ANTHROPIC,
+            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
+            application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
+            cost, scope._tbt, scope._ttft)

+def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content=False, disable_metrics=False, version=""):
+    """
+    Process streaming chat response and generate telemetry.
+    """
+
+    if scope._tool_id != "":
+        scope._tool_calls = {
+            "id": scope._tool_id,
+            "name": scope._tool_name,
+            "input": scope._tool_arguments
+        }
+
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=True)
+
+def process_chat_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time,
+    span, capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process non-streaming chat response and generate telemetry.
+    """
+
+    scope = type("GenericScope", (), {})()
+    response_dict = response_as_dict(response)
+
+    # pylint: disable = no-member
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._llmresponse = response_dict.get("content", [{}])[0].get("text", "")
+    scope._response_role = response_dict.get("role", "assistant")
+    scope._input_tokens = response_dict.get("usage").get("input_tokens")
+    scope._output_tokens = response_dict.get("usage").get("output_tokens")
+    scope._response_model = response_dict.get("model", "")
+    scope._finish_reason = response_dict.get("stop_reason", "")
+    scope._response_id = response_dict.get("id", "")
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+
+    # Handle tool calls if present
+    content_blocks = response_dict.get("content", [])
+    scope._tool_calls = None
+    for block in content_blocks:
+        if block.get("type") == "tool_use":
+            scope._tool_calls = {
+                "id": block.get("id", ""),
+                "name": block.get("name", ""),
+                "input": block.get("input", "")
+            }
+            break
+
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=False)
+
+    return response

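`calculate_ttft` and `calculate_tbt` live in `openlit.__helpers` and are not part of this diff; a plausible reading of how `process_chunk` and `common_chat_logic` use them, with assumed implementations shown only to make the timestamps logic concrete:

# Assumed behaviour of the timing helpers, inferred from their call sites above.
def calculate_ttft(timestamps, start_time):
    # time to first token: delay between request start and the first chunk
    return timestamps[0] - start_time if timestamps else 0

def calculate_tbt(timestamps):
    # time between tokens: mean gap between consecutive chunk arrivals
    if len(timestamps) < 2:
        return 0
    gaps = [later - earlier for earlier, later in zip(timestamps, timestamps[1:])]
    return sum(gaps) / len(gaps)
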
{openlit-1.34.18 → openlit-1.34.20}/src/openlit/instrumentation/bedrock/__init__.py
@@ -1,4 +1,3 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of AWS Bedrock Functions"""
 
 from typing import Collection
@@ -6,37 +5,43 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.bedrock.bedrock import converse
+from openlit.instrumentation.bedrock.bedrock import converse, converse_stream
 
 _instruments = ("boto3 >= 1.34.138",)
 
 class BedrockInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for AWS Bedrock
+    An instrumentor for AWS Bedrock client library.
     """
 
     def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments
 
     def _instrument(self, **kwargs):
-
-        environment = kwargs.get("environment", "
+        version = importlib.metadata.version("boto3")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
         tracer = kwargs.get("tracer")
-        event_provider = kwargs.get('event_provider')
-        metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
         capture_message_content = kwargs.get("capture_message_content", False)
+        metrics = kwargs.get("metrics_dict")
         disable_metrics = kwargs.get("disable_metrics")
-        version = importlib.metadata.version("boto3")
 
-        #sync
+        # sync
+        wrap_function_wrapper(
+            "botocore.client",
+            "ClientCreator.create_client",
+            converse(version, environment, application_name, tracer, pricing_info,
+                    capture_message_content, metrics, disable_metrics),
+        )
+
+        # streaming
         wrap_function_wrapper(
-            "botocore.client",
-            "ClientCreator.create_client",
-
-
+            "botocore.client",
+            "ClientCreator.create_client",
+            converse_stream(version, environment, application_name, tracer, pricing_info,
+                    capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
        pass

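Both the Anthropic and Bedrock instrumentors are switched on through OpenLIT's top-level entry point; per the project's documented usage, enabling them is a one-liner (the endpoint value below is a placeholder):

# Enabling the instrumentation in an application (endpoint is a placeholder).
import openlit

openlit.init(otlp_endpoint="http://127.0.0.1:4318")

# From here on, Anthropic Messages.create / AsyncMessages.create calls and
# boto3 Bedrock converse / converse_stream calls emit spans and metrics.
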