openlit 1.34.5__tar.gz → 1.34.7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {openlit-1.34.5 → openlit-1.34.7}/PKG-INFO +1 -1
- {openlit-1.34.5 → openlit-1.34.7}/pyproject.toml +1 -1
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/openai/async_openai.py +1 -1
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/premai/__init__.py +0 -1
- openlit-1.34.7/src/openlit/instrumentation/premai/premai.py +186 -0
- openlit-1.34.7/src/openlit/instrumentation/premai/utils.py +325 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/reka/__init__.py +5 -7
- openlit-1.34.7/src/openlit/instrumentation/reka/async_reka.py +59 -0
- openlit-1.34.7/src/openlit/instrumentation/reka/reka.py +59 -0
- openlit-1.34.7/src/openlit/instrumentation/reka/utils.py +193 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/together/utils.py +3 -3
- openlit-1.34.5/src/openlit/instrumentation/premai/premai.py +0 -556
- openlit-1.34.5/src/openlit/instrumentation/reka/async_reka.py +0 -197
- openlit-1.34.5/src/openlit/instrumentation/reka/reka.py +0 -197
- {openlit-1.34.5 → openlit-1.34.7}/LICENSE +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/README.md +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/__helpers.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/evals/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/evals/all.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/evals/bias_detection.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/evals/hallucination.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/evals/toxicity.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/evals/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/guard/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/guard/all.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/guard/prompt_injection.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/guard/restrict_topic.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/guard/sensitive_topic.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/guard/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ag2/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ag2/ag2.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ai21/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ai21/ai21.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ai21/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/anthropic/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/astra/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/astra/astra.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/astra/async_astra.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/astra/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/bedrock/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/chroma/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/chroma/chroma.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/cohere/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/cohere/cohere.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/crewai/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/crewai/crewai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/google_ai_studio/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/gpu/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/groq/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/groq/async_groq.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/groq/groq.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/haystack/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/haystack/haystack.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/julep/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/julep/async_julep.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/julep/julep.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/langchain/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/langchain/async_langchain.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/langchain/langchain.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/letta/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/letta/letta.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/litellm/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/litellm/async_litellm.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/litellm/litellm.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/mem0/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/mem0/mem0.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/milvus/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/milvus/milvus.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/mistral/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/mistral/mistral.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/multion/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/multion/async_multion.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/multion/multion.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ollama/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ollama/ollama.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/ollama/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/openai/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/openai/openai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/phidata/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/phidata/phidata.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/pydantic_ai/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/pydantic_ai/pydantic_ai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/pydantic_ai/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/together/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/together/async_together.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/together/together.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/transformers/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/transformers/transformers.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/transformers/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/vllm/__init__.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/vllm/utils.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/vllm/vllm.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/otel/events.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/otel/metrics.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/otel/tracing.py +0 -0
- {openlit-1.34.5 → openlit-1.34.7}/src/openlit/semcov/__init__.py +0 -0
{openlit-1.34.5 → openlit-1.34.7}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.5
+Version: 1.34.7
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.34.5 → openlit-1.34.7}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.34.5"
+version = "1.34.7"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 license = "Apache-2.0"
{openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/openai/async_openai.py
@@ -733,7 +733,7 @@ def async_chat_completions(version, environment, application_name,
         formatted_messages = []
         for message in message_prompt:
             role = message["role"]
-            content = message["content"]
+            content = message.get("content", "")

             if isinstance(content, list):
                 content_str = ", ".join(
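The one-line fix above guards prompt formatting against chat messages that carry no "content" key (assistant tool-call messages are a common case). A minimal sketch of the failure mode, with invented message data:

# Illustration only; the messages below are invented, not from the diff.
messages = [
    {"role": "system", "content": "You are helpful."},
    {"role": "assistant", "tool_calls": [{"id": "call_1", "type": "function"}]},  # no "content" key
]

formatted = []
for message in messages:
    role = message["role"]
    content = message.get("content", "")  # message["content"] would raise KeyError on the second message
    formatted.append(f"{role}: {content}")

print("\n".join(formatted))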
openlit-1.34.7/src/openlit/instrumentation/premai/premai.py
@@ -0,0 +1,186 @@
+"""
+Module for monitoring PremAI API calls.
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port
+)
+from openlit.instrumentation.premai.utils import (
+    process_chat_response,
+    process_chunk,
+    process_streaming_chat_response,
+    process_embedding_response
+)
+from openlit.semcov import SemanticConvention
+
+def chat(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    class TracedSyncStream:
+        """
+        Wrapper for streaming responses to collect telemetry.
+        """
+
+        def __init__(
+                self,
+                wrapped,
+                span,
+                span_name,
+                kwargs,
+                server_address,
+                server_port,
+                **args,
+            ):
+            self.__wrapped__ = wrapped
+            self._span = span
+            self._span_name = span_name
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._input_tokens = 0
+            self._output_tokens = 0
+            self._finish_reason = ""
+            self._tools = None
+            self._args = args
+            self._kwargs = kwargs
+            self._start_time = time.time()
+            self._end_time = None
+            self._timestamps = []
+            self._ttft = 0
+            self._tbt = 0
+            self._server_address = server_address
+            self._server_port = server_port
+
+        def __enter__(self):
+            return self
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            pass
+
+        def __getattr__(self, name):
+            """Delegate attribute access to the wrapped object."""
+            return getattr(self.__wrapped__, name)
+
+        def __iter__(self):
+            try:
+                chunk = self.__wrapped__.__next__()
+                process_chunk(self, chunk)
+                return chunk
+
+            finally:
+                try:
+                    with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
+                        )
+
+                except Exception as e:
+                    handle_exception(self._span, e)
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI function call.
+        """
+
+        # Check if streaming is enabled for the API call
+        streaming = kwargs.get("stream", False)
+
+        server_address, server_port = set_server_address_and_port(instance, "app.premai.io", 443)
+        request_model = kwargs.get("model", "gpt-4o-mini")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        if streaming:
+            # Special handling for streaming response to accommodate the nature of data flow
+            awaited_wrapped = wrapped(*args, **kwargs)
+            span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+
+        # Handling for non-streaming responses
+        else:
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.time()
+                response = wrapped(*args, **kwargs)
+
+                try:
+                    response = process_chat_response(
+                        response=response,
+                        request_model=request_model,
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper
+
+def embedding(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI function call.
+        """
+
+        server_address, server_port = set_server_address_and_port(instance, "app.premai.io", 443)
+        request_model = kwargs.get("model", "text-embedding-ada-002")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            response = wrapped(*args, **kwargs)
+
+            try:
+                response = process_embedding_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
+
+    return wrapper
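These factory functions return wrapt-style wrappers; registering them against the PremAI client follows the same wrap_function_wrapper pattern visible in the reka/__init__.py hunk below. A hypothetical wiring sketch (the "premai.api" module path and "Prem.chat.completions.create" method name are assumptions for illustration, not taken from this diff):

from opentelemetry import trace
from wrapt import wrap_function_wrapper
from openlit.instrumentation.premai.premai import chat

tracer = trace.get_tracer(__name__)

wrap_function_wrapper(
    "premai.api",                    # target module (assumed)
    "Prem.chat.completions.create",  # target sync chat method (assumed)
    chat(version="1.34.7", environment="production", application_name="my-app",
         tracer=tracer, pricing_info={}, capture_message_content=True,
         metrics=None, disable_metrics=True),
)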
openlit-1.34.7/src/openlit/instrumentation/premai/utils.py
@@ -0,0 +1,325 @@
+"""
+PremAI OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    response_as_dict,
+    calculate_ttft,
+    calculate_tbt,
+    get_chat_model_cost,
+    get_embed_model_cost,
+    general_tokens,
+    create_metrics_attributes,
+)
+from openlit.semcov import SemanticConvention
+
+def format_content(messages):
+    """
+    Process a list of messages to extract content.
+    """
+
+    formatted_messages = []
+    for message in messages:
+        role = message["role"]
+        content = message["content"]
+
+        if isinstance(content, list):
+            content_str = ", ".join(
+                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
+                if "type" in item else f'text: {item["text"]}'
+                for item in content
+            )
+            formatted_messages.append(f"{role}: {content_str}")
+        else:
+            formatted_messages.append(f"{role}: {content}")
+
+    return "\n".join(formatted_messages)
+
+def process_chunk(scope, chunk):
+    """
+    Process a chunk of response data and update state.
+    """
+
+    end_time = time.time()
+    # Record the timestamp for the current chunk
+    scope._timestamps.append(end_time)
+
+    if len(scope._timestamps) == 1:
+        # Calculate time to first chunk
+        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)
+
+    chunked = response_as_dict(chunk)
+    # Collect message IDs and aggregated response from events
+    if chunked.choices:
+        first_choice = chunked.get("choices")[0]
+
+        if first_choice.get("delta").get("content"):
+            scope._llmresponse += first_choice.get("delta").get("content")
+
+        if chunked.get("choices")[0].get("finish_reason"):
+            scope._finish_reason = chunked.get("choices")[0].get("finish_reason")
+            scope._response_id = chunked.get("id")
+            scope._response_model = chunked.get("model")
+
+def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, is_stream, tbt, ttft, version):
+    """
+    Set common span attributes for both chat and RAG operations.
+    """
+
+    scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, gen_ai_operation)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, gen_ai_system)
+    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+    scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
+    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    scope._span.set_attribute(SERVICE_NAME, application_name)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+
+def record_completion_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time,
+    input_tokens, output_tokens, cost, tbt=None, ttft=None):
+    """
+    Record completion-specific metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
+    if tbt is not None:
+        metrics["genai_server_tbt"].record(tbt, attributes)
+    if ttft is not None:
+        metrics["genai_server_ttft"].record(ttft, attributes)
+
+def record_embedding_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time,
+    input_tokens, cost):
+    """
+    Record embedding-specific metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_usage_tokens"].record(input_tokens, attributes)
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
+
+def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version, is_stream):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    if len(scope._timestamps) > 1:
+        scope._tbt = calculate_tbt(scope._timestamps)
+
+    prompt = format_content(scope._kwargs.get("messages", ""))
+    request_model = scope._kwargs.get("model", "llama3.2-3b")
+
+    # Calculate tokens using input prompt and aggregated response
+    if is_stream:
+        input_tokens = general_tokens(prompt)
+        output_tokens = general_tokens(scope._llmresponse)
+    else:
+        input_tokens = scope._input_tokens
+        output_tokens = scope._output_tokens
+
+    cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_PREMAI,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Span Attributes for Response parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, scope._kwargs.get("seed", ""))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, scope._kwargs.get("frequency_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_tokens", -1))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, scope._kwargs.get("presence_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get("stop", []))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, scope._kwargs.get("user", ""))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+    # Span Attributes for Cost and Tokens
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Span Attributes for Tools
+    if scope._tools:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, scope._tools.get("function","")).get("name","")
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(scope._tools.get("id","")))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(scope._tools.get("function","").get("arguments","")))
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+        # To be removed one the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_PREMAI,
+            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
+            application_name, scope._start_time, scope._end_time, input_tokens, output_tokens,
+            cost, scope._tbt, scope._ttft)
+
+def common_embedding_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version):
+    """
+    Process embedding request and generate Telemetry
+    """
+
+    request_model = scope._kwargs.get("model", "text-embedding-ada-002")
+
+    cost = get_embed_model_cost(request_model, pricing_info, scope._input_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING, SemanticConvention.GEN_AI_SYSTEM_PREMAI,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, False, scope._tbt, scope._ttft, version)
+
+    # Embedding-specific span attributes
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_ENCODING_FORMATS, [scope._kwargs.get("encoding_format", "float")])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, scope._kwargs.get("user", ""))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: str(scope._kwargs.get("input", "")),
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_embedding_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING, SemanticConvention.GEN_AI_SYSTEM_PREMAI,
+            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
+            application_name, scope._start_time, scope._end_time, scope._input_tokens, cost)
+
+def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content=False, disable_metrics=False, version=""):
+    """
+    Process chat request and generate Telemetry
+    """
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=True)
+
+def process_chat_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    scope = type("GenericScope", (), {})()
+    response_dict = response_as_dict(response)
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._llmresponse = str(response_dict.get("choices")[0].get("message").get("content"))
+    scope._response_id = response_dict.get("additional_properties", {}).get("id")
+    scope._response_model = response_dict.get("model")
+    scope._input_tokens = response_dict.get("usage").get("prompt_tokens")
+    scope._output_tokens = response_dict.get("usage").get("completion_tokens")
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+    scope._finish_reason = str(response_dict.get("choices")[0].get("finish_reason"))
+
+    if scope._kwargs.get("tools"):
+        scope._tools = response_dict.get("choices")[0].get("message").get("tool_calls")
+    else:
+        scope._tools = None
+
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=False)
+
+    return response
+
+def process_embedding_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process embedding request and generate Telemetry
+    """
+
+    scope = type("GenericScope", (), {})()
+    response_dict = response_as_dict(response)
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._response_model = response_dict.get("model")
+    scope._input_tokens = response_dict.get("usage").get("prompt_tokens")
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+
+    common_embedding_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version)
+
+    return response
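To make format_content concrete, here is a standalone sketch of its behavior; the function body is copied from the hunk above and the sample messages are invented:

def format_content(messages):
    formatted_messages = []
    for message in messages:
        role = message["role"]
        content = message["content"]
        if isinstance(content, list):
            # Multimodal content: render each part as "type: value"
            content_str = ", ".join(
                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                if "type" in item else f'text: {item["text"]}'
                for item in content
            )
            formatted_messages.append(f"{role}: {content_str}")
        else:
            formatted_messages.append(f"{role}: {content}")
    return "\n".join(formatted_messages)

print(format_content([
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": [
        {"type": "text", "text": "Describe this"},
        {"type": "image_url", "image_url": "https://example.com/cat.png"},
    ]},
]))
# system: Be brief.
# user: text: Describe this, image_url: https://example.com/cat.png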
{openlit-1.34.5 → openlit-1.34.7}/src/openlit/instrumentation/reka/__init__.py
@@ -1,4 +1,3 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of Reka Functions"""

 from typing import Collection
@@ -17,15 +16,15 @@ _instruments = ("reka-api >= 3.2.0",)

 class RekaInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for Reka
+    An instrumentor for Reka client library.
     """

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments

     def _instrument(self, **kwargs):
-        application_name = kwargs.get("application_name", "default_application")
-        environment = kwargs.get("environment", "default_environment")
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
@@ -33,7 +32,7 @@ class RekaInstrumentor(BaseInstrumentor):
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("reka-api")

-        #
+        # Chat completions
        wrap_function_wrapper(
            "reka.chat.client",
            "ChatClient.create",
@@ -41,7 +40,7 @@ class RekaInstrumentor(BaseInstrumentor):
            tracer, pricing_info, capture_message_content, metrics, disable_metrics),
        )

-        #
+        # Chat completions
        wrap_function_wrapper(
            "reka.chat.client",
            "AsyncChatClient.create",
@@ -50,5 +49,4 @@ class RekaInstrumentor(BaseInstrumentor):
        )

     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
         pass
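The new "default" fallbacks line up with the defaults of openlit.init(), which forwards application_name and environment to every instrumentor's _instrument(). A minimal usage sketch (the OTLP endpoint is a placeholder value):

import openlit

# These kwargs reach RekaInstrumentor._instrument() above; if omitted,
# both now fall back to "default".
openlit.init(
    otlp_endpoint="http://127.0.0.1:4318",
    application_name="my-app",
    environment="production",
)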
openlit-1.34.7/src/openlit/instrumentation/reka/async_reka.py
@@ -0,0 +1,59 @@
+"""
+Module for monitoring Reka API calls.
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port
+)
+from openlit.instrumentation.reka.utils import (
+    process_chat_response
+)
+from openlit.semcov import SemanticConvention
+
+def async_chat(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI function call.
+        """
+
+        server_address, server_port = set_server_address_and_port(instance, "api.reka.ai", 443)
+        request_model = kwargs.get("model", "reka-core-20240501")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            response = await wrapped(*args, **kwargs)
+
+            try:
+                response = process_chat_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
+
+    return wrapper
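The async wrapper above follows wrapt's calling convention: it receives the original callable, the instance it is bound to, and the call's args/kwargs. A self-contained sketch of that convention (the demo coroutine and printout are invented for illustration):

import asyncio
import wrapt

async def work(x):
    await asyncio.sleep(0.01)
    return x * 2

async def wrapper(wrapped, instance, args, kwargs):
    # `wrapped` is the original coroutine function; telemetry code runs around it.
    result = await wrapped(*args, **kwargs)
    print(f"traced call returned {result}")
    return result

traced_work = wrapt.FunctionWrapper(work, wrapper)
print(asyncio.run(traced_work(21)))  # prints "traced call returned 42", then 42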