openlit 1.33.10__py3-none-any.whl → 1.33.12__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- openlit/__helpers.py +125 -88
- openlit/__init__.py +38 -11
- openlit/instrumentation/ag2/__init__.py +19 -20
- openlit/instrumentation/ag2/ag2.py +134 -69
- openlit/instrumentation/ai21/__init__.py +22 -21
- openlit/instrumentation/ai21/ai21.py +82 -546
- openlit/instrumentation/ai21/async_ai21.py +82 -546
- openlit/instrumentation/ai21/utils.py +409 -0
- openlit/instrumentation/anthropic/__init__.py +16 -16
- openlit/instrumentation/anthropic/anthropic.py +61 -353
- openlit/instrumentation/anthropic/async_anthropic.py +62 -354
- openlit/instrumentation/anthropic/utils.py +251 -0
- openlit/instrumentation/assemblyai/__init__.py +2 -2
- openlit/instrumentation/assemblyai/assemblyai.py +3 -3
- openlit/instrumentation/astra/__init__.py +25 -25
- openlit/instrumentation/astra/astra.py +2 -2
- openlit/instrumentation/astra/async_astra.py +2 -2
- openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
- openlit/instrumentation/bedrock/__init__.py +2 -2
- openlit/instrumentation/bedrock/bedrock.py +3 -3
- openlit/instrumentation/chroma/__init__.py +9 -9
- openlit/instrumentation/chroma/chroma.py +2 -2
- openlit/instrumentation/cohere/__init__.py +7 -7
- openlit/instrumentation/cohere/async_cohere.py +9 -9
- openlit/instrumentation/cohere/cohere.py +9 -9
- openlit/instrumentation/controlflow/__init__.py +4 -4
- openlit/instrumentation/controlflow/controlflow.py +2 -2
- openlit/instrumentation/crawl4ai/__init__.py +3 -3
- openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
- openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
- openlit/instrumentation/crewai/__init__.py +3 -3
- openlit/instrumentation/crewai/crewai.py +2 -2
- openlit/instrumentation/dynamiq/__init__.py +5 -5
- openlit/instrumentation/dynamiq/dynamiq.py +2 -2
- openlit/instrumentation/elevenlabs/__init__.py +5 -5
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
- openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
- openlit/instrumentation/embedchain/__init__.py +2 -2
- openlit/instrumentation/embedchain/embedchain.py +4 -4
- openlit/instrumentation/firecrawl/__init__.py +3 -3
- openlit/instrumentation/firecrawl/firecrawl.py +2 -2
- openlit/instrumentation/google_ai_studio/__init__.py +3 -3
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
- openlit/instrumentation/gpt4all/__init__.py +3 -3
- openlit/instrumentation/gpt4all/gpt4all.py +7 -7
- openlit/instrumentation/groq/__init__.py +3 -3
- openlit/instrumentation/groq/async_groq.py +5 -5
- openlit/instrumentation/groq/groq.py +5 -5
- openlit/instrumentation/haystack/__init__.py +2 -2
- openlit/instrumentation/haystack/haystack.py +2 -2
- openlit/instrumentation/julep/__init__.py +7 -7
- openlit/instrumentation/julep/async_julep.py +3 -3
- openlit/instrumentation/julep/julep.py +3 -3
- openlit/instrumentation/langchain/__init__.py +2 -2
- openlit/instrumentation/langchain/async_langchain.py +13 -9
- openlit/instrumentation/langchain/langchain.py +13 -8
- openlit/instrumentation/letta/__init__.py +7 -7
- openlit/instrumentation/letta/letta.py +5 -5
- openlit/instrumentation/litellm/__init__.py +5 -5
- openlit/instrumentation/litellm/async_litellm.py +8 -8
- openlit/instrumentation/litellm/litellm.py +8 -8
- openlit/instrumentation/llamaindex/__init__.py +2 -2
- openlit/instrumentation/llamaindex/llamaindex.py +2 -2
- openlit/instrumentation/mem0/__init__.py +2 -2
- openlit/instrumentation/mem0/mem0.py +2 -2
- openlit/instrumentation/milvus/__init__.py +2 -2
- openlit/instrumentation/milvus/milvus.py +2 -2
- openlit/instrumentation/mistral/__init__.py +7 -7
- openlit/instrumentation/mistral/async_mistral.py +10 -10
- openlit/instrumentation/mistral/mistral.py +10 -10
- openlit/instrumentation/multion/__init__.py +7 -7
- openlit/instrumentation/multion/async_multion.py +5 -5
- openlit/instrumentation/multion/multion.py +5 -5
- openlit/instrumentation/ollama/__init__.py +11 -9
- openlit/instrumentation/ollama/async_ollama.py +71 -465
- openlit/instrumentation/ollama/ollama.py +71 -465
- openlit/instrumentation/ollama/utils.py +332 -0
- openlit/instrumentation/openai/__init__.py +11 -11
- openlit/instrumentation/openai/async_openai.py +18 -18
- openlit/instrumentation/openai/openai.py +18 -18
- openlit/instrumentation/phidata/__init__.py +2 -2
- openlit/instrumentation/phidata/phidata.py +2 -2
- openlit/instrumentation/pinecone/__init__.py +6 -6
- openlit/instrumentation/pinecone/pinecone.py +2 -2
- openlit/instrumentation/premai/__init__.py +3 -3
- openlit/instrumentation/premai/premai.py +7 -7
- openlit/instrumentation/qdrant/__init__.py +2 -2
- openlit/instrumentation/qdrant/async_qdrant.py +2 -2
- openlit/instrumentation/qdrant/qdrant.py +2 -2
- openlit/instrumentation/reka/__init__.py +3 -3
- openlit/instrumentation/reka/async_reka.py +3 -3
- openlit/instrumentation/reka/reka.py +3 -3
- openlit/instrumentation/together/__init__.py +5 -5
- openlit/instrumentation/together/async_together.py +8 -8
- openlit/instrumentation/together/together.py +8 -8
- openlit/instrumentation/transformers/__init__.py +2 -2
- openlit/instrumentation/transformers/transformers.py +4 -4
- openlit/instrumentation/vertexai/__init__.py +9 -9
- openlit/instrumentation/vertexai/async_vertexai.py +4 -4
- openlit/instrumentation/vertexai/vertexai.py +4 -4
- openlit/instrumentation/vllm/__init__.py +2 -2
- openlit/instrumentation/vllm/vllm.py +3 -3
- openlit/otel/events.py +85 -0
- openlit/otel/tracing.py +3 -13
- openlit/semcov/__init__.py +13 -1
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/METADATA +2 -2
- openlit-1.33.12.dist-info/RECORD +126 -0
- openlit-1.33.10.dist-info/RECORD +0 -122
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/LICENSE +0 -0
- {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/WHEEL +0 -0
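The pattern across this release is consistent: the heavyweight per-provider wrappers (ai21, anthropic, ollama) move their inline span, event, and metric logic into new `utils.py` modules, every wrapper factory gains an `event_provider` argument backed by the new `openlit/otel/events.py`, and the `trace_content` flag is renamed `capture_message_content`. The anthropic diff below illustrates the shape of the change. From its import block and call sites, the new helper surface is roughly the sketch that follows; the names and parameters are taken from the diff, while the bodies are placeholders, not the shipped implementation:

```python
# Hypothetical sketch of openlit/instrumentation/anthropic/utils.py, inferred
# only from the import block and call sites visible in the diff below.

def process_chunk(scope, chunk):
    """Fold one streaming event into the accumulator fields on `scope`
    (_llmresponse, _input_tokens, _finish_reason, _tool_calls, ...)."""

def process_chat_response(response, request_model, pricing_info, server_port,
                          server_address, environment, application_name,
                          metrics, event_provider, start_time, span,
                          capture_message_content, disable_metrics, version,
                          **kwargs):
    """Record span attributes, content events, and metrics for a
    non-streaming response, then hand the original response back."""
    return response

def process_streaming_chat_response(scope, pricing_info, environment,
                                    application_name, metrics, event_provider,
                                    capture_message_content, disable_metrics,
                                    version):
    """Finalize telemetry for a completed stream from the state that
    process_chunk accumulated."""
```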
@@ -4,54 +4,37 @@ Module for monitoring Anthropic API calls.
 
 import logging
 import time
-from opentelemetry.trace import SpanKind
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import SpanKind
 from openlit.__helpers import (
-    get_chat_model_cost,
     handle_exception,
-    response_as_dict,
-    calculate_ttft,
-    calculate_tbt,
-    create_metrics_attributes,
     set_server_address_and_port
 )
+from openlit.instrumentation.anthropic.utils import (
+    process_chunk,
+    process_chat_response,
+    process_streaming_chat_response,
+)
 from openlit.semcov import SemanticConvetion
 
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
 
-def messages(version, environment, application_name, tracer,
-             pricing_info,
+def messages(version, environment, application_name, tracer, event_provider,
+    pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for
-
-    Args:
-        version: Version of the monitoring package.
-        environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the Anthropic API.
-        tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of Anthropic usage.
-        trace_content: Flag indicating whether to trace the actual content.
-
-    Returns:
-        A function that wraps the chat method to add telemetry.
+    Generates a telemetry wrapper for GenAI function call
     """
 
     class TracedSyncStream:
         """
-        Wrapper for streaming responses to collect
-        Wraps the response to collect message IDs and aggregated response.
-
-        This class implements the '__aiter__' and '__anext__' methods that
-        handle asynchronous streaming responses.
-
-        This class also implements '__aenter__' and '__aexit__' methods that
-        handle asynchronous context management protocol.
+        Wrapper for streaming responses to collect telemetry.
         """
+
         def __init__(
             self,
             wrapped,
             span,
+            span_name,
             kwargs,
             server_address,
             server_port,
@@ -59,13 +42,18 @@ def messages(version, environment, application_name, tracer,
         ):
             self.__wrapped__ = wrapped
             self._span = span
-
-            self._llmresponse = ""
-            self._response_id = ""
-            self._response_model = ""
-            self._finish_reason = ""
-            self._input_tokens = ""
-            self._output_tokens = ""
+            self._span_name = span_name
+            self._llmresponse = ''
+            self._response_id = ''
+            self._response_model = ''
+            self._finish_reason = ''
+            self._input_tokens = ''
+            self._output_tokens = ''
+            self._tool_arguments = ''
+            self._tool_id = ''
+            self._tool_name = ''
+            self._tool_calls = None
+            self._response_role = ''
 
             self._args = args
             self._kwargs = kwargs
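The constructor now seeds explicit accumulators, including tool-call state, that `process_chunk` fills in as Anthropic stream events arrive; storing `span_name` lets the wrapper reopen a span by name once the stream is exhausted. The inline event handling deleted in the next hunk shows what the helper replaces. A minimal sketch, assuming `process_chunk` mirrors that removed logic (the shipped body lives in `utils.py` and additionally handles the new tool fields):

```python
from openlit.__helpers import response_as_dict

def process_chunk(scope, chunk):
    # Sketch only: mirrors the inline handling removed from __next__ below.
    chunked = response_as_dict(chunk)
    if chunked.get('type') == 'message_start':
        # Message ID, model, and input-token usage arrive in the first event.
        message = chunked.get('message', {})
        scope._response_id = message.get('id')
        scope._response_model = message.get('model')
        scope._input_tokens = message.get('usage', {}).get('input_tokens')
    elif chunked.get('type') == 'content_block_delta':
        # Text deltas are aggregated into the full response.
        content = chunked.get('delta', {}).get('text')
        if content:
            scope._llmresponse += content
    elif chunked.get('type') == 'message_delta':
        # Output tokens and the stop reason arrive at the end of the stream.
        scope._output_tokens = chunked.get('usage', {}).get('output_tokens')
        scope._finish_reason = chunked.get('delta', {}).get('stop_reason')
```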
@@ -94,348 +82,68 @@ def messages(version, environment, application_name, tracer,
         def __next__(self):
             try:
                 chunk = self.__wrapped__.__next__()
-
-                # Record the timestamp for the current chunk
-                self._timestamps.append(end_time)
-
-                if len(self._timestamps) == 1:
-                    # Calculate time to first chunk
-                    self._ttft = calculate_ttft(self._timestamps, self._start_time)
-
-                chunked = response_as_dict(chunk)
-
-                # Collect message IDs and input token from events
-                if chunked.get('type') == "message_start":
-                    self._response_id = chunked.get('message').get('id')
-                    self._input_tokens = chunked.get('message').get('usage').get('input_tokens')
-                    self._response_model = chunked.get('message').get('model')
-                # Collect message IDs and aggregated response from events
-                if chunked.get('type') == "content_block_delta":
-                    content = chunked.get('delta').get('text')
-                    if content:
-                        self._llmresponse += content
-                # Collect output tokens and stop reason from events
-                if chunked.get('type') == "message_delta":
-                    self._output_tokens = chunked.get('usage').get('output_tokens')
-                    self._finish_reason = chunked.get('delta').get('stop_reason')
-
+                process_chunk(self, chunk)
                 return chunk
             except StopIteration:
-                # Handling exception ensure observability without disrupting operation
                 try:
-                    self._end_time = time.time()
-
-
-
-
-
-
-
-
-
-
-                    if isinstance(content, list):
-                        content_str_list = []
-                        for item in content:
-                            if item["type"] == "text":
-                                content_str_list.append(f'text: {item["text"]}')
-                            elif (item["type"] == "image_url" and
-                                  not item["image_url"]["url"].startswith("data:")):
-                                content_str_list.append(f'image_url: {item["image_url"]["url"]}')
-                        content_str = ", ".join(content_str_list)
-                        formatted_messages.append(f"{role}: {content_str}")
-                    else:
-                        formatted_messages.append(f"{role}: {content}")
-                    prompt = "\n".join(formatted_messages)
-
-                    request_model = self._kwargs.get("model", "claude-3-5-sonnet-latest")
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model,
-                                        pricing_info, self._input_tokens,
-                                        self._output_tokens)
-
-                    # Set Span attributes (OTel Semconv)
-                    self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                                        SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                        SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                                        request_model)
-                    self._span.set_attribute(SemanticConvetion.SERVER_PORT,
-                                        self._server_port)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-                                        self._kwargs.get("max_tokens", -1))
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES,
-                                        self._kwargs.get("stop_sequences", []))
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-                                        self._kwargs.get("temperature", 1.0))
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
-                                        self._kwargs.get("top_k", 1.0))
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-                                        self._kwargs.get("top_p", 1.0))
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
-                                        [self._finish_reason])
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
-                                        self._response_id)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                                        self._response_model)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                                        self._input_tokens)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                        self._output_tokens)
-                    self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                                        self._server_address)
-                    if isinstance(self._llmresponse, str):
-                        self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                        "text")
-                    else:
-                        self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                        "json")
-
-                    # Set Span attributes (Extra)
-                    self._span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                                        environment)
-                    self._span.set_attribute(SERVICE_NAME,
-                                        application_name)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                                        True)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                                        self._input_tokens + self._output_tokens)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                                        cost)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT,
-                                        self._tbt)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                                        self._ttft)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                                        version)
-                    if trace_content:
-                        self._span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-                        self._span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: self._llmresponse,
-                            },
+                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            event_provider=event_provider,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
                         )
-                    self._span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC,
-                            request_model=request_model,
-                            server_address=self._server_address,
-                            server_port=self._server_port,
-                            response_model=self._response_model,
-                        )
-
-                        metrics["genai_client_usage_tokens"].record(
-                            self._input_tokens + self._output_tokens, attributes
-                        )
-                        metrics["genai_client_operation_duration"].record(
-                            self._end_time - self._start_time, attributes
-                        )
-                        metrics["genai_server_tbt"].record(
-                            self._tbt, attributes
-                        )
-                        metrics["genai_server_ttft"].record(
-                            self._ttft, attributes
-                        )
-                        metrics["genai_requests"].add(1, attributes)
-                        metrics["genai_completion_tokens"].add(self._output_tokens, attributes)
-                        metrics["genai_prompt_tokens"].add(self._input_tokens, attributes)
-                        metrics["genai_cost"].record(cost, attributes)
 
                 except Exception as e:
                     handle_exception(self._span, e)
                     logger.error("Error in trace creation: %s", e)
-                finally:
-                    self._span.end()
                 raise
 
     def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps the
-
-        This collects metrics such as execution time, cost, and token usage, and handles errors
-        gracefully, adding details to the trace for observability.
-
-        Args:
-            wrapped: The original 'messages' method to be wrapped.
-            instance: The instance of the class where the original method is defined.
-            args: Positional arguments for the 'messages' method.
-            kwargs: Keyword arguments for the 'messages' method.
-
-        Returns:
-            The response from the original 'messages' method.
+        Wraps the GenAI function call.
         """
 
-
-
-
-        request_model = kwargs.get("model", "claude-3-5-sonnet-latest")
+        streaming = kwargs.get('stream', False)
+        server_address, server_port = set_server_address_and_port(instance, 'api.anthropic.com', 443)
+        request_model = kwargs.get('model', 'claude-3-5-sonnet-latest')
 
-        span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+        span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
 
         # pylint: disable=no-else-return
         if streaming:
-            # Special handling for streaming response to accommodate the nature of data flow
            awaited_wrapped = wrapped(*args, **kwargs)
            span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
 
-            return TracedSyncStream(awaited_wrapped, span, kwargs, server_address, server_port)
+            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
 
-        # Handling for non-streaming responses
         else:
            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
                start_time = time.time()
                response = wrapped(*args, **kwargs)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            formatted_messages.append(f"{role}: {content}")
-                    prompt = "\n".join(formatted_messages)
-
-                    input_tokens = response_dict.get('usage').get('input_tokens')
-                    output_tokens = response_dict.get('usage').get('output_tokens')
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model,
-                                        pricing_info, input_tokens,
-                                        output_tokens)
-
-                    llm_response = ""
-                    for i in range(len(response_dict.get('content'))):
-                        if response_dict.get('content')[i].get('type') == 'text':
-                            llm_response = response_dict.get('content')[i].get('text')
-
-                    # Set Span attributes (OTel Semconv)
-                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                    span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                                        SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                        SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                                        request_model)
-                    span.set_attribute(SemanticConvetion.SERVER_PORT,
-                                        server_port)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-                                        kwargs.get("max_tokens", -1))
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES,
-                                        kwargs.get("stop_sequences", []))
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-                                        kwargs.get("temperature", 1.0))
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
-                                        kwargs.get("top_k", 1.0))
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-                                        kwargs.get("top_p", 1.0))
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
-                                        [response_dict.get('stop_reason')])
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
-                                        response_dict.get('id'))
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                                        response_dict.get('model'))
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                                        input_tokens)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                        output_tokens)
-                    span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                                        server_address)
-
-                    span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                        response_dict.get('content')[0].get('type'))
-
-                    # Set Span attributes (Extra)
-                    span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                                        environment)
-                    span.set_attribute(SERVICE_NAME,
-                                        application_name)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                                        False)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                                        input_tokens + output_tokens)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                                        cost)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                                        end_time - start_time)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                                        version)
-
-                    if trace_content:
-                        span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-                        span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llm_response,
-                            },
-                        )
-
-                    span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC,
-                            request_model=request_model,
-                            server_address=server_address,
-                            server_port=server_port,
-                            response_model=response_dict.get('model'),
-                        )
-
-                        metrics["genai_client_usage_tokens"].record(
-                            input_tokens + output_tokens, attributes
-                        )
-                        metrics["genai_client_operation_duration"].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics["genai_server_ttft"].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics["genai_requests"].add(1, attributes)
-                        metrics["genai_completion_tokens"].add(output_tokens, attributes)
-                        metrics["genai_prompt_tokens"].add(input_tokens, attributes)
-                        metrics["genai_cost"].record(cost, attributes)
-
-                    # Return original response
-                    return response
-
-                except Exception as e:
-                    handle_exception(span, e)
-                    logger.error("Error in trace creation: %s", e)
-
-                    # Return original response
-                    return response
+                response = process_chat_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    event_provider=event_provider,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+                return response
 
     return wrapper
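Two behavioural changes stand out in this hunk. First, the stream wrapper no longer ends its span in a `finally:` block; on `StopIteration` it opens a fresh span under the stored `self._span_name` and delegates to `process_streaming_chat_response`. Second, the telemetry block deleted above (cost calculation, `gen_ai.*` span attributes, prompt/completion events, `genai_*` metrics) is exactly what the new helpers centralize. A rough reconstruction of the streaming helper's job, based on that deleted code; illustrative only, the shipped implementation lives in `openlit/instrumentation/anthropic/utils.py`:

```python
from openlit.__helpers import get_chat_model_cost

def process_streaming_chat_response(scope, pricing_info, environment,
                                    application_name, metrics, event_provider,
                                    capture_message_content, disable_metrics,
                                    version):
    # Illustrative reconstruction of the inline logic this release removed.
    request_model = scope._kwargs.get('model', 'claude-3-5-sonnet-latest')
    cost = get_chat_model_cost(request_model, pricing_info,
                               scope._input_tokens, scope._output_tokens)
    # The real helper then sets the gen_ai.* attributes on scope._span
    # (model, tokens, cost, finish reason, TTFT/TBT), emits prompt and
    # completion events through event_provider when capture_message_content
    # is enabled, and records the genai_* metrics unless disable_metrics is set.
```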