openlit 1.33.11__py3-none-any.whl → 1.33.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,54 +4,37 @@ Module for monitoring Anthropic API calls.
 
  import logging
  import time
- from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+ from opentelemetry.trace import SpanKind
  from openlit.__helpers import (
-     get_chat_model_cost,
      handle_exception,
-     response_as_dict,
-     calculate_ttft,
-     calculate_tbt,
-     create_metrics_attributes,
      set_server_address_and_port
  )
+ from openlit.instrumentation.anthropic.utils import (
+     process_chunk,
+     process_chat_response,
+     process_streaming_chat_response,
+ )
  from openlit.semcov import SemanticConvetion
 
  # Initialize logger for logging potential issues and operations
  logger = logging.getLogger(__name__)
 
- def messages(version, environment, application_name, tracer,
+ def messages(version, environment, application_name, tracer, event_provider,
               pricing_info, capture_message_content, metrics, disable_metrics):
      """
-     Generates a telemetry wrapper for messages to collect metrics.
-
-     Args:
-         version: Version of the monitoring package.
-         environment: Deployment environment (e.g., production, staging).
-         application_name: Name of the application using the Anthropic API.
-         tracer: OpenTelemetry tracer for creating spans.
-         pricing_info: Information used for calculating the cost of Anthropic usage.
-         capture_message_content: Flag indicating whether to trace the actual content.
-
-     Returns:
-         A function that wraps the chat method to add telemetry.
+     Generates a telemetry wrapper for GenAI function call
      """
 
      class TracedSyncStream:
          """
-         Wrapper for streaming responses to collect metrics and trace data.
-         Wraps the response to collect message IDs and aggregated response.
-
-         This class implements the '__aiter__' and '__anext__' methods that
-         handle asynchronous streaming responses.
-
-         This class also implements '__aenter__' and '__aexit__' methods that
-         handle asynchronous context management protocol.
+         Wrapper for streaming responses to collect telemetry.
          """
+
          def __init__(
                  self,
                  wrapped,
                  span,
+                 span_name,
                  kwargs,
                  server_address,
                  server_port,
@@ -59,13 +42,18 @@ def messages(version, environment, application_name, tracer,
          ):
              self.__wrapped__ = wrapped
              self._span = span
-             # Placeholder for aggregating streaming response
-             self._llmresponse = ""
-             self._response_id = ""
-             self._response_model = ""
-             self._finish_reason = ""
-             self._input_tokens = ""
-             self._output_tokens = ""
+             self._span_name = span_name
+             self._llmresponse = ''
+             self._response_id = ''
+             self._response_model = ''
+             self._finish_reason = ''
+             self._input_tokens = ''
+             self._output_tokens = ''
+             self._tool_arguments = ''
+             self._tool_id = ''
+             self._tool_name = ''
+             self._tool_calls = None
+             self._response_role = ''
 
              self._args = args
              self._kwargs = kwargs
@@ -94,348 +82,68 @@ def messages(version, environment, application_name, tracer,
          def __next__(self):
              try:
                  chunk = self.__wrapped__.__next__()
-                 end_time = time.time()
-                 # Record the timestamp for the current chunk
-                 self._timestamps.append(end_time)
-
-                 if len(self._timestamps) == 1:
-                     # Calculate time to first chunk
-                     self._ttft = calculate_ttft(self._timestamps, self._start_time)
-
-                 chunked = response_as_dict(chunk)
-
-                 # Collect message IDs and input token from events
-                 if chunked.get('type') == "message_start":
-                     self._response_id = chunked.get('message').get('id')
-                     self._input_tokens = chunked.get('message').get('usage').get('input_tokens')
-                     self._response_model = chunked.get('message').get('model')
-                 # Collect message IDs and aggregated response from events
-                 if chunked.get('type') == "content_block_delta":
-                     content = chunked.get('delta').get('text')
-                     if content:
-                         self._llmresponse += content
-                 # Collect output tokens and stop reason from events
-                 if chunked.get('type') == "message_delta":
-                     self._output_tokens = chunked.get('usage').get('output_tokens')
-                     self._finish_reason = chunked.get('delta').get('stop_reason')
-
+                 process_chunk(self, chunk)
                  return chunk
              except StopIteration:
-                 # Handling exception ensure observability without disrupting operation
                  try:
-                     self._end_time = time.time()
-                     if len(self._timestamps) > 1:
-                         self._tbt = calculate_tbt(self._timestamps)
-
-                     # Format 'messages' into a single string
-                     message_prompt = self._kwargs.get("messages", "")
-                     formatted_messages = []
-                     for message in message_prompt:
-                         role = message["role"]
-                         content = message["content"]
-
-                         if isinstance(content, list):
-                             content_str_list = []
-                             for item in content:
-                                 if item["type"] == "text":
-                                     content_str_list.append(f'text: {item["text"]}')
-                                 elif (item["type"] == "image_url" and
-                                       not item["image_url"]["url"].startswith("data:")):
-                                     content_str_list.append(f'image_url: {item["image_url"]["url"]}')
-                             content_str = ", ".join(content_str_list)
-                             formatted_messages.append(f"{role}: {content_str}")
-                         else:
-                             formatted_messages.append(f"{role}: {content}")
-                     prompt = "\n".join(formatted_messages)
-
-                     request_model = self._kwargs.get("model", "claude-3-5-sonnet-latest")
-
-                     # Calculate cost of the operation
-                     cost = get_chat_model_cost(request_model,
-                         pricing_info, self._input_tokens,
-                         self._output_tokens)
-
-                     # Set Span attributes (OTel Semconv)
-                     self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                         SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                         SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                         request_model)
-                     self._span.set_attribute(SemanticConvetion.SERVER_PORT,
-                         self._server_port)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-                         self._kwargs.get("max_tokens", -1))
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES,
-                         self._kwargs.get("stop_sequences", []))
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-                         self._kwargs.get("temperature", 1.0))
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
-                         self._kwargs.get("top_k", 1.0))
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-                         self._kwargs.get("top_p", 1.0))
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
-                         [self._finish_reason])
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
-                         self._response_id)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                         self._response_model)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                         self._input_tokens)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                         self._output_tokens)
-                     self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                         self._server_address)
-                     if isinstance(self._llmresponse, str):
-                         self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                             "text")
-                     else:
-                         self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                             "json")
-
-                     # Set Span attributes (Extra)
-                     self._span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                         environment)
-                     self._span.set_attribute(SERVICE_NAME,
-                         application_name)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                         True)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                         self._input_tokens + self._output_tokens)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                         cost)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT,
-                         self._tbt)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                         self._ttft)
-                     self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                         version)
-                     if capture_message_content:
-                         self._span.add_event(
-                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                             attributes={
-                                 SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                             },
-                         )
-                         self._span.add_event(
-                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                             attributes={
-                                 SemanticConvetion.GEN_AI_CONTENT_COMPLETION: self._llmresponse,
-                             },
+                     with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                         process_streaming_chat_response(
+                             self,
+                             pricing_info=pricing_info,
+                             environment=environment,
+                             application_name=application_name,
+                             metrics=metrics,
+                             event_provider=event_provider,
+                             capture_message_content=capture_message_content,
+                             disable_metrics=disable_metrics,
+                             version=version
                          )
-                     self._span.set_status(Status(StatusCode.OK))
-
-                     if disable_metrics is False:
-                         attributes = create_metrics_attributes(
-                             service_name=application_name,
-                             deployment_environment=environment,
-                             operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                             system=SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC,
-                             request_model=request_model,
-                             server_address=self._server_address,
-                             server_port=self._server_port,
-                             response_model=self._response_model,
-                         )
-
-                         metrics["genai_client_usage_tokens"].record(
-                             self._input_tokens + self._output_tokens, attributes
-                         )
-                         metrics["genai_client_operation_duration"].record(
-                             self._end_time - self._start_time, attributes
-                         )
-                         metrics["genai_server_tbt"].record(
-                             self._tbt, attributes
-                         )
-                         metrics["genai_server_ttft"].record(
-                             self._ttft, attributes
-                         )
-                         metrics["genai_requests"].add(1, attributes)
-                         metrics["genai_completion_tokens"].add(self._output_tokens, attributes)
-                         metrics["genai_prompt_tokens"].add(self._input_tokens, attributes)
-                         metrics["genai_cost"].record(cost, attributes)
 
                  except Exception as e:
                      handle_exception(self._span, e)
                      logger.error("Error in trace creation: %s", e)
-                 finally:
-                     self._span.end()
                  raise
 
      def wrapper(wrapped, instance, args, kwargs):
          """
-         Wraps the 'messages' API call to add telemetry.
-
-         This collects metrics such as execution time, cost, and token usage, and handles errors
-         gracefully, adding details to the trace for observability.
-
-         Args:
-             wrapped: The original 'messages' method to be wrapped.
-             instance: The instance of the class where the original method is defined.
-             args: Positional arguments for the 'messages' method.
-             kwargs: Keyword arguments for the 'messages' method.
-
-         Returns:
-             The response from the original 'messages' method.
+         Wraps the GenAI function call.
          """
 
-         # Check if streaming is enabled for the API call
-         streaming = kwargs.get("stream", False)
-         server_address, server_port = set_server_address_and_port(instance, "api.anthropic.com", 443)
-         request_model = kwargs.get("model", "claude-3-5-sonnet-latest")
+         streaming = kwargs.get('stream', False)
+         server_address, server_port = set_server_address_and_port(instance, 'api.anthropic.com', 443)
+         request_model = kwargs.get('model', 'claude-3-5-sonnet-latest')
 
-         span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+         span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
 
          # pylint: disable=no-else-return
          if streaming:
-             # Special handling for streaming response to accommodate the nature of data flow
              awaited_wrapped = wrapped(*args, **kwargs)
              span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
 
-             return TracedSyncStream(awaited_wrapped, span, kwargs, server_address, server_port)
+             return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
 
-         # Handling for non-streaming responses
          else:
              with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
                  start_time = time.time()
                  response = wrapped(*args, **kwargs)
-                 end_time = time.time()
-
-                 response_dict = response_as_dict(response)
-                 try:
-                     # Format 'messages' into a single string
-                     message_prompt = kwargs.get("messages", "")
-                     formatted_messages = []
-                     for message in message_prompt:
-                         role = message["role"]
-                         content = message["content"]
-
-                         if isinstance(content, list):
-                             content_str = ", ".join(
-                                 f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
-                                 if "type" in item else f'text: {item["text"]}'
-                                 for item in content
-                             )
-                             formatted_messages.append(f"{role}: {content_str}")
-                         else:
-                             formatted_messages.append(f"{role}: {content}")
-                     prompt = "\n".join(formatted_messages)
-
-                     input_tokens = response_dict.get('usage').get('input_tokens')
-                     output_tokens = response_dict.get('usage').get('output_tokens')
-
-                     # Calculate cost of the operation
-                     cost = get_chat_model_cost(request_model,
-                         pricing_info, input_tokens,
-                         output_tokens)
-
-                     llm_response = ""
-                     for i in range(len(response_dict.get('content'))):
-                         if response_dict.get('content')[i].get('type') == 'text':
-                             llm_response = response_dict.get('content')[i].get('text')
-
-                     # Set Span attributes (OTel Semconv)
-                     span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                     span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                         SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                     span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                         SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC)
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                         request_model)
-                     span.set_attribute(SemanticConvetion.SERVER_PORT,
-                         server_port)
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS,
-                         kwargs.get("max_tokens", -1))
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES,
-                         kwargs.get("stop_sequences", []))
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE,
-                         kwargs.get("temperature", 1.0))
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K,
-                         kwargs.get("top_k", 1.0))
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P,
-                         kwargs.get("top_p", 1.0))
-                     span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
-                         [response_dict.get('stop_reason')])
-                     span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
-                         response_dict.get('id'))
-                     span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                         response_dict.get('model'))
-                     span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                         input_tokens)
-                     span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                         output_tokens)
-                     span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                         server_address)
-
-                     span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                         response_dict.get('content')[0].get('type'))
-
-                     # Set Span attributes (Extra)
-                     span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                         environment)
-                     span.set_attribute(SERVICE_NAME,
-                         application_name)
-                     span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                         False)
-                     span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                         input_tokens + output_tokens)
-                     span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                         cost)
-                     span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                         end_time - start_time)
-                     span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                         version)
-
-                     if capture_message_content:
-                         span.add_event(
-                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                             attributes={
-                                 SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                             },
-                         )
-                         span.add_event(
-                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                             attributes={
-                                 SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llm_response,
-                             },
-                         )
-
-                     span.set_status(Status(StatusCode.OK))
-
-                     if disable_metrics is False:
-                         attributes = create_metrics_attributes(
-                             service_name=application_name,
-                             deployment_environment=environment,
-                             operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                             system=SemanticConvetion.GEN_AI_SYSTEM_ANTHROPIC,
-                             request_model=request_model,
-                             server_address=server_address,
-                             server_port=server_port,
-                             response_model=response_dict.get('model'),
-                         )
-
-                         metrics["genai_client_usage_tokens"].record(
-                             input_tokens + output_tokens, attributes
-                         )
-                         metrics["genai_client_operation_duration"].record(
-                             end_time - start_time, attributes
-                         )
-                         metrics["genai_server_ttft"].record(
-                             end_time - start_time, attributes
-                         )
-                         metrics["genai_requests"].add(1, attributes)
-                         metrics["genai_completion_tokens"].add(output_tokens, attributes)
-                         metrics["genai_prompt_tokens"].add(input_tokens, attributes)
-                         metrics["genai_cost"].record(cost, attributes)
-
-                     # Return original response
-                     return response
-
-                 except Exception as e:
-                     handle_exception(span, e)
-                     logger.error("Error in trace creation: %s", e)
-
-                     # Return original response
-                     return response
+                 response = process_chat_response(
+                     response=response,
+                     request_model=request_model,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     event_provider=event_provider,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version,
+                     **kwargs
+                 )
+
+                 return response
 
      return wrapper
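
Note on the refactor above: the chunk-by-chunk aggregation that 1.33.11 performed inline in __next__ is reduced in 1.33.13 to a single process_chunk(self, chunk) call, with the logic moved to openlit.instrumentation.anthropic.utils alongside process_chat_response and process_streaming_chat_response, which take over span and metrics finalization (the hunk header -94,348 +82,68 shows the wrapped body shrinking accordingly). As rough orientation only, here is a minimal sketch of what such an aggregation helper does, reconstructed from the removed inline code and the attribute names initialized in TracedSyncStream.__init__; the function name _aggregate_chunk and the model_dump fallback are illustrative assumptions, not the packaged implementation.

# Illustrative sketch only -- not the helper shipped in openlit 1.33.13.
# It mirrors the aggregation the removed inline __next__ body performed,
# written against the attributes initialized in TracedSyncStream.__init__.
def _aggregate_chunk(stream, chunk):
    """Fold one Anthropic streaming event into the stream wrapper's state."""
    # Assumption: chunk is a pydantic event object exposing model_dump();
    # fall back to treating it as a plain mapping.
    chunked = chunk.model_dump() if hasattr(chunk, "model_dump") else dict(chunk)

    if chunked.get("type") == "message_start":
        message = chunked.get("message", {})
        stream._response_id = message.get("id")
        stream._response_model = message.get("model")
        stream._response_role = message.get("role", "")
        stream._input_tokens = message.get("usage", {}).get("input_tokens")

    if chunked.get("type") == "content_block_delta":
        text = chunked.get("delta", {}).get("text")
        if text:
            stream._llmresponse += text

    if chunked.get("type") == "message_delta":
        stream._output_tokens = chunked.get("usage", {}).get("output_tokens")
        stream._finish_reason = chunked.get("delta", {}).get("stop_reason")

    # The new _tool_id, _tool_name and _tool_arguments fields suggest the real
    # helper also aggregates tool-use events (content_block_start /
    # input_json_delta); that handling is omitted from this sketch.

Centralizing this in a shared utils module presumably lets the sync and async Anthropic wrappers reuse one aggregation and finalization path instead of each carrying the span and metric handling that the removed block above amounted to.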