openlit 1.33.10__py3-none-any.whl → 1.33.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. openlit/__helpers.py +125 -88
  2. openlit/__init__.py +38 -11
  3. openlit/instrumentation/ag2/__init__.py +19 -20
  4. openlit/instrumentation/ag2/ag2.py +134 -69
  5. openlit/instrumentation/ai21/__init__.py +22 -21
  6. openlit/instrumentation/ai21/ai21.py +82 -546
  7. openlit/instrumentation/ai21/async_ai21.py +82 -546
  8. openlit/instrumentation/ai21/utils.py +409 -0
  9. openlit/instrumentation/anthropic/__init__.py +16 -16
  10. openlit/instrumentation/anthropic/anthropic.py +61 -353
  11. openlit/instrumentation/anthropic/async_anthropic.py +62 -354
  12. openlit/instrumentation/anthropic/utils.py +251 -0
  13. openlit/instrumentation/assemblyai/__init__.py +2 -2
  14. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  15. openlit/instrumentation/astra/__init__.py +25 -25
  16. openlit/instrumentation/astra/astra.py +2 -2
  17. openlit/instrumentation/astra/async_astra.py +2 -2
  18. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  19. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
  20. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
  21. openlit/instrumentation/bedrock/__init__.py +2 -2
  22. openlit/instrumentation/bedrock/bedrock.py +3 -3
  23. openlit/instrumentation/chroma/__init__.py +9 -9
  24. openlit/instrumentation/chroma/chroma.py +2 -2
  25. openlit/instrumentation/cohere/__init__.py +7 -7
  26. openlit/instrumentation/cohere/async_cohere.py +9 -9
  27. openlit/instrumentation/cohere/cohere.py +9 -9
  28. openlit/instrumentation/controlflow/__init__.py +4 -4
  29. openlit/instrumentation/controlflow/controlflow.py +2 -2
  30. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  31. openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
  32. openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
  33. openlit/instrumentation/crewai/__init__.py +3 -3
  34. openlit/instrumentation/crewai/crewai.py +2 -2
  35. openlit/instrumentation/dynamiq/__init__.py +5 -5
  36. openlit/instrumentation/dynamiq/dynamiq.py +2 -2
  37. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  38. openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
  39. openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
  40. openlit/instrumentation/embedchain/__init__.py +2 -2
  41. openlit/instrumentation/embedchain/embedchain.py +4 -4
  42. openlit/instrumentation/firecrawl/__init__.py +3 -3
  43. openlit/instrumentation/firecrawl/firecrawl.py +2 -2
  44. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  45. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  46. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  47. openlit/instrumentation/gpt4all/__init__.py +3 -3
  48. openlit/instrumentation/gpt4all/gpt4all.py +7 -7
  49. openlit/instrumentation/groq/__init__.py +3 -3
  50. openlit/instrumentation/groq/async_groq.py +5 -5
  51. openlit/instrumentation/groq/groq.py +5 -5
  52. openlit/instrumentation/haystack/__init__.py +2 -2
  53. openlit/instrumentation/haystack/haystack.py +2 -2
  54. openlit/instrumentation/julep/__init__.py +7 -7
  55. openlit/instrumentation/julep/async_julep.py +3 -3
  56. openlit/instrumentation/julep/julep.py +3 -3
  57. openlit/instrumentation/langchain/__init__.py +2 -2
  58. openlit/instrumentation/langchain/async_langchain.py +13 -9
  59. openlit/instrumentation/langchain/langchain.py +13 -8
  60. openlit/instrumentation/letta/__init__.py +7 -7
  61. openlit/instrumentation/letta/letta.py +5 -5
  62. openlit/instrumentation/litellm/__init__.py +5 -5
  63. openlit/instrumentation/litellm/async_litellm.py +8 -8
  64. openlit/instrumentation/litellm/litellm.py +8 -8
  65. openlit/instrumentation/llamaindex/__init__.py +2 -2
  66. openlit/instrumentation/llamaindex/llamaindex.py +2 -2
  67. openlit/instrumentation/mem0/__init__.py +2 -2
  68. openlit/instrumentation/mem0/mem0.py +2 -2
  69. openlit/instrumentation/milvus/__init__.py +2 -2
  70. openlit/instrumentation/milvus/milvus.py +2 -2
  71. openlit/instrumentation/mistral/__init__.py +7 -7
  72. openlit/instrumentation/mistral/async_mistral.py +10 -10
  73. openlit/instrumentation/mistral/mistral.py +10 -10
  74. openlit/instrumentation/multion/__init__.py +7 -7
  75. openlit/instrumentation/multion/async_multion.py +5 -5
  76. openlit/instrumentation/multion/multion.py +5 -5
  77. openlit/instrumentation/ollama/__init__.py +11 -9
  78. openlit/instrumentation/ollama/async_ollama.py +71 -465
  79. openlit/instrumentation/ollama/ollama.py +71 -465
  80. openlit/instrumentation/ollama/utils.py +332 -0
  81. openlit/instrumentation/openai/__init__.py +11 -11
  82. openlit/instrumentation/openai/async_openai.py +18 -18
  83. openlit/instrumentation/openai/openai.py +18 -18
  84. openlit/instrumentation/phidata/__init__.py +2 -2
  85. openlit/instrumentation/phidata/phidata.py +2 -2
  86. openlit/instrumentation/pinecone/__init__.py +6 -6
  87. openlit/instrumentation/pinecone/pinecone.py +2 -2
  88. openlit/instrumentation/premai/__init__.py +3 -3
  89. openlit/instrumentation/premai/premai.py +7 -7
  90. openlit/instrumentation/qdrant/__init__.py +2 -2
  91. openlit/instrumentation/qdrant/async_qdrant.py +2 -2
  92. openlit/instrumentation/qdrant/qdrant.py +2 -2
  93. openlit/instrumentation/reka/__init__.py +3 -3
  94. openlit/instrumentation/reka/async_reka.py +3 -3
  95. openlit/instrumentation/reka/reka.py +3 -3
  96. openlit/instrumentation/together/__init__.py +5 -5
  97. openlit/instrumentation/together/async_together.py +8 -8
  98. openlit/instrumentation/together/together.py +8 -8
  99. openlit/instrumentation/transformers/__init__.py +2 -2
  100. openlit/instrumentation/transformers/transformers.py +4 -4
  101. openlit/instrumentation/vertexai/__init__.py +9 -9
  102. openlit/instrumentation/vertexai/async_vertexai.py +4 -4
  103. openlit/instrumentation/vertexai/vertexai.py +4 -4
  104. openlit/instrumentation/vllm/__init__.py +2 -2
  105. openlit/instrumentation/vllm/vllm.py +3 -3
  106. openlit/otel/events.py +85 -0
  107. openlit/otel/tracing.py +3 -13
  108. openlit/semcov/__init__.py +13 -1
  109. {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/METADATA +2 -2
  110. openlit-1.33.12.dist-info/RECORD +126 -0
  111. openlit-1.33.10.dist-info/RECORD +0 -122
  112. {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/LICENSE +0 -0
  113. {openlit-1.33.10.dist-info → openlit-1.33.12.dist-info}/WHEEL +0 -0
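
Much of the small +N/-N churn across the instrumentation files above comes from a single signature change, visible in the Ollama diff below: each wrapper factory gains an event_provider argument, and the trace_content flag is renamed to capture_message_content. As a rough sketch of how such a factory is typically installed (the wrap_function_wrapper wiring and the wrapped attribute name are assumptions for illustration, not taken from this diff):

# Hypothetical wiring sketch (not part of this diff): installing the
# new-style wrapper factory whose signature appears in the hunks below.
from wrapt import wrap_function_wrapper
from openlit.instrumentation.ollama.ollama import chat

wrap_function_wrapper(
    "ollama",
    "Client.chat",  # assumed wrap target, for illustration only
    chat(
        version="1.33.12",
        environment="production",
        application_name="my-app",
        tracer=tracer,                  # OpenTelemetry tracer, assumed in scope
        event_provider=event_provider,  # new argument in this release
        pricing_info=pricing_info,
        capture_message_content=True,   # renamed from trace_content
        metrics=metrics,
        disable_metrics=False,
    ),
)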
openlit/instrumentation/ollama/ollama.py

@@ -4,56 +4,37 @@ Module for monitoring Ollama API calls.
 
 import logging
 import time
-from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import SpanKind
 from openlit.__helpers import (
-    get_chat_model_cost,
-    get_embed_model_cost,
     handle_exception,
-    response_as_dict,
-    general_tokens,
-    calculate_ttft,
-    calculate_tbt,
-    create_metrics_attributes,
     set_server_address_and_port
 )
+from openlit.instrumentation.ollama.utils import (
+    process_chunk,
+    process_chat_response,
+    process_streaming_chat_response,
+    process_embedding_response
+)
 from openlit.semcov import SemanticConvetion
 
-# Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
 
 def chat(version, environment, application_name,
-    tracer, pricing_info, trace_content, metrics, disable_metrics):
+    tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for chat completions to collect metrics.
-
-    Args:
-        version: Version of the monitoring package.
-        environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the Ollama API.
-        tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of Ollama usage.
-        trace_content: Flag indicating whether to trace the actual content.
-
-    Returns:
-        A function that wraps the chat completions method to add telemetry.
+    Generates a telemetry wrapper for GenAI function call
     """
 
     class TracedSyncStream:
         """
-        Wrapper for streaming responses to collect metrics and trace data.
-        Wraps the response to collect message IDs and aggregated response.
-
-        This class implements the '__aiter__' and '__anext__' methods that
-        handle asynchronous streaming responses.
-
-        This class also implements '__aenter__' and '__aexit__' methods that
-        handle asynchronous context management protocol.
+        Wrapper for streaming responses to collect telemetry.
         """
+
         def __init__(
             self,
             wrapped,
             span,
+            span_name,
             kwargs,
             server_address,
             server_port,
@@ -61,13 +42,14 @@ def chat(version, environment, application_name,
         ):
             self.__wrapped__ = wrapped
             self._span = span
-            # Placeholder for aggregating streaming response
            self._llmresponse = ""
             self._response_model = ""
             self._finish_reason = ""
+            self._tool_calls = []
             self._input_tokens = 0
             self._output_tokens = 0
-
+            self._response_role = ''
+            self._span_name = span_name
             self._args = args
             self._kwargs = kwargs
             self._start_time = time.time()
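
The constructor now also receives span_name: rather than ending the span it was handed (note the removal of the finally: self._span.end() block in the next hunk), the wrapper re-opens a span under that name once the stream is exhausted. Nothing changes on the consumer side; a typical instrumented streaming call still looks like this (a plain usage sketch, not part of this diff):

# Consumer-side usage; behavior is unchanged by this refactor.
import ollama

for chunk in ollama.chat(
    model="llama3",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    print(chunk["message"]["content"], end="")
# When the underlying iterator raises StopIteration, the wrapper opens
# the recorded span and emits the aggregated telemetry in one pass.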
@@ -95,194 +77,34 @@ def chat(version, environment, application_name,
         def __next__(self):
             try:
                 chunk = self.__wrapped__.__next__()
-                end_time = time.time()
-                # Record the timestamp for the current chunk
-                self._timestamps.append(end_time)
-
-                if len(self._timestamps) == 1:
-                    # Calculate time to first chunk
-                    self._ttft = calculate_ttft(self._timestamps, self._start_time)
-
-                chunked = response_as_dict(chunk)
-                self._llmresponse += chunked.get('message').get('content')
-
-                if chunked.get('eval_count'):
-                    self._input_tokens = chunked.get('prompt_eval_count')
-                    self._output_tokens = chunked.get('eval_count')
-                    self._response_model = chunked.get('model')
-                    self._finish_reason = chunked.get('done_reason')
+                process_chunk(self, chunk)
                 return chunk
             except StopIteration:
-                # Handling exception ensure observability without disrupting operation
                 try:
-                    self._end_time = time.time()
-                    if len(self._timestamps) > 1:
-                        self._tbt = calculate_tbt(self._timestamps)
-
-                    # Format 'messages' into a single string
-                    message_prompt = self._kwargs.get("messages", "")
-                    formatted_messages = []
-                    for message in message_prompt:
-                        role = message["role"]
-                        content = message["content"]
-
-                        if isinstance(content, list):
-                            content_str_list = []
-                            for item in content:
-                                if item["type"] == "text":
-                                    content_str_list.append(f'text: {item["text"]}')
-                                elif (item["type"] == "image_url" and
-                                      not item["image_url"]["url"].startswith("data:")):
-                                    content_str_list.append(f'image_url: {item["image_url"]["url"]}')
-                            content_str = ", ".join(content_str_list)
-                            formatted_messages.append(f"{role}: {content_str}")
-                        else:
-                            formatted_messages.append(f"{role}: {content}")
-                    prompt = "\n".join(formatted_messages)
-
-                    request_model = self._kwargs.get("model", "gpt-4o")
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model,
-                                pricing_info, self._input_tokens,
-                                self._output_tokens)
-
-                    # Set Span attributes (OTel Semconv)
-                    self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                                SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                SemanticConvetion.GEN_AI_SYSTEM_OLLAMA)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                                request_model)
-                    self._span.set_attribute(SemanticConvetion.SERVER_PORT,
-                                self._server_port)
-
-                    # List of attributes and their config keys
-                    attributes = [
-                        (SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'repeat_penalty'),
-                        (SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, 'max_tokens'),
-                        (SemanticConvetion.GEN_AI_REQUEST_SEED, 'seed'),
-                        (SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, 'stop'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TOP_P, 'top_p'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TOP_K, 'top_k'),
-                    ]
-
-                    # Safely get the options dictionary from kwargs
-                    options = self._kwargs.get('options', {})
-
-                    # Set each attribute if the corresponding value exists and is not None
-                    for attribute, key in attributes:
-                        # Use dictionary `get` to retrieve values from the options dictionary
-                        value = options.get(key)
-                        if value is not None:
-                            self._span.set_attribute(attribute, value)
-
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
-                                [self._finish_reason])
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                                self._response_model)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                                self._input_tokens)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                self._output_tokens)
-                    self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                                self._server_address)
-                    if isinstance(self._llmresponse, str):
-                        self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                "text")
-                    else:
-                        self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                "json")
-
-                    # Set Span attributes (Extra)
-                    self._span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                                environment)
-                    self._span.set_attribute(SERVICE_NAME,
-                                application_name)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                                True)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                                self._input_tokens + self._output_tokens)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                                cost)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT,
-                                self._tbt)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                                self._ttft)
-                    self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                                version)
-                    if trace_content:
-                        self._span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-                        self._span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: self._llmresponse,
-                            },
-                        )
-                    self._span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
-                            request_model=request_model,
-                            server_address=self._server_address,
-                            server_port=self._server_port,
-                            response_model=self._response_model,
-                        )
-
-                        metrics["genai_client_usage_tokens"].record(
-                            self._input_tokens + self._output_tokens, attributes
-                        )
-                        metrics["genai_client_operation_duration"].record(
-                            self._end_time - self._start_time, attributes
-                        )
-                        metrics["genai_server_tbt"].record(
-                            self._tbt, attributes
+                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            event_provider=event_provider,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
                         )
-                        metrics["genai_server_ttft"].record(
-                            self._ttft, attributes
-                        )
-                        metrics["genai_requests"].add(1, attributes)
-                        metrics["genai_completion_tokens"].add(self._output_tokens, attributes)
-                        metrics["genai_prompt_tokens"].add(self._input_tokens, attributes)
-                        metrics["genai_cost"].record(cost, attributes)
-
                 except Exception as e:
                     handle_exception(self._span, e)
                     logger.error("Error in trace creation: %s", e)
-                finally:
-                    self._span.end()
                 raise
 
     def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps the 'chat.completions' API call to add telemetry.
-
-        This collects metrics such as execution time, cost, and token usage, and handles errors
-        gracefully, adding details to the trace for observability.
-
-        Args:
-            wrapped: The original 'chat.completions' method to be wrapped.
-            instance: The instance of the class where the original method is defined.
-            args: Positional arguments for the 'chat.completions' method.
-            kwargs: Keyword arguments for the 'chat.completions' method.
-
-        Returns:
-            The response from the original 'chat.completions' method.
+        Wraps the GenAI function call.
         """
 
-        # Check if streaming is enabled for the API call
         streaming = kwargs.get("stream", False)
+
         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
         request_model = kwargs.get("model", "gpt-4o")
 
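The chunk-accumulation logic deleted from __next__ above now lives in the new openlit/instrumentation/ollama/utils.py (file 80 in the list, +332 lines) as process_chunk. Inferred purely from the inline code it replaces, the helper plausibly looks something like the following; the actual utils.py body is not reproduced in this diff:

# Hypothetical reconstruction of process_chunk, inferred from the inline
# logic removed above; not the actual utils.py implementation.
import time

from openlit.__helpers import calculate_ttft, response_as_dict

def process_chunk(scope, chunk):
    end_time = time.time()
    # Record the timestamp for the current chunk
    scope._timestamps.append(end_time)
    if len(scope._timestamps) == 1:
        # Time to first chunk, measured from the start of the stream
        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)

    chunked = response_as_dict(chunk)
    scope._llmresponse += chunked.get('message', {}).get('content', '')

    # The final chunk carries usage and completion metadata
    if chunked.get('eval_count'):
        scope._input_tokens = chunked.get('prompt_eval_count')
        scope._output_tokens = chunked.get('eval_count')
        scope._response_model = chunked.get('model')
        scope._finish_reason = chunked.get('done_reason')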
@@ -290,202 +112,45 @@ def chat(version, environment, application_name,
 
         # pylint: disable=no-else-return
         if streaming:
-            # Special handling for streaming response to accommodate the nature of data flow
             awaited_wrapped = wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
 
-            return TracedSyncStream(awaited_wrapped, span, kwargs, server_address, server_port)
-
-        # Handling for non-streaming responses
         else:
             with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
                 start_time = time.time()
                 response = wrapped(*args, **kwargs)
-                end_time = time.time()
-
-                response_dict = response_as_dict(response)
-
-                try:
-                    # Format 'messages' into a single string
-                    message_prompt = kwargs.get("messages", "")
-                    formatted_messages = []
-                    for message in message_prompt:
-                        role = message["role"]
-                        content = message["content"]
-
-                        if isinstance(content, list):
-                            content_str = ", ".join(
-                                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
-                                if "type" in item else f'text: {item["text"]}'
-                                for item in content
-                            )
-                            formatted_messages.append(f"{role}: {content_str}")
-                        else:
-                            formatted_messages.append(f"{role}: {content}")
-                    prompt = "\n".join(formatted_messages)
-
-                    input_tokens = response_dict.get('prompt_eval_count')
-                    output_tokens = response_dict.get('eval_count')
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model,
-                                pricing_info, input_tokens,
-                                output_tokens)
-
-                    # Set base span attribues (OTel Semconv)
-                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                    span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                                SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                SemanticConvetion.GEN_AI_SYSTEM_OLLAMA)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                                request_model)
-                    span.set_attribute(SemanticConvetion.SERVER_PORT,
-                                server_port)
-
-                    # List of attributes and their config keys
-                    attributes = [
-                        (SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'repeat_penalty'),
-                        (SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, 'max_tokens'),
-                        (SemanticConvetion.GEN_AI_REQUEST_SEED, 'seed'),
-                        (SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, 'stop'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TOP_P, 'top_p'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TOP_K, 'top_k'),
-                    ]
-
-                    # Safely get the options dictionary from kwargs
-                    options = kwargs.get('options', {})
-
-                    # Set each attribute if the corresponding value exists and is not None
-                    for attribute, key in attributes:
-                        # Use dictionary `get` to retrieve values from the options dictionary
-                        value = options.get(key)
-                        if value is not None:
-                            span.set_attribute(attribute, value)
-
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                                response_dict.get('model'))
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                                input_tokens)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                output_tokens)
-                    span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                                server_address)
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
-                                [response_dict.get('done_reason')])
-                    if kwargs.get('format'):
-                        span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                'json')
-                    else:
-                        span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                'text')
-
-                    # Set base span attribues (Extras)
-                    span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                                environment)
-                    span.set_attribute(SERVICE_NAME,
-                                application_name)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                                False)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                                input_tokens + output_tokens)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                                cost)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                                end_time - start_time)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                                version)
-                    if trace_content:
-                        span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-                        span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                            attributes={
-                                # pylint: disable=line-too-long
-                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: str(response_dict.get('message').get('content')),
-                            },
-                        )
-                    if kwargs.get('tools'):
-                        span.set_attribute(SemanticConvetion.GEN_AI_TOOL_CALLS,
-                                str(response_dict.get('message').get('tool_calls')))
-
-                    span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
-                            request_model=request_model,
-                            server_address=server_address,
-                            server_port=server_port,
-                            response_model=response_dict.get('model'),
-                        )
-
-                        metrics["genai_client_usage_tokens"].record(
-                            input_tokens + output_tokens, attributes
-                        )
-                        metrics["genai_client_operation_duration"].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics["genai_server_ttft"].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics["genai_requests"].add(1, attributes)
-                        metrics["genai_completion_tokens"].add(output_tokens, attributes)
-                        metrics["genai_prompt_tokens"].add(input_tokens, attributes)
-                        metrics["genai_cost"].record(cost, attributes)
-
-                    # Return original response
-                    return response
-
-                except Exception as e:
-                    handle_exception(span, e)
-                    logger.error("Error in trace creation: %s", e)
-
-                    # Return original response
-                    return response
+                response = process_chat_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    event_provider=event_provider,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+                return response
 
     return wrapper
 
 def embeddings(version, environment, application_name,
-    tracer, pricing_info, trace_content, metrics, disable_metrics):
+    tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for embeddings to collect metrics.
-
-    Args:
-        version: Version of the monitoring package.
-        environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the Ollama API.
-        tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of Ollama usage.
-        trace_content: Flag indicating whether to trace the actual content.
-
-    Returns:
-        A function that wraps the embeddings method to add telemetry.
+    Generates a telemetry wrapper for GenAI function call
     """
 
     def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps the 'embeddings' API call to add telemetry.
-
-        This collects metrics such as execution time, cost, and token usage, and handles errors
-        gracefully, adding details to the trace for observability.
-
-        Args:
-            wrapped: The original 'embeddings' method to be wrapped.
-            instance: The instance of the class where the original method is defined.
-            args: Positional arguments for the 'embeddings' method.
-            kwargs: Keyword arguments for the 'embeddings' method.
-
-        Returns:
-            The response from the original 'embeddings' method.
+        Wraps the GenAI function call.
         """
 
         server_address, server_port = set_server_address_and_port(instance, '127.0.0.1', 11434)
@@ -496,83 +161,24 @@ def embeddings(version, environment, application_name,
         with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
             start_time = time.time()
             response = wrapped(*args, **kwargs)
-            end_time = time.time()
-
-            try:
-                input_tokens = general_tokens(str(kwargs.get('prompt')))
-
-                # Calculate cost of the operation
-                cost = get_embed_model_cost(request_model,
-                            pricing_info, input_tokens)
-
-                # Set Span attributes (OTel Semconv)
-                span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
-                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                            SemanticConvetion.GEN_AI_OPERATION_TYPE_EMBEDDING)
-                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                            SemanticConvetion.GEN_AI_SYSTEM_OLLAMA)
-                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                            request_model)
-                span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                            request_model)
-                span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                            server_address)
-                span.set_attribute(SemanticConvetion.SERVER_PORT,
-                            server_port)
-                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                            input_tokens)
-
-                # Set Span attributes (Extras)
-                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                            environment)
-                span.set_attribute(SERVICE_NAME,
-                            application_name)
-                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                            input_tokens)
-                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                            cost)
-                span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                            version)
-
-                if trace_content:
-                    span.add_event(
-                        name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                        attributes={
-                            SemanticConvetion.GEN_AI_CONTENT_PROMPT: str(kwargs.get('prompt', '')),
-                        },
-                    )
-
-                span.set_status(Status(StatusCode.OK))
-
-                if disable_metrics is False:
-                    attributes = create_metrics_attributes(
-                        service_name=application_name,
-                        deployment_environment=environment,
-                        operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_EMBEDDING,
-                        system=SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
-                        request_model=request_model,
-                        server_address=server_address,
-                        server_port=server_port,
-                        response_model=request_model,
-                    )
-                    metrics['genai_client_usage_tokens'].record(
-                        input_tokens, attributes
-                    )
-                    metrics['genai_client_operation_duration'].record(
-                        end_time - start_time, attributes
-                    )
-                    metrics['genai_requests'].add(1, attributes)
-                    metrics['genai_prompt_tokens'].add(input_tokens, attributes)
-                    metrics['genai_cost'].record(cost, attributes)
-
-                # Return original response
-                return response
-
-            except Exception as e:
-                handle_exception(span, e)
-                logger.error('Error in trace creation: %s', e)
-
-                # Return original response
-                return response
+            response = process_embedding_response(
+                response=response,
+                request_model=request_model,
+                pricing_info=pricing_info,
+                server_port=server_port,
+                server_address=server_address,
+                environment=environment,
+                application_name=application_name,
+                metrics=metrics,
+                event_provider=event_provider,
+                start_time=start_time,
+                span=span,
+                capture_message_content=capture_message_content,
+                disable_metrics=disable_metrics,
+                version=version,
+                **kwargs
+            )
+
+            return response
 
     return wrapper
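
Both wrappers now reduce to the same pattern: run the call inside a CLIENT span, then hand every detail (span attributes, content events, metrics, cost) to a process_*_response helper. From the two call sites above, the shared helper shape can be inferred; this is a signature sketch only, since utils.py itself is not shown in this section:

# Signature sketch inferred from the call sites above; the body is a
# placeholder, not the actual utils.py implementation.
def process_embedding_response(response, request_model, pricing_info,
                               server_port, server_address, environment,
                               application_name, metrics, event_provider,
                               start_time, span, capture_message_content,
                               disable_metrics, version, **kwargs):
    # Expected responsibilities, judging from the inline code it replaced:
    # set OTel semconv span attributes, emit prompt events through
    # event_provider when capture_message_content is enabled, record
    # usage and cost metrics unless disable_metrics, return the response.
    return response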