openlit 1.33.10__py3-none-any.whl → 1.33.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (112)
  1. openlit/__helpers.py +73 -0
  2. openlit/__init__.py +38 -11
  3. openlit/instrumentation/ag2/__init__.py +9 -10
  4. openlit/instrumentation/ag2/ag2.py +133 -68
  5. openlit/instrumentation/ai21/__init__.py +6 -5
  6. openlit/instrumentation/ai21/ai21.py +71 -534
  7. openlit/instrumentation/ai21/async_ai21.py +71 -534
  8. openlit/instrumentation/ai21/utils.py +407 -0
  9. openlit/instrumentation/anthropic/__init__.py +3 -3
  10. openlit/instrumentation/anthropic/anthropic.py +4 -4
  11. openlit/instrumentation/anthropic/async_anthropic.py +4 -4
  12. openlit/instrumentation/assemblyai/__init__.py +2 -2
  13. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  14. openlit/instrumentation/astra/__init__.py +25 -25
  15. openlit/instrumentation/astra/astra.py +2 -2
  16. openlit/instrumentation/astra/async_astra.py +2 -2
  17. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  18. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +8 -8
  19. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +8 -8
  20. openlit/instrumentation/bedrock/__init__.py +2 -2
  21. openlit/instrumentation/bedrock/bedrock.py +3 -3
  22. openlit/instrumentation/chroma/__init__.py +9 -9
  23. openlit/instrumentation/chroma/chroma.py +2 -2
  24. openlit/instrumentation/cohere/__init__.py +7 -7
  25. openlit/instrumentation/cohere/async_cohere.py +9 -9
  26. openlit/instrumentation/cohere/cohere.py +9 -9
  27. openlit/instrumentation/controlflow/__init__.py +4 -4
  28. openlit/instrumentation/controlflow/controlflow.py +2 -2
  29. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  30. openlit/instrumentation/crawl4ai/async_crawl4ai.py +2 -2
  31. openlit/instrumentation/crawl4ai/crawl4ai.py +2 -2
  32. openlit/instrumentation/crewai/__init__.py +3 -3
  33. openlit/instrumentation/crewai/crewai.py +2 -2
  34. openlit/instrumentation/dynamiq/__init__.py +5 -5
  35. openlit/instrumentation/dynamiq/dynamiq.py +2 -2
  36. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  37. openlit/instrumentation/elevenlabs/async_elevenlabs.py +3 -3
  38. openlit/instrumentation/elevenlabs/elevenlabs.py +3 -3
  39. openlit/instrumentation/embedchain/__init__.py +2 -2
  40. openlit/instrumentation/embedchain/embedchain.py +4 -4
  41. openlit/instrumentation/firecrawl/__init__.py +3 -3
  42. openlit/instrumentation/firecrawl/firecrawl.py +2 -2
  43. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  44. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  45. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  46. openlit/instrumentation/gpt4all/__init__.py +3 -3
  47. openlit/instrumentation/gpt4all/gpt4all.py +7 -7
  48. openlit/instrumentation/groq/__init__.py +3 -3
  49. openlit/instrumentation/groq/async_groq.py +5 -5
  50. openlit/instrumentation/groq/groq.py +5 -5
  51. openlit/instrumentation/haystack/__init__.py +2 -2
  52. openlit/instrumentation/haystack/haystack.py +2 -2
  53. openlit/instrumentation/julep/__init__.py +7 -7
  54. openlit/instrumentation/julep/async_julep.py +3 -3
  55. openlit/instrumentation/julep/julep.py +3 -3
  56. openlit/instrumentation/langchain/__init__.py +2 -2
  57. openlit/instrumentation/langchain/async_langchain.py +13 -9
  58. openlit/instrumentation/langchain/langchain.py +13 -8
  59. openlit/instrumentation/letta/__init__.py +7 -7
  60. openlit/instrumentation/letta/letta.py +5 -5
  61. openlit/instrumentation/litellm/__init__.py +5 -5
  62. openlit/instrumentation/litellm/async_litellm.py +8 -8
  63. openlit/instrumentation/litellm/litellm.py +8 -8
  64. openlit/instrumentation/llamaindex/__init__.py +2 -2
  65. openlit/instrumentation/llamaindex/llamaindex.py +2 -2
  66. openlit/instrumentation/mem0/__init__.py +2 -2
  67. openlit/instrumentation/mem0/mem0.py +2 -2
  68. openlit/instrumentation/milvus/__init__.py +2 -2
  69. openlit/instrumentation/milvus/milvus.py +2 -2
  70. openlit/instrumentation/mistral/__init__.py +7 -7
  71. openlit/instrumentation/mistral/async_mistral.py +10 -10
  72. openlit/instrumentation/mistral/mistral.py +10 -10
  73. openlit/instrumentation/multion/__init__.py +7 -7
  74. openlit/instrumentation/multion/async_multion.py +5 -5
  75. openlit/instrumentation/multion/multion.py +5 -5
  76. openlit/instrumentation/ollama/__init__.py +11 -9
  77. openlit/instrumentation/ollama/async_ollama.py +71 -465
  78. openlit/instrumentation/ollama/ollama.py +71 -465
  79. openlit/instrumentation/ollama/utils.py +333 -0
  80. openlit/instrumentation/openai/__init__.py +11 -11
  81. openlit/instrumentation/openai/async_openai.py +18 -18
  82. openlit/instrumentation/openai/openai.py +18 -18
  83. openlit/instrumentation/phidata/__init__.py +2 -2
  84. openlit/instrumentation/phidata/phidata.py +2 -2
  85. openlit/instrumentation/pinecone/__init__.py +6 -6
  86. openlit/instrumentation/pinecone/pinecone.py +2 -2
  87. openlit/instrumentation/premai/__init__.py +3 -3
  88. openlit/instrumentation/premai/premai.py +7 -7
  89. openlit/instrumentation/qdrant/__init__.py +2 -2
  90. openlit/instrumentation/qdrant/async_qdrant.py +2 -2
  91. openlit/instrumentation/qdrant/qdrant.py +2 -2
  92. openlit/instrumentation/reka/__init__.py +3 -3
  93. openlit/instrumentation/reka/async_reka.py +3 -3
  94. openlit/instrumentation/reka/reka.py +3 -3
  95. openlit/instrumentation/together/__init__.py +5 -5
  96. openlit/instrumentation/together/async_together.py +8 -8
  97. openlit/instrumentation/together/together.py +8 -8
  98. openlit/instrumentation/transformers/__init__.py +2 -2
  99. openlit/instrumentation/transformers/transformers.py +4 -4
  100. openlit/instrumentation/vertexai/__init__.py +9 -9
  101. openlit/instrumentation/vertexai/async_vertexai.py +4 -4
  102. openlit/instrumentation/vertexai/vertexai.py +4 -4
  103. openlit/instrumentation/vllm/__init__.py +2 -2
  104. openlit/instrumentation/vllm/vllm.py +3 -3
  105. openlit/otel/events.py +85 -0
  106. openlit/otel/tracing.py +3 -13
  107. openlit/semcov/__init__.py +13 -1
  108. {openlit-1.33.10.dist-info → openlit-1.33.11.dist-info}/METADATA +2 -2
  109. openlit-1.33.11.dist-info/RECORD +125 -0
  110. openlit-1.33.10.dist-info/RECORD +0 -122
  111. {openlit-1.33.10.dist-info → openlit-1.33.11.dist-info}/LICENSE +0 -0
  112. {openlit-1.33.10.dist-info → openlit-1.33.11.dist-info}/WHEEL +0 -0
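The headline change in this release is a rename of the content-capture flag: every instrumentor now reads capture_message_content from its kwargs instead of trace_content, as the openai hunks below show. For orientation, a hedged application-side sketch; the assumption here is that openlit.init() passes the flag through under the same keyword name in 1.33.11:

import openlit

# 'capture_message_content' replaces the old 'trace_content' flag; when False,
# prompts and completions are kept out of span events and log events.
openlit.init(
    application_name="demo-app",
    environment="development",
    capture_message_content=True,
)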
openlit/instrumentation/ollama/utils.py
@@ -0,0 +1,333 @@
+ """
+ Ollama OpenTelemetry instrumentation utility functions
+ """
+ import time
+
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+ from opentelemetry.trace import Status, StatusCode
+
+ from openlit.__helpers import (
+     calculate_ttft,
+     response_as_dict,
+     calculate_tbt,
+     general_tokens,
+     extract_and_format_input,
+     get_chat_model_cost,
+     get_embed_model_cost,
+     handle_exception,
+     create_metrics_attributes,
+     otel_event,
+     concatenate_all_contents
+ )
+ from openlit.semcov import SemanticConvetion
+
+ def process_chunk(self, chunk):
+     """
+     Process a chunk of response data and update state.
+     """
+
+     end_time = time.time()
+     # Record the timestamp for the current chunk
+     self._timestamps.append(end_time)
+
+     if len(self._timestamps) == 1:
+         # Calculate time to first chunk
+         self._ttft = calculate_ttft(self._timestamps, self._start_time)
+
+     chunked = response_as_dict(chunk)
+     self._llmresponse += chunked.get('message', {}).get('content', '')
+
+     if chunked.get('message', {}).get('tool_calls'):
+         self._tool_calls = chunked['message']['tool_calls']
+
+     if chunked.get('eval_count'):
+         self._response_role = chunked.get('message', {}).get('role', '')
+         self._input_tokens = chunked.get('prompt_eval_count', 0)
+         self._output_tokens = chunked.get('eval_count', 0)
+         self._response_model = chunked.get('model', '')
+         self._finish_reason = chunked.get('done_reason', '')
+
+ def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+                       event_provider, capture_message_content, disable_metrics, version, is_stream):
+     """
+     Process chat request and generate Telemetry
+     """
+
+     scope._end_time = time.time()
+     if len(scope._timestamps) > 1:
+         scope._tbt = calculate_tbt(scope._timestamps)
+
+     formatted_messages = extract_and_format_input(scope._kwargs.get("messages", ""))
+     request_model = scope._kwargs.get("model", "gpt-4o")
+
+     cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
+
+     # Set Span attributes (OTel Semconv)
+     scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_OLLAMA)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
+     scope._span.set_attribute(SemanticConvetion.SERVER_PORT, scope._server_port)
+
+     options = scope._kwargs.get('options', {})
+     attributes = [
+         (SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'repeat_penalty'),
+         (SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, 'max_tokens'),
+         (SemanticConvetion.GEN_AI_REQUEST_SEED, 'seed'),
+         (SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, 'stop'),
+         (SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
+         (SemanticConvetion.GEN_AI_REQUEST_TOP_P, 'top_p'),
+         (SemanticConvetion.GEN_AI_REQUEST_TOP_K, 'top_k'),
+     ]
+
+     for attribute, key in attributes:
+         value = options.get(key)
+         if value is not None:
+             scope._span.set_attribute(attribute, value)
+
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+     scope._span.set_attribute(SemanticConvetion.SERVER_ADDRESS, scope._server_address)
+
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+                               "text" if isinstance(scope._llmresponse, str) else "json")
+
+     scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+     scope._span.set_attribute(SERVICE_NAME, application_name)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM, is_stream)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT, scope._tbt)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, scope._ttft)
+     scope._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
+
+     # To be removed once the change to log events (from span events) is complete
+     prompt = concatenate_all_contents(formatted_messages)
+     if capture_message_content:
+         scope._span.add_event(
+             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+             attributes={
+                 SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+             },
+         )
+         scope._span.add_event(
+             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+             attributes={
+                 SemanticConvetion.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+             },
+         )
+
+     choice_event_body = {
+         "finish_reason": scope._finish_reason,
+         "index": 0,
+         "message": {
+             **({"content": scope._llmresponse} if capture_message_content else {}),
+             "role": scope._response_role
+         }
+     }
+
+     if scope._tool_calls:
+         function_call = scope._tool_calls[0]
+         choice_event_body["message"].update({
+             "tool_calls": {
+                 "function": {
+                     "name": function_call.get('function', {}).get('name', ''),
+                     "arguments": function_call.get('function', {}).get('arguments', '')
+                 },
+                 "id": function_call.get('id', ''),
+                 "type": "function"
+             }
+         })
+
+     # Emit events
+     for role in ['user', 'system', 'assistant', 'tool']:
+         if formatted_messages.get(role, {}).get('content', ''):
+             event = otel_event(
+                 name=getattr(SemanticConvetion, f'GEN_AI_{role.upper()}_MESSAGE'),
+                 attributes={
+                     SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_OLLAMA
+                 },
+                 body={
+                     # pylint: disable=line-too-long
+                     **({"content": formatted_messages.get(role, {}).get('content', '')} if capture_message_content else {}),
+                     "role": formatted_messages.get(role, {}).get('role', []),
+                     **({
+                         "tool_calls": {
+                             "function": {
+                                 # pylint: disable=line-too-long
+                                 "name": (scope._tool_calls[0].get('function', {}).get('name', '') if scope._tool_calls else ''),
+                                 "arguments": (scope._tool_calls[0].get('function', {}).get('arguments', '') if scope._tool_calls else '')
+                             },
+                             "id": (scope._tool_calls[0].get('id', '') if scope._tool_calls else ''),
+                             "type": "function"
+                         }
+                     } if role == 'assistant' else {}),
+                     **({
+                         "id": (scope._tool_calls[0].get('id', '') if scope._tool_calls else '')
+                     } if role == 'tool' else {})
+                 }
+             )
+             event_provider.emit(event)
+
+     choice_event = otel_event(
+         name=SemanticConvetion.GEN_AI_CHOICE,
+         attributes={
+             SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_OLLAMA
+         },
+         body=choice_event_body
+     )
+     event_provider.emit(choice_event)
+
+     scope._span.set_status(Status(StatusCode.OK))
+
+     if not disable_metrics:
+         metrics_attributes = create_metrics_attributes(
+             service_name=application_name,
+             deployment_environment=environment,
+             operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
+             system=SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
+             request_model=request_model,
+             server_address=scope._server_address,
+             server_port=scope._server_port,
+             response_model=scope._response_model,
+         )
+
+         metrics["genai_client_usage_tokens"].record(scope._input_tokens + scope._output_tokens, metrics_attributes)
+         metrics["genai_client_operation_duration"].record(scope._end_time - scope._start_time, metrics_attributes)
+         metrics["genai_server_tbt"].record(scope._tbt, metrics_attributes)
+         metrics["genai_server_ttft"].record(scope._ttft, metrics_attributes)
+         metrics["genai_requests"].add(1, metrics_attributes)
+         metrics["genai_completion_tokens"].add(scope._output_tokens, metrics_attributes)
+         metrics["genai_prompt_tokens"].add(scope._input_tokens, metrics_attributes)
+         metrics["genai_cost"].record(cost, metrics_attributes)
+
+ def process_streaming_chat_response(self, pricing_info, environment, application_name, metrics,
+                                     event_provider, capture_message_content=False, disable_metrics=False, version=''):
+     """
+     Process chat request and generate Telemetry
+     """
+
+     common_chat_logic(self, pricing_info, environment, application_name, metrics,
+                       event_provider, capture_message_content, disable_metrics, version, is_stream=True)
+
+ def process_chat_response(response, request_model, pricing_info, server_port, server_address,
+                           environment, application_name, metrics, event_provider, start_time,
+                           span, capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
+     """
+     Process chat request and generate Telemetry
+     """
+
+     self = type('GenericScope', (), {})()
+
+     # pylint: disable = no-member
+     self._start_time = start_time
+     self._end_time = time.time()
+     self._span = span
+     self._llmresponse = response.get('message', {}).get('content', '')
+     self._response_role = response.get('message', {}).get('role', 'assistant')
+     self._input_tokens = response.get('prompt_eval_count')
+     self._output_tokens = response.get('eval_count')
+     self._response_model = response.get('model', '')
+     self._finish_reason = response.get('done_reason', '')
+     self._timestamps = []
+     self._ttft, self._tbt = self._end_time - self._start_time, 0
+     self._server_address, self._server_port = server_address, server_port
+     self._kwargs = kwargs
+     self._tool_calls = response.get('message', {}).get('tool_calls', [])
+
+     common_chat_logic(self, pricing_info, environment, application_name, metrics,
+                       event_provider, capture_message_content, disable_metrics, version, is_stream=False)
+
+     return response
+
+ def process_embedding_response(response, request_model, pricing_info, server_port, server_address,
+                                environment, application_name, metrics, event_provider,
+                                start_time, span, capture_message_content=False, disable_metrics=False, version="1.0.0", **kwargs):
+     """
+     Process embedding request and generate Telemetry
+     """
+
+     end_time = time.time()
+
+     try:
+         input_tokens = general_tokens(str(kwargs.get('prompt')))
+
+         # Calculate cost of the operation
+         cost = get_embed_model_cost(request_model,
+                                     pricing_info, input_tokens)
+
+         # Set Span attributes (OTel Semconv)
+         span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
+         span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
+                            SemanticConvetion.GEN_AI_OPERATION_TYPE_EMBEDDING)
+         span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                            SemanticConvetion.GEN_AI_SYSTEM_OLLAMA)
+         span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+                            request_model)
+         span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
+                            request_model)
+         span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
+                            server_address)
+         span.set_attribute(SemanticConvetion.SERVER_PORT,
+                            server_port)
+         span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
+                            input_tokens)
+
+         # Set Span attributes (Extras)
+         span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                            environment)
+         span.set_attribute(SERVICE_NAME,
+                            application_name)
+         span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE,
+                            input_tokens)
+         span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                            cost)
+         span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
+                            version)
+
+         prompt_event = otel_event(
+             name=SemanticConvetion.GEN_AI_USER_MESSAGE,
+             attributes={
+                 SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_OLLAMA
+             },
+             body={
+                 **({"content": kwargs.get('prompt', '')} if capture_message_content else {}),
+                 "role": 'user'
+             }
+         )
+         event_provider.emit(prompt_event)
+
+         span.set_status(Status(StatusCode.OK))
+
+         if disable_metrics is False:
+             attributes = create_metrics_attributes(
+                 service_name=application_name,
+                 deployment_environment=environment,
+                 operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_EMBEDDING,
+                 system=SemanticConvetion.GEN_AI_SYSTEM_OLLAMA,
+                 request_model=request_model,
+                 server_address=server_address,
+                 server_port=server_port,
+                 response_model=request_model,
+             )
+             metrics['genai_client_usage_tokens'].record(
+                 input_tokens, attributes
+             )
+             metrics['genai_client_operation_duration'].record(
+                 end_time - start_time, attributes
+             )
+             metrics['genai_requests'].add(1, attributes)
+             metrics['genai_prompt_tokens'].add(input_tokens, attributes)
+             metrics['genai_cost'].record(cost, attributes)
+
+         # Return original response
+         return response
+
+     except Exception as e:
+         handle_exception(span, e)
+
+         # Return original response
+         return response
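For context on how these helpers are meant to be driven, here is a minimal sketch of a streaming wrapper in the style of the rewritten ollama.py/async_ollama.py: accumulate per-chunk state with process_chunk(), then flush the span, events, and metrics once the stream is exhausted. It assumes an active, recording OTel span and an event logger with an emit() method; the class name, constructor arguments, and placeholder config values are illustrative assumptions, not the package's actual wrapper:

import time

from openlit.instrumentation.ollama.utils import (
    process_chunk,
    process_streaming_chat_response,
)

class TracedOllamaStream:
    """Illustrative wrapper around a streaming Ollama chat response."""

    def __init__(self, wrapped, span, event_provider, kwargs,
                 server_address="127.0.0.1", server_port=11434):
        self.__wrapped__ = wrapped
        self._span = span
        self._event_provider = event_provider
        self._kwargs = kwargs                      # original chat() kwargs
        self._server_address = server_address
        self._server_port = server_port
        # State that process_chunk() and common_chat_logic() read and update
        self._start_time = time.time()
        self._timestamps = []
        self._llmresponse = ""
        self._tool_calls = []
        self._ttft, self._tbt = 0, 0
        self._input_tokens = self._output_tokens = 0
        self._response_role = self._response_model = self._finish_reason = ""

    def __iter__(self):
        for chunk in self.__wrapped__:
            process_chunk(self, chunk)   # updates content buffer, tokens, TTFT
            yield chunk
        # Stream exhausted: set span attributes, emit events, record metrics
        process_streaming_chat_response(
            self, pricing_info={}, environment="development",
            application_name="demo-app", metrics=None,   # metrics=None is safe
            event_provider=self._event_provider,         # because metrics are
            capture_message_content=True, disable_metrics=True,  # disabled here
            version="1.33.11",
        )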
openlit/instrumentation/openai/__init__.py
@@ -25,7 +25,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
          tracer = kwargs.get("tracer")
          metrics = kwargs.get("metrics_dict")
          pricing_info = kwargs.get("pricing_info")
-         trace_content = kwargs.get("trace_content")
+         capture_message_content = kwargs.get("capture_message_content")
          disable_metrics = kwargs.get("disable_metrics")
          version = importlib.metadata.version("openai")

@@ -33,7 +33,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.chat.completions",
              "Completions.create",
              chat_completions(version, environment, application_name,
-                              tracer, pricing_info, trace_content,
+                              tracer, pricing_info, capture_message_content,
                               metrics, disable_metrics),
          )

@@ -41,7 +41,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.chat.completions",
              "AsyncCompletions.create",
              async_chat_completions(version, environment, application_name,
-                                    tracer, pricing_info, trace_content,
+                                    tracer, pricing_info, capture_message_content,
                                     metrics, disable_metrics),
          )

@@ -49,7 +49,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.images",
              "Images.generate",
              image_generate(version, environment, application_name,
-                            tracer, pricing_info, trace_content,
+                            tracer, pricing_info, capture_message_content,
                             metrics, disable_metrics),
          )

@@ -57,7 +57,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.images",
              "AsyncImages.generate",
              async_image_generate(version, environment, application_name,
-                                  tracer, pricing_info, trace_content,
+                                  tracer, pricing_info, capture_message_content,
                                   metrics, disable_metrics),
          )

@@ -65,7 +65,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.embeddings",
              "Embeddings.create",
              embedding(version, environment, application_name,
-                       tracer, pricing_info, trace_content,
+                       tracer, pricing_info, capture_message_content,
                        metrics, disable_metrics),
          )

@@ -73,7 +73,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.embeddings",
              "AsyncEmbeddings.create",
              async_embedding(version, environment, application_name,
-                             tracer, pricing_info, trace_content,
+                             tracer, pricing_info, capture_message_content,
                              metrics, disable_metrics),
          )

@@ -82,7 +82,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "Images.create_variation",
              image_variatons(version,
                              environment, application_name,
-                             tracer, pricing_info, trace_content,
+                             tracer, pricing_info, capture_message_content,
                              metrics, disable_metrics),
          )

@@ -91,7 +91,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "AsyncImages.create_variation",
              async_image_variatons(version,
                                    environment, application_name,
-                                   tracer, pricing_info, trace_content,
+                                   tracer, pricing_info, capture_message_content,
                                    metrics, disable_metrics),
          )

@@ -99,7 +99,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.audio.speech",
              "Speech.create",
              audio_create(version, environment, application_name,
-                          tracer, pricing_info, trace_content,
+                          tracer, pricing_info, capture_message_content,
                           metrics, disable_metrics),
          )

@@ -107,7 +107,7 @@ class OpenAIInstrumentor(BaseInstrumentor):
              "openai.resources.audio.speech",
              "AsyncSpeech.create",
              async_audio_create(version, environment, application_name,
-                                tracer, pricing_info, trace_content,
+                                tracer, pricing_info, capture_message_content,
                                 metrics, disable_metrics),
          )

openlit/instrumentation/openai/async_openai.py
@@ -25,7 +25,7 @@ from openlit.semcov import SemanticConvetion
  logger = logging.getLogger(__name__)

  def async_chat_completions(version, environment, application_name,
-                            tracer, pricing_info, trace_content, metrics, disable_metrics):
+                            tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for chat completions to collect metrics.

@@ -35,7 +35,7 @@ def async_chat_completions(version, environment, application_name,
          application_name: Name of the application using the OpenAI API.
          tracer: OpenTelemetry tracer for creating spans.
          pricing_info: Information used for calculating the cost of OpenAI usage.
-         trace_content: Flag indicating whether to trace the actual content.
+         capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
          A function that wraps the chat completions method to add telemetry.
@@ -229,7 +229,7 @@ def async_chat_completions(version, environment, application_name,
                      self._ttft)
              self._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                      version)
-             if trace_content:
+             if capture_message_content:
                  self._span.add_event(
                      name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                      attributes={
@@ -387,7 +387,7 @@ def async_chat_completions(version, environment, application_name,
              span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SERVICE_TIER,
                      response_dict.get('service_tier'))
              span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
-                     response_dict.get('system_fingerprint'))
+                     response_dict.get('system_fingerprint', ''))

              # Set base span attributes (Extras)
              span.set_attribute(DEPLOYMENT_ENVIRONMENT,
@@ -406,7 +406,7 @@ def async_chat_completions(version, environment, application_name,
                      end_time - start_time)
              span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                      version)
-             if trace_content:
+             if capture_message_content:
                  span.add_event(
                      name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                      attributes={
@@ -417,7 +417,7 @@ def async_chat_completions(version, environment, application_name,
              for i in range(kwargs.get('n',1)):
                  span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                          [response_dict.get('choices')[i].get('finish_reason')])
-                 if trace_content:
+                 if capture_message_content:
                      span.add_event(
                          name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                          attributes={
@@ -477,7 +477,7 @@ def async_chat_completions(version, environment, application_name,
      return wrapper

  def async_embedding(version, environment, application_name,
-                     tracer, pricing_info, trace_content, metrics, disable_metrics):
+                     tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for embeddings to collect metrics.

@@ -487,7 +487,7 @@ def async_embedding(version, environment, application_name,
          application_name: Name of the application using the OpenAI API.
          tracer: OpenTelemetry tracer for creating spans.
          pricing_info: Information used for calculating the cost of OpenAI usage.
-         trace_content: Flag indicating whether to trace the actual content.
+         capture_message_content: Flag indicating whether to trace the actual content.

      Returns:
          A function that wraps the embeddings method to add telemetry.
@@ -561,7 +561,7 @@ def async_embedding(version, environment, application_name,
              span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                      version)

-             if trace_content:
+             if capture_message_content:
                  span.add_event(
                      name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                      attributes={
@@ -605,7 +605,7 @@ def async_embedding(version, environment, application_name,
      return wrapper

  def async_image_generate(version, environment, application_name,
-                          tracer, pricing_info, trace_content, metrics, disable_metrics):
+                          tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for image generation to collect metrics.

@@ -615,7 +615,7 @@ def async_image_generate(version, environment, application_name,
          application_name: Name of the application using the OpenAI API.
          tracer: OpenTelemetry tracer for creating spans.
          pricing_info: Information used for calculating the cost of OpenAI image generation.
-         trace_content: Flag indicating whether to trace the input prompt and generated images.
+         capture_message_content: Flag indicating whether to trace the input prompt and generated images.

      Returns:
          A function that wraps the image generation method to add telemetry.
@@ -700,7 +700,7 @@ def async_image_generate(version, environment, application_name,
              span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                      version)

-             if trace_content:
+             if capture_message_content:
                  span.add_event(
                      name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                      attributes={
@@ -752,7 +752,7 @@ def async_image_generate(version, environment, application_name,
      return wrapper

  def async_image_variatons(version, environment, application_name,
-                           tracer, pricing_info, trace_content, metrics, disable_metrics):
+                           tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for creating image variations to collect metrics.

@@ -762,7 +762,7 @@ def async_image_variatons(version, environment, application_name,
          application_name: Name of the application using the OpenAI API.
          tracer: OpenTelemetry tracer for creating spans.
          pricing_info: Information used for calculating the cost of generating image variations.
-         trace_content: Flag indicating whether to trace the input image and generated variations.
+         capture_message_content: Flag indicating whether to trace the input image and generated variations.

      Returns:
          A function that wraps the image variations creation method to add telemetry.
@@ -842,7 +842,7 @@ def async_image_variatons(version, environment, application_name,
              span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                      version)

-             if trace_content:
+             if capture_message_content:
                  attribute_name = f"{SemanticConvetion.GEN_AI_RESPONSE_IMAGE}.{images_count}"
                  span.add_event(
                      name=attribute_name,
@@ -888,7 +888,7 @@ def async_image_variatons(version, environment, application_name,
      return wrapper

  def async_audio_create(version, environment, application_name,
-                        tracer, pricing_info, trace_content, metrics, disable_metrics):
+                        tracer, pricing_info, capture_message_content, metrics, disable_metrics):
      """
      Generates a telemetry wrapper for creating speech audio to collect metrics.

@@ -898,7 +898,7 @@ def async_audio_create(version, environment, application_name,
          application_name: Name of the application using the OpenAI API.
          tracer: OpenTelemetry tracer for creating spans.
          pricing_info: Information used for calculating the cost of generating speech audio.
-         trace_content: Flag indicating whether to trace the input text and generated audio.
+         capture_message_content: Flag indicating whether to trace the input text and generated audio.

      Returns:
          A function that wraps the speech audio creation method to add telemetry.
@@ -967,7 +967,7 @@ def async_audio_create(version, environment, application_name,
                      cost)
              span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
                      version)
-             if trace_content:
+             if capture_message_content:
                  span.add_event(
                      name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                      attributes={