lmnr 0.4.53.dev0__py3-none-any.whl → 0.7.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. lmnr/__init__.py +32 -11
  2. lmnr/cli/__init__.py +270 -0
  3. lmnr/cli/datasets.py +371 -0
  4. lmnr/cli/evals.py +111 -0
  5. lmnr/cli/rules.py +42 -0
  6. lmnr/opentelemetry_lib/__init__.py +70 -0
  7. lmnr/opentelemetry_lib/decorators/__init__.py +337 -0
  8. lmnr/opentelemetry_lib/litellm/__init__.py +685 -0
  9. lmnr/opentelemetry_lib/litellm/utils.py +100 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +599 -0
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
  26. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +330 -0
  27. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  28. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  29. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  30. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  31. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  32. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  33. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  34. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
  35. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
  36. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +121 -0
  37. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
  38. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  39. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
  40. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
  41. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
  42. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  43. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
  44. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  45. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  46. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  47. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
  48. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  49. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
  50. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
  51. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
  52. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
  53. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  54. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
  55. lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
  56. lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +191 -0
  57. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
  58. lmnr/opentelemetry_lib/tracing/__init__.py +263 -0
  59. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +516 -0
  60. lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +21 -8
  61. lmnr/opentelemetry_lib/tracing/context.py +200 -0
  62. lmnr/opentelemetry_lib/tracing/exporter.py +153 -0
  63. lmnr/opentelemetry_lib/tracing/instruments.py +140 -0
  64. lmnr/opentelemetry_lib/tracing/processor.py +193 -0
  65. lmnr/opentelemetry_lib/tracing/span.py +398 -0
  66. lmnr/opentelemetry_lib/tracing/tracer.py +57 -0
  67. lmnr/opentelemetry_lib/tracing/utils.py +62 -0
  68. lmnr/opentelemetry_lib/utils/package_check.py +18 -0
  69. lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
  70. lmnr/sdk/browser/__init__.py +0 -0
  71. lmnr/sdk/browser/background_send_events.py +158 -0
  72. lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
  73. lmnr/sdk/browser/browser_use_otel.py +142 -0
  74. lmnr/sdk/browser/bubus_otel.py +71 -0
  75. lmnr/sdk/browser/cdp_utils.py +518 -0
  76. lmnr/sdk/browser/inject_script.js +514 -0
  77. lmnr/sdk/browser/patchright_otel.py +151 -0
  78. lmnr/sdk/browser/playwright_otel.py +322 -0
  79. lmnr/sdk/browser/pw_utils.py +363 -0
  80. lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
  81. lmnr/sdk/browser/utils.py +70 -0
  82. lmnr/sdk/client/asynchronous/async_client.py +180 -0
  83. lmnr/sdk/client/asynchronous/resources/__init__.py +6 -0
  84. lmnr/sdk/client/asynchronous/resources/base.py +32 -0
  85. lmnr/sdk/client/asynchronous/resources/browser_events.py +41 -0
  86. lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
  87. lmnr/sdk/client/asynchronous/resources/evals.py +266 -0
  88. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  89. lmnr/sdk/client/asynchronous/resources/tags.py +83 -0
  90. lmnr/sdk/client/synchronous/resources/__init__.py +6 -0
  91. lmnr/sdk/client/synchronous/resources/base.py +32 -0
  92. lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
  93. lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
  94. lmnr/sdk/client/synchronous/resources/evals.py +263 -0
  95. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  96. lmnr/sdk/client/synchronous/resources/tags.py +83 -0
  97. lmnr/sdk/client/synchronous/sync_client.py +191 -0
  98. lmnr/sdk/datasets/__init__.py +94 -0
  99. lmnr/sdk/datasets/file_utils.py +91 -0
  100. lmnr/sdk/decorators.py +163 -26
  101. lmnr/sdk/eval_control.py +3 -2
  102. lmnr/sdk/evaluations.py +403 -191
  103. lmnr/sdk/laminar.py +1080 -549
  104. lmnr/sdk/log.py +7 -2
  105. lmnr/sdk/types.py +246 -134
  106. lmnr/sdk/utils.py +151 -7
  107. lmnr/version.py +46 -0
  108. {lmnr-0.4.53.dev0.dist-info → lmnr-0.7.26.dist-info}/METADATA +152 -106
  109. lmnr-0.7.26.dist-info/RECORD +116 -0
  110. lmnr-0.7.26.dist-info/WHEEL +4 -0
  111. lmnr-0.7.26.dist-info/entry_points.txt +3 -0
  112. lmnr/cli.py +0 -101
  113. lmnr/openllmetry_sdk/.python-version +0 -1
  114. lmnr/openllmetry_sdk/__init__.py +0 -72
  115. lmnr/openllmetry_sdk/config/__init__.py +0 -9
  116. lmnr/openllmetry_sdk/decorators/base.py +0 -185
  117. lmnr/openllmetry_sdk/instruments.py +0 -38
  118. lmnr/openllmetry_sdk/tracing/__init__.py +0 -1
  119. lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -24
  120. lmnr/openllmetry_sdk/tracing/context_manager.py +0 -13
  121. lmnr/openllmetry_sdk/tracing/tracing.py +0 -884
  122. lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -61
  123. lmnr/openllmetry_sdk/utils/package_check.py +0 -7
  124. lmnr/openllmetry_sdk/version.py +0 -1
  125. lmnr/sdk/datasets.py +0 -55
  126. lmnr-0.4.53.dev0.dist-info/LICENSE +0 -75
  127. lmnr-0.4.53.dev0.dist-info/RECORD +0 -33
  128. lmnr-0.4.53.dev0.dist-info/WHEEL +0 -4
  129. lmnr-0.4.53.dev0.dist-info/entry_points.txt +0 -3
  130. /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
  131. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
  132. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
  133. /lmnr/{openllmetry_sdk/decorators/__init__.py → py.typed} +0 -0
@@ -0,0 +1,368 @@
1
+ from typing import Collection
2
+
3
+ from opentelemetry._events import get_event_logger
4
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
5
+
6
+ from lmnr.sdk.log import get_default_logger
7
+ from ..shared.chat_wrappers import (
8
+ achat_wrapper,
9
+ chat_wrapper,
10
+ )
11
+ from ..shared.completion_wrappers import (
12
+ acompletion_wrapper,
13
+ completion_wrapper,
14
+ )
15
+ from ..shared.config import Config
16
+ from ..shared.embeddings_wrappers import (
17
+ aembeddings_wrapper,
18
+ embeddings_wrapper,
19
+ )
20
+ from ..shared.image_gen_wrappers import (
21
+ image_gen_metrics_wrapper,
22
+ )
23
+ from ..utils import is_metrics_enabled
24
+ from .assistant_wrappers import (
25
+ assistants_create_wrapper,
26
+ messages_list_wrapper,
27
+ runs_create_and_stream_wrapper,
28
+ runs_create_wrapper,
29
+ runs_retrieve_wrapper,
30
+ )
31
+
32
+ from .responses_wrappers import (
33
+ async_responses_cancel_wrapper,
34
+ async_responses_get_or_create_wrapper,
35
+ responses_cancel_wrapper,
36
+ responses_get_or_create_wrapper,
37
+ )
38
+
39
+ from ..version import __version__
40
+ from opentelemetry.instrumentation.utils import unwrap
41
+ from opentelemetry.metrics import get_meter
42
+ from opentelemetry.semconv._incubating.metrics import gen_ai_metrics as GenAIMetrics
43
+ from opentelemetry.semconv_ai import Meters
44
+ from opentelemetry.trace import get_tracer
45
+ from wrapt import wrap_function_wrapper
46
+
47
+
48
# Package requirement gating this instrumentor: any openai v1.x client.
_instruments = ("openai >= 1.0.0",)
logger = get_default_logger(__name__)
50
+
51
+
52
class OpenAIV1Instrumentor(BaseInstrumentor):
    """OpenTelemetry instrumentor for the openai>=1.0 Python client.

    Installs tracing wrappers around the synchronous and asynchronous
    chat-completion, completion, embedding, image-generation, beta
    assistant/thread, and Responses APIs. When metrics collection is
    enabled, it also creates the GenAI metric instruments and threads
    them into the wrappers that record them.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the package specs that must be installed for instrumentation."""
        return _instruments

    def _try_wrap(self, module, function, wrapper):
        """
        Wrap a function if it exists, otherwise do nothing.
        This is useful for handling cases where the function is not available in
        the older versions of the library.

        Args:
            module (str): The module to wrap, e.g. "openai.resources.chat.completions"
            function (str): "Object.function" to wrap, e.g. "Completions.parse"
            wrapper (callable): The wrapper to apply to the function.
        """
        try:
            wrap_function_wrapper(module, function, wrapper)
        except (AttributeError, ModuleNotFoundError, ImportError):
            # The target does not exist in this openai version; that's expected.
            logger.debug(f"Failed to wrap {module}.{function}")

    def _instrument(self, **kwargs):
        """Install all wrappers; create metric instruments when metrics are enabled.

        Accepts the standard BaseInstrumentor kwargs: ``tracer_provider``,
        ``meter_provider``, and ``event_logger_provider``.
        """
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # meter and counters are inited here
        meter_provider = kwargs.get("meter_provider")
        meter = get_meter(__name__, __version__, meter_provider)

        if not Config.use_legacy_attributes:
            # Event-based semantic conventions: prompt/choice payloads are
            # emitted through an event logger instead of span attributes.
            event_logger_provider = kwargs.get("event_logger_provider")
            Config.event_logger = get_event_logger(
                __name__, __version__, event_logger_provider=event_logger_provider
            )

        if is_metrics_enabled():
            tokens_histogram = meter.create_histogram(
                name=Meters.LLM_TOKEN_USAGE,
                unit="token",
                description="Measures number of input and output tokens used",
            )
            chat_choice_counter = meter.create_counter(
                name=Meters.LLM_GENERATION_CHOICES,
                unit="choice",
                description="Number of choices returned by chat completions call",
            )
            duration_histogram = meter.create_histogram(
                name=Meters.LLM_OPERATION_DURATION,
                unit="s",
                description="GenAI operation duration",
            )
            chat_exception_counter = meter.create_counter(
                name=Meters.LLM_COMPLETIONS_EXCEPTIONS,
                unit="time",
                description="Number of exceptions occurred during chat completions",
            )
            streaming_time_to_first_token = meter.create_histogram(
                name=GenAIMetrics.GEN_AI_SERVER_TIME_TO_FIRST_TOKEN,
                unit="s",
                description="Time to first token in streaming chat completions",
            )
            streaming_time_to_generate = meter.create_histogram(
                name=Meters.LLM_STREAMING_TIME_TO_GENERATE,
                unit="s",
                description="Time between first token and completion in streaming chat completions",
            )
        else:
            # Metrics disabled: every instrument degrades to None; the
            # wrappers treat None instruments as no-ops.
            tokens_histogram = None
            chat_choice_counter = None
            duration_histogram = None
            chat_exception_counter = None
            streaming_time_to_first_token = None
            streaming_time_to_generate = None

        # Every chat wrapper (sync/async, create/parse, beta) shares the
        # same set of metric instruments, in this exact positional order.
        chat_metrics = (
            tokens_histogram,
            chat_choice_counter,
            duration_histogram,
            chat_exception_counter,
            streaming_time_to_first_token,
            streaming_time_to_generate,
        )

        wrap_function_wrapper(
            "openai.resources.chat.completions",
            "Completions.create",
            chat_wrapper(tracer, *chat_metrics),
        )
        wrap_function_wrapper(
            "openai.resources.completions",
            "Completions.create",
            completion_wrapper(tracer),
        )

        if is_metrics_enabled():
            embeddings_vector_size_counter = meter.create_counter(
                name=Meters.LLM_EMBEDDINGS_VECTOR_SIZE,
                unit="element",
                # Fixed typo: description previously read "he size of returned vector".
                description="The size of returned vector",
            )
            embeddings_exception_counter = meter.create_counter(
                name=Meters.LLM_EMBEDDINGS_EXCEPTIONS,
                unit="time",
                description="Number of exceptions occurred during embeddings operation",
            )
        else:
            embeddings_vector_size_counter = None
            embeddings_exception_counter = None

        wrap_function_wrapper(
            "openai.resources.embeddings",
            "Embeddings.create",
            embeddings_wrapper(
                tracer,
                tokens_histogram,
                embeddings_vector_size_counter,
                duration_histogram,
                embeddings_exception_counter,
            ),
        )
        wrap_function_wrapper(
            "openai.resources.chat.completions",
            "AsyncCompletions.create",
            achat_wrapper(tracer, *chat_metrics),
        )
        wrap_function_wrapper(
            "openai.resources.completions",
            "AsyncCompletions.create",
            acompletion_wrapper(tracer),
        )
        wrap_function_wrapper(
            "openai.resources.embeddings",
            "AsyncEmbeddings.create",
            aembeddings_wrapper(
                tracer,
                tokens_histogram,
                embeddings_vector_size_counter,
                duration_histogram,
                embeddings_exception_counter,
            ),
        )
        # in newer versions, Completions.parse are out of beta
        self._try_wrap(
            "openai.resources.chat.completions",
            "Completions.parse",
            chat_wrapper(tracer, *chat_metrics),
        )
        self._try_wrap(
            "openai.resources.chat.completions",
            "AsyncCompletions.parse",
            achat_wrapper(tracer, *chat_metrics),
        )

        if is_metrics_enabled():
            image_gen_exception_counter = meter.create_counter(
                name=Meters.LLM_IMAGE_GENERATIONS_EXCEPTIONS,
                unit="time",
                description="Number of exceptions occurred during image generations operation",
            )
        else:
            image_gen_exception_counter = None

        wrap_function_wrapper(
            "openai.resources.images",
            "Images.generate",
            image_gen_metrics_wrapper(duration_histogram, image_gen_exception_counter),
        )

        # Beta APIs may not be available consistently in all versions
        self._try_wrap(
            "openai.resources.beta.assistants",
            "Assistants.create",
            assistants_create_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.beta.chat.completions",
            "Completions.parse",
            chat_wrapper(tracer, *chat_metrics),
        )
        self._try_wrap(
            "openai.resources.beta.chat.completions",
            "AsyncCompletions.parse",
            achat_wrapper(tracer, *chat_metrics),
        )
        self._try_wrap(
            "openai.resources.beta.threads.runs",
            "Runs.create",
            runs_create_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.beta.threads.runs",
            "Runs.retrieve",
            runs_retrieve_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.beta.threads.runs",
            "Runs.create_and_stream",
            runs_create_and_stream_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.beta.threads.messages",
            "Messages.list",
            messages_list_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.responses",
            "Responses.create",
            responses_get_or_create_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.responses",
            "Responses.retrieve",
            responses_get_or_create_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.responses",
            "Responses.cancel",
            responses_cancel_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.responses",
            "AsyncResponses.create",
            async_responses_get_or_create_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.responses",
            "AsyncResponses.retrieve",
            async_responses_get_or_create_wrapper(tracer),
        )
        self._try_wrap(
            "openai.resources.responses",
            "AsyncResponses.cancel",
            async_responses_cancel_wrapper(tracer),
        )

    def _uninstrument(self, **kwargs):
        """Best-effort removal of every wrapper installed by ``_instrument``."""
        # Data-driven unwrap of the exact targets wrapped above; try_unwrap
        # silently skips targets absent in the installed openai version.
        for module, function in (
            ("openai.resources.chat.completions.Completions", "create"),
            ("openai.resources.completions.Completions", "create"),
            ("openai.resources.embeddings.Embeddings", "create"),
            ("openai.resources.chat.completions.AsyncCompletions", "create"),
            ("openai.resources.completions.AsyncCompletions", "create"),
            ("openai.resources.embeddings.AsyncEmbeddings", "create"),
            ("openai.resources.images.Images", "generate"),
            ("openai.resources.chat.completions.Completions", "parse"),
            ("openai.resources.chat.completions.AsyncCompletions", "parse"),
            ("openai.resources.beta.assistants.Assistants", "create"),
            ("openai.resources.beta.chat.completions.Completions", "parse"),
            ("openai.resources.beta.chat.completions.AsyncCompletions", "parse"),
            ("openai.resources.beta.threads.runs.Runs", "create"),
            ("openai.resources.beta.threads.runs.Runs", "retrieve"),
            ("openai.resources.beta.threads.runs.Runs", "create_and_stream"),
            ("openai.resources.beta.threads.messages.Messages", "list"),
            ("openai.resources.responses.Responses", "create"),
            ("openai.resources.responses.Responses", "retrieve"),
            ("openai.resources.responses.Responses", "cancel"),
            ("openai.resources.responses.AsyncResponses", "create"),
            ("openai.resources.responses.AsyncResponses", "retrieve"),
            ("openai.resources.responses.AsyncResponses", "cancel"),
        ):
            self.try_unwrap(module, function)

    def try_unwrap(self, module, function):
        """Unwrap a single target, ignoring targets missing in this version.

        Args:
            module (str): Dotted path to the wrapped object, e.g.
                "openai.resources.chat.completions.Completions".
            function (str): Attribute name to unwrap, e.g. "create".
        """
        try:
            unwrap(module, function)
        except (AttributeError, ModuleNotFoundError, ImportError):
            logger.debug(f"Failed to unwrap {module}.{function}")
@@ -0,0 +1,325 @@
1
+ import logging
2
+ import time
3
+
4
+ from opentelemetry import context as context_api
5
+ from ..shared import (
6
+ _set_span_attribute,
7
+ model_as_dict,
8
+ )
9
+ from ..shared.config import Config
10
+ from ..shared.event_emitter import emit_event
11
+ from ..shared.event_models import (
12
+ ChoiceEvent,
13
+ MessageEvent,
14
+ )
15
+ from ..utils import (
16
+ _with_tracer_wrapper,
17
+ dont_throw,
18
+ should_emit_events,
19
+ )
20
+ from lmnr.opentelemetry_lib.tracing.context import (
21
+ get_current_context,
22
+ get_event_attributes_from_context,
23
+ )
24
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
25
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
26
+ from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
27
+ from opentelemetry.trace import SpanKind, Status, StatusCode
28
+
29
+ from openai._legacy_response import LegacyAPIResponse
30
+ from openai.types.beta.threads.run import Run
31
+
32
logger = logging.getLogger(__name__)

# Process-wide caches shared by the wrappers in this module (plain dicts,
# no locking — NOTE(review): concurrent assistant/run calls would race;
# confirm this is acceptable for the supported usage).
# assistants: assistant_id -> {"model": ..., "instructions": ...}
# runs: thread_id -> bookkeeping dict with "start_time"/"end_time" (ns),
#       "assistant_id", "instructions", "run_id", and optionally
#       "usage" / "exception".
assistants = {}
runs = {}
36
+
37
+
38
@_with_tracer_wrapper
def assistants_create_wrapper(tracer, wrapped, instance, args, kwargs):
    """Cache model/instructions of each created assistant for later span enrichment."""
    suppressed = context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY)
    if suppressed:
        return wrapped(*args, **kwargs)

    result = wrapped(*args, **kwargs)

    # Remember request parameters keyed by the new assistant's id; the run
    # wrappers read them back when building spans.
    assistants[result.id] = {
        key: kwargs.get(key) for key in ("model", "instructions")
    }

    return result
51
+
52
+
53
@_with_tracer_wrapper
def runs_create_wrapper(tracer, wrapped, instance, args, kwargs):
    """Create a run and cache per-thread bookkeeping used by later wrappers.

    On success, stores start time, assistant id, instructions, and the run id
    under the thread id; on failure, stores the exception and end time, then
    re-raises.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    thread_id = kwargs.get("thread_id")
    instructions = kwargs.get("instructions")

    try:
        result = wrapped(*args, **kwargs)
        result_dict = model_as_dict(result)
        runs[thread_id] = {
            "start_time": time.time_ns(),
            "assistant_id": kwargs.get("assistant_id"),
            "instructions": instructions,
            "run_id": result_dict.get("id"),
        }
        return result
    except Exception as err:
        runs[thread_id] = {"exception": err, "end_time": time.time_ns()}
        raise
79
+
80
+
81
@_with_tracer_wrapper
def runs_retrieve_wrapper(tracer, wrapped, instance, args, kwargs):
    """Update the cached run entry (end time, usage) after a run is retrieved.

    On failure, records the exception and end time on the cached entry and
    re-raises.
    """

    @dont_throw
    def _record(run_response):
        # A LegacyAPIResponse wraps the model object; unwrap before reading.
        parsed = (
            run_response.parse()
            if type(run_response) is LegacyAPIResponse
            else run_response
        )
        assert type(parsed) is Run

        if parsed.thread_id in runs:
            entry = runs[parsed.thread_id]
            entry["end_time"] = time.time_ns()
            if parsed.usage:
                entry["usage"] = parsed.usage

    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    try:
        result = wrapped(*args, **kwargs)
        _record(result)
        return result
    except Exception as err:
        failed_thread = kwargs.get("thread_id")
        if failed_thread in runs:
            runs[failed_thread]["exception"] = err
            runs[failed_thread]["end_time"] = time.time_ns()
        raise
110
+
111
+
112
@_with_tracer_wrapper
def messages_list_wrapper(tracer, wrapped, instance, args, kwargs):
    """Build an "openai.assistant.run" span from the cached run when the
    thread's messages are listed, attaching prompts, completions and usage."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    # Thread id keys the `runs` cache populated by runs_create_wrapper.
    id = kwargs.get("thread_id")

    response = wrapped(*args, **kwargs)

    response_dict = model_as_dict(response)
    # No cached run for this thread -> nothing to report; pass through.
    if id not in runs:
        return response

    run = runs[id]
    # Replay messages in chronological order.
    messages = sorted(response_dict["data"], key=lambda x: x["created_at"])

    # Span is back-dated to the cached run start time.
    span = tracer.start_span(
        "openai.assistant.run",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
        start_time=run.get("start_time"),
        context=get_current_context(),
    )

    if exception := run.get("exception"):
        span.set_attribute(ERROR_TYPE, exception.__class__.__name__)
        span.record_exception(exception, attributes=get_event_attributes_from_context())
        span.set_status(Status(StatusCode.ERROR, str(exception)))
        # NOTE(review): the span is ended here but execution continues and
        # span.end() is called again at the bottom — confirm this double-end
        # (a no-op on an already-ended SDK span) is intentional.
        span.end(run.get("end_time"))

    prompt_index = 0
    # Enrich with assistant metadata when cached, or fetch it live if
    # Config.enrich_assistant is set.
    if assistants.get(run["assistant_id"]) is not None or Config.enrich_assistant:
        if Config.enrich_assistant:
            assistant = model_as_dict(
                instance._client.beta.assistants.retrieve(run["assistant_id"])
            )
            assistants[run["assistant_id"]] = assistant
        else:
            assistant = assistants[run["assistant_id"]]

        _set_span_attribute(
            span,
            SpanAttributes.LLM_SYSTEM,
            "openai",
        )
        _set_span_attribute(
            span,
            SpanAttributes.LLM_REQUEST_MODEL,
            assistant["model"],
        )
        _set_span_attribute(
            span,
            SpanAttributes.LLM_RESPONSE_MODEL,
            assistant["model"],
        )
        # Assistant instructions become the first system prompt.
        if should_emit_events():
            emit_event(MessageEvent(content=assistant["instructions"], role="system"))
        else:
            _set_span_attribute(
                span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
            )
            _set_span_attribute(
                span,
                f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
                assistant["instructions"],
            )
        prompt_index += 1
    # Run-level instructions become the next system prompt.
    _set_span_attribute(
        span, f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role", "system"
    )
    _set_span_attribute(
        span,
        f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
        run["instructions"],
    )
    # NOTE(review): this emit_event is NOT gated on should_emit_events(),
    # unlike every other emit in this module — verify upstream intent.
    emit_event(MessageEvent(content=run["instructions"], role="system"))
    prompt_index += 1

    completion_index = 0
    for msg in messages:
        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{completion_index}"
        content = msg.get("content")

        # Assumes the first content part is a text block — TODO confirm for
        # image/file message parts.
        message_content = content[0].get("text").get("value")
        message_role = msg.get("role")
        if message_role in ["user", "system"]:
            # User/system messages are recorded as prompts.
            if should_emit_events():
                emit_event(MessageEvent(content=message_content, role=message_role))
            else:
                _set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.role",
                    message_role,
                )
                _set_span_attribute(
                    span,
                    f"{SpanAttributes.LLM_PROMPTS}.{prompt_index}.content",
                    message_content,
                )
            prompt_index += 1
        else:
            # Assistant messages are recorded as completions/choices.
            if should_emit_events():
                emit_event(
                    ChoiceEvent(
                        index=completion_index,
                        message={"content": message_content, "role": message_role},
                    )
                )
            else:
                _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
                _set_span_attribute(span, f"{prefix}.content", message_content)
                _set_span_attribute(
                    span, f"gen_ai.response.{completion_index}.id", msg.get("id")
                )
            completion_index += 1

    # Token usage was cached by runs_retrieve_wrapper, if the run was retrieved.
    if run.get("usage"):
        usage_dict = model_as_dict(run.get("usage"))
        _set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
            usage_dict.get("completion_tokens"),
        )
        _set_span_attribute(
            span,
            SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
            usage_dict.get("prompt_tokens"),
        )

    span.end(run.get("end_time"))

    return response
244
+
245
+
246
@_with_tracer_wrapper
def runs_create_and_stream_wrapper(tracer, wrapped, instance, args, kwargs):
    """Start an "openai.assistant.run_stream" span for a streamed run and hand
    it to an EventHandlerWrapper that observes the stream."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    assistant_id = kwargs.get("assistant_id")
    instructions = kwargs.get("instructions")

    span = tracer.start_span(
        "openai.assistant.run_stream",
        kind=SpanKind.CLIENT,
        attributes={SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.CHAT.value},
        context=get_current_context(),
    )

    i = 0
    # Enrich with assistant metadata when cached, or fetch it live if
    # Config.enrich_assistant is set.
    if assistants.get(assistant_id) is not None or Config.enrich_assistant:
        if Config.enrich_assistant:
            assistant = model_as_dict(
                instance._client.beta.assistants.retrieve(assistant_id)
            )
            assistants[assistant_id] = assistant
        else:
            assistant = assistants[assistant_id]

        # NOTE(review): `assistant` is assigned above but the reads below go
        # through assistants[assistant_id] — equivalent values, by construction.
        _set_span_attribute(
            span, SpanAttributes.LLM_REQUEST_MODEL, assistants[assistant_id]["model"]
        )
        _set_span_attribute(
            span,
            SpanAttributes.LLM_SYSTEM,
            "openai",
        )
        _set_span_attribute(
            span,
            SpanAttributes.LLM_RESPONSE_MODEL,
            assistants[assistant_id]["model"],
        )
        # Assistant instructions become the first system prompt.
        if should_emit_events():
            emit_event(
                MessageEvent(
                    content=assistants[assistant_id]["instructions"], role="system"
                )
            )
        else:
            _set_span_attribute(
                span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system"
            )
            _set_span_attribute(
                span,
                f"{SpanAttributes.LLM_PROMPTS}.{i}.content",
                assistants[assistant_id]["instructions"],
            )
        i += 1
    # Run-level instructions become the next system prompt.
    if should_emit_events():
        emit_event(MessageEvent(content=instructions, role="system"))
    else:
        _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.role", "system")
        _set_span_attribute(
            span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", instructions
        )

    # Local import avoids a circular import at module load time.
    from ..v1.event_handler_wrapper import (
        EventHandlerWrapper,
    )

    # Replace the caller's handler with a wrapper that forwards events and
    # owns the span. Assumes "event_handler" is always present in kwargs —
    # a missing key raises KeyError; TODO confirm callers always pass it.
    kwargs["event_handler"] = EventHandlerWrapper(
        original_handler=kwargs["event_handler"],
        span=span,
    )

    try:
        response = wrapped(*args, **kwargs)
        # On success the span is NOT ended here — presumably the
        # EventHandlerWrapper ends it when the stream completes; confirm.
        return response
    except Exception as e:
        span.set_attribute(ERROR_TYPE, e.__class__.__name__)
        span.record_exception(e, attributes=get_event_attributes_from_context())
        span.set_status(Status(StatusCode.ERROR, str(e)))
        span.end()
        raise