lmnr 0.6.18__py3-none-any.whl → 0.6.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +55 -20
  2. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +23 -0
  3. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  4. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +442 -0
  5. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1024 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +297 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +308 -0
  9. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +185 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +358 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +319 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +132 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +626 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  19. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +1 -3
  20. lmnr/sdk/browser/browser_use_otel.py +1 -1
  21. lmnr/sdk/browser/patchright_otel.py +0 -14
  22. lmnr/sdk/browser/playwright_otel.py +16 -130
  23. lmnr/sdk/browser/pw_utils.py +45 -31
  24. lmnr/version.py +1 -1
  25. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/METADATA +2 -5
  26. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/RECORD +28 -11
  27. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/WHEEL +1 -1
  28. {lmnr-0.6.18.dist-info → lmnr-0.6.19.dist-info}/entry_points.txt +0 -0
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py
@@ -0,0 +1,297 @@
+ import logging
+
+ from opentelemetry import context as context_api
+ from ..shared import (
+     _set_client_attributes,
+     _set_functions_attributes,
+     _set_request_attributes,
+     _set_response_attributes,
+     _set_span_attribute,
+     _set_span_stream_usage,
+     get_token_count_from_string,
+     is_streaming_response,
+     model_as_dict,
+     propagate_trace_context,
+     should_record_stream_token_usage,
+ )
+ from ..shared.config import Config
+ from ..shared.event_emitter import emit_event
+ from ..shared.event_models import (
+     ChoiceEvent,
+     MessageEvent,
+ )
+ from ..utils import (
+     _with_tracer_wrapper,
+     dont_throw,
+     is_openai_v1,
+     should_emit_events,
+     should_send_prompts,
+ )
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.semconv_ai import (
+     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+     LLMRequestTypeValues,
+     SpanAttributes,
+ )
+ from opentelemetry.trace import SpanKind
+ from opentelemetry.trace.status import Status, StatusCode
+
+ SPAN_NAME = "openai.completion"
+ LLM_REQUEST_TYPE = LLMRequestTypeValues.COMPLETION
+
+ logger = logging.getLogger(__name__)
+
+
+ @_with_tracer_wrapper
+ def completion_wrapper(tracer, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return wrapped(*args, **kwargs)
+
+     # span needs to be opened and closed manually because the response is a generator
+     span = tracer.start_span(
+         SPAN_NAME,
+         kind=SpanKind.CLIENT,
+         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+     )
+
+     _handle_request(span, kwargs, instance)
+
+     try:
+         response = wrapped(*args, **kwargs)
+     except Exception as e:
+         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+         span.record_exception(e)
+         span.set_status(Status(StatusCode.ERROR, str(e)))
+         span.end()
+         raise
+
+     if is_streaming_response(response):
+         # span will be closed after the generator is done
+         return _build_from_streaming_response(span, kwargs, response)
+     else:
+         _handle_response(response, span, instance)
+
+     span.end()
+     return response
+
+
+ @_with_tracer_wrapper
+ async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return await wrapped(*args, **kwargs)
+
+     span = tracer.start_span(
+         name=SPAN_NAME,
+         kind=SpanKind.CLIENT,
+         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+     )
+
+     _handle_request(span, kwargs, instance)
+
+     try:
+         response = await wrapped(*args, **kwargs)
+     except Exception as e:
+         span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+         span.record_exception(e)
+         span.set_status(Status(StatusCode.ERROR, str(e)))
+         span.end()
+         raise
+
+     if is_streaming_response(response):
+         # span will be closed after the generator is done
+         return _abuild_from_streaming_response(span, kwargs, response)
+     else:
+         _handle_response(response, span, instance)
+
+     span.end()
+     return response
+
+
+ @dont_throw
+ def _handle_request(span, kwargs, instance):
+     _set_request_attributes(span, kwargs, instance)
+     if should_emit_events():
+         _emit_prompts_events(kwargs)
+     else:
+         if should_send_prompts():
+             _set_prompts(span, kwargs.get("prompt"))
+             _set_functions_attributes(span, kwargs.get("functions"))
+     _set_client_attributes(span, instance)
+     if Config.enable_trace_context_propagation:
+         propagate_trace_context(span, kwargs)
+
+
+ def _emit_prompts_events(kwargs):
+     prompt = kwargs.get("prompt")
+     if isinstance(prompt, list):
+         for p in prompt:
+             emit_event(MessageEvent(content=p))
+     elif isinstance(prompt, str):
+         emit_event(MessageEvent(content=prompt))
+
+
+ @dont_throw
+ def _handle_response(response, span, instance=None):
+     if is_openai_v1():
+         response_dict = model_as_dict(response)
+     else:
+         response_dict = response
+
+     _set_response_attributes(span, response_dict)
+     if should_emit_events():
+         for choice in response.choices:
+             emit_event(_parse_choice_event(choice))
+     else:
+         if should_send_prompts():
+             _set_completions(span, response_dict.get("choices"))
+
+
+ def _set_prompts(span, prompt):
+     if not span.is_recording() or not prompt:
+         return
+
+     _set_span_attribute(
+         span,
+         f"{SpanAttributes.LLM_PROMPTS}.0.user",
+         prompt[0] if isinstance(prompt, list) else prompt,
+     )
+
+
+ @dont_throw
+ def _set_completions(span, choices):
+     if not span.is_recording() or not choices:
+         return
+
+     for choice in choices:
+         index = choice.get("index")
+         prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
+         _set_span_attribute(
+             span, f"{prefix}.finish_reason", choice.get("finish_reason")
+         )
+         _set_span_attribute(span, f"{prefix}.content", choice.get("text"))
+
+
+ @dont_throw
+ def _build_from_streaming_response(span, request_kwargs, response):
+     complete_response = {"choices": [], "model": "", "id": ""}
+     for item in response:
+         yield item
+         _accumulate_streaming_response(complete_response, item)
+
+     _set_response_attributes(span, complete_response)
+
+     _set_token_usage(span, request_kwargs, complete_response)
+
+     if should_emit_events():
+         _emit_streaming_response_events(complete_response)
+     else:
+         if should_send_prompts():
+             _set_completions(span, complete_response.get("choices"))
+
+     span.set_status(Status(StatusCode.OK))
+     span.end()
+
+
+ @dont_throw
+ async def _abuild_from_streaming_response(span, request_kwargs, response):
+     complete_response = {"choices": [], "model": "", "id": ""}
+     async for item in response:
+         yield item
+         _accumulate_streaming_response(complete_response, item)
+
+     _set_response_attributes(span, complete_response)
+
+     _set_token_usage(span, request_kwargs, complete_response)
+
+     if should_emit_events():
+         _emit_streaming_response_events(complete_response)
+     else:
+         if should_send_prompts():
+             _set_completions(span, complete_response.get("choices"))
+
+     span.set_status(Status(StatusCode.OK))
+     span.end()
+
+
+ def _emit_streaming_response_events(complete_response):
+     for i, choice in enumerate(complete_response["choices"]):
+         emit_event(
+             ChoiceEvent(
+                 index=choice.get("index", i),
+                 message={"content": choice.get("text"), "role": "assistant"},
+                 finish_reason=choice.get("finish_reason", "unknown"),
+             )
+         )
+
+
+ @dont_throw
+ def _set_token_usage(span, request_kwargs, complete_response):
+     # use tiktoken to calculate token usage
+     if should_record_stream_token_usage():
+         prompt_usage = -1
+         completion_usage = -1
+
+         # prompt_usage
+         if request_kwargs and request_kwargs.get("prompt"):
+             prompt_content = request_kwargs.get("prompt")
+             model_name = complete_response.get("model") or None
+
+             if model_name:
+                 prompt_usage = get_token_count_from_string(prompt_content, model_name)
+
+         # completion_usage
+         if complete_response.get("choices"):
+             completion_content = ""
+             model_name = complete_response.get("model") or None
+
+             for choice in complete_response.get("choices"):
+                 if choice.get("text"):
+                     completion_content += choice.get("text")
+
+             if model_name:
+                 completion_usage = get_token_count_from_string(
+                     completion_content, model_name
+                 )
+
+         # span record
+         _set_span_stream_usage(span, prompt_usage, completion_usage)
+
+
+ @dont_throw
+ def _accumulate_streaming_response(complete_response, item):
+     if is_openai_v1():
+         item = model_as_dict(item)
+
+     complete_response["model"] = item.get("model")
+     complete_response["id"] = item.get("id")
+     for choice in item.get("choices"):
+         index = choice.get("index")
+         if len(complete_response.get("choices")) <= index:
+             complete_response["choices"].append({"index": index, "text": ""})
+         complete_choice = complete_response.get("choices")[index]
+         if choice.get("finish_reason"):
+             complete_choice["finish_reason"] = choice.get("finish_reason")
+
+         if choice.get("text"):
+             complete_choice["text"] += choice.get("text")
+
+     return complete_response
+
+
+ def _parse_choice_event(choice) -> ChoiceEvent:
+     has_message = choice.text is not None
+     has_finish_reason = choice.finish_reason is not None
+
+     content = choice.text if has_message else None
+     finish_reason = choice.finish_reason if has_finish_reason else "unknown"
+
+     return ChoiceEvent(
+         index=choice.index,
+         message={"content": content, "role": "assistant"},
+         finish_reason=finish_reason,
+     )
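Note on the streaming pattern above: the span is created with tracer.start_span() rather than a context manager because, for streamed completions, the wrapper returns a generator and the span must stay open until the caller finishes consuming it. A minimal, self-contained sketch of the same idea using only the public OpenTelemetry tracing API (the names traced_stream and demo.stream are illustrative, not part of this package):

    from opentelemetry import trace

    tracer = trace.get_tracer(__name__)

    def traced_stream(chunks):
        # Started manually: a "with" block would end the span when this
        # function returns, before the caller has consumed anything.
        span = tracer.start_span("demo.stream")

        def generator():
            try:
                for chunk in chunks:
                    yield chunk
            finally:
                # End the span only once the stream is exhausted (or the
                # consumer abandons it).
                span.end()

        return generator()

    # With no SDK configured this falls back to a no-op tracer, so the
    # sketch runs as-is.
    for piece in traced_stream(["a", "b", "c"]):
        print(piece)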
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py
@@ -0,0 +1,16 @@
+ from typing import Callable, Optional
+
+ from opentelemetry._events import EventLogger
+
+
+ class Config:
+     enrich_token_usage = False
+     enrich_assistant = False
+     exception_logger = None
+     get_common_metrics_attributes: Callable[[], dict] = lambda: {}
+     upload_base64_image: Callable[[str, str, str], str] = (
+         lambda trace_id, span_id, base64_image_url: ""
+     )
+     enable_trace_context_propagation: bool = True
+     use_legacy_attributes = True
+     event_logger: Optional[EventLogger] = None
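The flags on Config above are plain class attributes that the wrappers read at call time (for example, _handle_request checks Config.enable_trace_context_propagation on every request). A hedged sketch of overriding one before making calls; the import path follows this wheel's file layout, and flipping flags this way is illustrative rather than a documented interface:

    from lmnr.opentelemetry_lib.opentelemetry.instrumentation.openai.shared.config import (
        Config,
    )

    # Stop injecting trace context into outgoing OpenAI requests; the
    # wrappers consult this flag on each call.
    Config.enable_trace_context_propagation = False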
lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py
@@ -0,0 +1,308 @@
+ import logging
+ import time
+ from collections.abc import Iterable
+
+ from opentelemetry import context as context_api
+ from ..shared import (
+     OPENAI_LLM_USAGE_TOKEN_TYPES,
+     _get_openai_base_url,
+     _set_client_attributes,
+     _set_request_attributes,
+     _set_response_attributes,
+     _set_span_attribute,
+     _token_type,
+     metric_shared_attributes,
+     model_as_dict,
+     propagate_trace_context,
+ )
+ from ..shared.config import Config
+ from ..shared.event_emitter import emit_event
+ from ..shared.event_models import (
+     ChoiceEvent,
+     MessageEvent,
+ )
+ from ..utils import (
+     _with_embeddings_telemetry_wrapper,
+     dont_throw,
+     is_openai_v1,
+     should_emit_events,
+     should_send_prompts,
+     start_as_current_span_async,
+ )
+ from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
+ from opentelemetry.metrics import Counter, Histogram
+ from opentelemetry.semconv.attributes.error_attributes import ERROR_TYPE
+ from opentelemetry.semconv_ai import (
+     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
+     LLMRequestTypeValues,
+     SpanAttributes,
+ )
+ from opentelemetry.trace import SpanKind, Status, StatusCode
+
+ from openai._legacy_response import LegacyAPIResponse
+ from openai.types.create_embedding_response import CreateEmbeddingResponse
+
+ SPAN_NAME = "openai.embeddings"
+ LLM_REQUEST_TYPE = LLMRequestTypeValues.EMBEDDING
+
+ logger = logging.getLogger(__name__)
+
+
+ @_with_embeddings_telemetry_wrapper
+ def embeddings_wrapper(
+     tracer,
+     token_counter: Counter,
+     vector_size_counter: Counter,
+     duration_histogram: Histogram,
+     exception_counter: Counter,
+     wrapped,
+     instance,
+     args,
+     kwargs,
+ ):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return wrapped(*args, **kwargs)
+
+     with tracer.start_as_current_span(
+         name=SPAN_NAME,
+         kind=SpanKind.CLIENT,
+         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+     ) as span:
+         _handle_request(span, kwargs, instance)
+
+         try:
+             # record time for duration
+             start_time = time.time()
+             response = wrapped(*args, **kwargs)
+             end_time = time.time()
+         except Exception as e:  # pylint: disable=broad-except
+             end_time = time.time()
+             duration = end_time - start_time if "start_time" in locals() else 0
+             attributes = {
+                 "error.type": e.__class__.__name__,
+             }
+
+             # if there is a legal duration, record it
+             if duration > 0 and duration_histogram:
+                 duration_histogram.record(duration, attributes=attributes)
+             if exception_counter:
+                 exception_counter.add(1, attributes=attributes)
+
+             span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+             span.record_exception(e)
+             span.set_status(Status(StatusCode.ERROR, str(e)))
+             span.end()
+
+             raise
+
+         duration = end_time - start_time
+
+         _handle_response(
+             response,
+             span,
+             instance,
+             token_counter,
+             vector_size_counter,
+             duration_histogram,
+             duration,
+         )
+
+         return response
+
+
+ @_with_embeddings_telemetry_wrapper
+ async def aembeddings_wrapper(
+     tracer,
+     token_counter: Counter,
+     vector_size_counter: Counter,
+     duration_histogram: Histogram,
+     exception_counter: Counter,
+     wrapped,
+     instance,
+     args,
+     kwargs,
+ ):
+     if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY) or context_api.get_value(
+         SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY
+     ):
+         return await wrapped(*args, **kwargs)
+
+     async with start_as_current_span_async(
+         tracer=tracer,
+         name=SPAN_NAME,
+         kind=SpanKind.CLIENT,
+         attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
+     ) as span:
+         _handle_request(span, kwargs, instance)
+
+         try:
+             # record time for duration
+             start_time = time.time()
+             response = await wrapped(*args, **kwargs)
+             end_time = time.time()
+         except Exception as e:  # pylint: disable=broad-except
+             end_time = time.time()
+             duration = end_time - start_time if "start_time" in locals() else 0
+             attributes = {
+                 "error.type": e.__class__.__name__,
+             }
+
+             # if there is a legal duration, record it
+             if duration > 0 and duration_histogram:
+                 duration_histogram.record(duration, attributes=attributes)
+             if exception_counter:
+                 exception_counter.add(1, attributes=attributes)
+
+             span.set_attribute(ERROR_TYPE, e.__class__.__name__)
+             span.record_exception(e)
+             span.set_status(Status(StatusCode.ERROR, str(e)))
+             span.end()
+
+             raise
+
+         duration = end_time - start_time
+
+         _handle_response(
+             response,
+             span,
+             instance,
+             token_counter,
+             vector_size_counter,
+             duration_histogram,
+             duration,
+         )
+
+         return response
+
+
+ @dont_throw
+ def _handle_request(span, kwargs, instance):
+     _set_request_attributes(span, kwargs, instance)
+
+     if should_emit_events():
+         _emit_embeddings_message_event(kwargs.get("input"))
+     else:
+         if should_send_prompts():
+             _set_prompts(span, kwargs.get("input"))
+
+     _set_client_attributes(span, instance)
+
+     if Config.enable_trace_context_propagation:
+         propagate_trace_context(span, kwargs)
+
+
+ @dont_throw
+ def _handle_response(
+     response,
+     span,
+     instance=None,
+     token_counter=None,
+     vector_size_counter=None,
+     duration_histogram=None,
+     duration=None,
+ ):
+     if is_openai_v1():
+         response_dict = model_as_dict(response)
+     else:
+         response_dict = response
+     # metrics record
+     _set_embeddings_metrics(
+         instance,
+         token_counter,
+         vector_size_counter,
+         duration_histogram,
+         response_dict,
+         duration,
+     )
+     # span attributes
+     _set_response_attributes(span, response_dict)
+
+     # emit events
+     if should_emit_events():
+         _emit_embeddings_choice_event(response)
+
+
+ def _set_embeddings_metrics(
+     instance,
+     token_counter,
+     vector_size_counter,
+     duration_histogram,
+     response_dict,
+     duration,
+ ):
+     shared_attributes = metric_shared_attributes(
+         response_model=response_dict.get("model") or None,
+         operation="embeddings",
+         server_address=_get_openai_base_url(instance),
+     )
+
+     # token count metrics
+     usage = response_dict.get("usage")
+     if usage and token_counter:
+         for name, val in usage.items():
+             if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
+                 if val is None:
+                     logging.error(f"Received None value for {name} in usage")
+                     continue
+                 attributes_with_token_type = {
+                     **shared_attributes,
+                     SpanAttributes.LLM_TOKEN_TYPE: _token_type(name),
+                 }
+                 token_counter.record(val, attributes=attributes_with_token_type)
+
+     # vec size metrics
+     # should use counter for vector_size?
+     vec_embedding = (response_dict.get("data") or [{}])[0].get("embedding", [])
+     vec_size = len(vec_embedding)
+     if vector_size_counter:
+         vector_size_counter.add(vec_size, attributes=shared_attributes)
+
+     # duration metrics
+     if duration and isinstance(duration, (float, int)) and duration_histogram:
+         duration_histogram.record(duration, attributes=shared_attributes)
+
+
+ def _set_prompts(span, prompt):
+     if not span.is_recording() or not prompt:
+         return
+
+     if isinstance(prompt, list):
+         for i, p in enumerate(prompt):
+             _set_span_attribute(span, f"{SpanAttributes.LLM_PROMPTS}.{i}.content", p)
+     else:
+         _set_span_attribute(
+             span,
+             f"{SpanAttributes.LLM_PROMPTS}.0.content",
+             prompt,
+         )
+
+
+ def _emit_embeddings_message_event(embeddings) -> None:
+     if isinstance(embeddings, str):
+         emit_event(MessageEvent(content=embeddings))
+     elif isinstance(embeddings, Iterable):
+         for i in embeddings:
+             emit_event(MessageEvent(content=i))
+
+
+ def _emit_embeddings_choice_event(response) -> None:
+     if isinstance(response, CreateEmbeddingResponse):
+         for embedding in response.data:
+             emit_event(
+                 ChoiceEvent(
+                     index=embedding.index,
+                     message={"content": embedding.embedding, "role": "assistant"},
+                 )
+             )
+
+     elif isinstance(response, LegacyAPIResponse):
+         parsed_response = response.parse()
+         for embedding in parsed_response.data:
+             emit_event(
+                 ChoiceEvent(
+                     index=embedding.index,
+                     message={"content": embedding.embedding, "role": "assistant"},
+                 )
+             )
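Both wrapper families above return early when either suppression key is set on the active context, which is how callers can opt out of tracing for a block of calls. A minimal sketch using the same constant the wrappers import (note that _SUPPRESS_INSTRUMENTATION_KEY is a private OpenTelemetry symbol, so relying on it is an assumption about stable internals):

    from opentelemetry import context as context_api
    from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY

    # While this key is attached, the wrappers' get_value() check
    # short-circuits and no spans or metrics are recorded.
    token = context_api.attach(
        context_api.set_value(_SUPPRESS_INSTRUMENTATION_KEY, True)
    )
    try:
        ...  # OpenAI calls made here bypass the instrumentation
    finally:
        # Restore the previous context so tracing resumes.
        context_api.detach(token)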