lmnr 0.7.7__py3-none-any.whl → 0.7.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py CHANGED
@@ -8,6 +8,7 @@ from typing import AsyncGenerator, Callable, Collection, Generator
 
 from google.genai import types
 
+from lmnr.opentelemetry_lib.decorators import json_dumps
 from lmnr.opentelemetry_lib.tracing.context import (
     get_current_context,
     get_event_attributes_from_context,
@@ -20,9 +21,10 @@ from .schema_utils import SchemaJSONEncoder, process_schema
 from .utils import (
     dont_throw,
     get_content,
+    process_content_union,
+    process_stream_chunk,
     role_from_content_union,
     set_span_attribute,
-    process_content_union,
     to_dict,
     with_tracer_wrapper,
 )
@@ -139,9 +141,7 @@ def _set_request_attributes(span, args, kwargs):
         try:
             set_span_attribute(
                 span,
-                # TODO: change to SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA
-                # when we upgrade to opentelemetry-semantic-conventions-ai>=0.4.10
-                "gen_ai.request.structured_output_schema",
+                SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
                 json.dumps(process_schema(schema), cls=SchemaJSONEncoder),
             )
         except Exception:
@@ -150,10 +150,8 @@ def _set_request_attributes(span, args, kwargs):
         try:
             set_span_attribute(
                 span,
-                # TODO: change to SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA
-                # when we upgrade to opentelemetry-semantic-conventions-ai>=0.4.10
-                "gen_ai.request.structured_output_schema",
-                json.dumps(json_schema),
+                SpanAttributes.LLM_REQUEST_STRUCTURED_OUTPUT_SCHEMA,
+                json_dumps(json_schema),
             )
         except Exception:
             pass
@@ -182,7 +180,7 @@ def _set_request_attributes(span, args, kwargs):
         set_span_attribute(
             span,
             f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.parameters",
-            json.dumps(tool_dict.get("parameters")),
+            json_dumps(tool_dict.get("parameters")),
         )
 
     if should_send_prompts():
@@ -215,7 +213,7 @@ def _set_request_attributes(span, args, kwargs):
                 (
                     content_str
                     if isinstance(content_str, str)
-                    else json.dumps(content_str)
+                    else json_dumps(content_str)
                 ),
             )
             blocks = (
@@ -248,7 +246,7 @@ def _set_request_attributes(span, args, kwargs):
                 set_span_attribute(
                     span,
                     f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.arguments",
-                    json.dumps(function_call.get("arguments")),
+                    json_dumps(function_call.get("arguments")),
                 )
                 tool_call_index += 1
 
@@ -300,22 +298,26 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
         span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.0.role", "model"
     )
     candidates_list = candidates if isinstance(candidates, list) else [candidates]
-    for i, candidate in enumerate(candidates_list):
+    i = 0
+    for candidate in candidates_list:
+        has_content = False
         processed_content = process_content_union(candidate.content)
         content_str = get_content(processed_content)
 
         set_span_attribute(
             span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "model"
         )
-        set_span_attribute(
-            span,
-            f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
-            (
-                content_str
-                if isinstance(content_str, str)
-                else json.dumps(content_str)
-            ),
-        )
+        if content_str:
+            has_content = True
+            set_span_attribute(
+                span,
+                f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.content",
+                (
+                    content_str
+                    if isinstance(content_str, str)
+                    else json_dumps(content_str)
+                ),
+            )
         blocks = (
             processed_content
             if isinstance(processed_content, list)
@@ -328,6 +330,7 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
             if not block_dict.get("function_call"):
                 continue
             function_call = to_dict(block_dict.get("function_call", {}))
+            has_content = True
             set_span_attribute(
                 span,
                 f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.name",
@@ -345,9 +348,11 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
             set_span_attribute(
                 span,
                 f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.arguments",
-                json.dumps(function_call.get("arguments")),
+                json_dumps(function_call.get("arguments")),
             )
             tool_call_index += 1
+        if has_content:
+            i += 1
 
 
 @dont_throw
@@ -359,53 +364,45 @@ def _build_from_streaming_response(
     aggregated_usage_metadata = defaultdict(int)
     model_version = None
     for chunk in response:
-        if chunk.model_version:
-            model_version = chunk.model_version
-
-        if chunk.candidates:
-            # Currently gemini throws an error if you pass more than one candidate
-            # with streaming
-            if chunk.candidates and len(chunk.candidates) > 0:
-                final_parts += chunk.candidates[0].content.parts or []
-                role = chunk.candidates[0].content.role or role
-        if chunk.usage_metadata:
-            usage_dict = to_dict(chunk.usage_metadata)
-            # prompt token count is sent in every chunk
-            # (and is less by 1 in the last chunk, so we set it once);
-            # total token count in every chunk is greater by prompt token count than it should be,
-            # thus this awkward logic here
-            if aggregated_usage_metadata.get("prompt_token_count") is None:
-                aggregated_usage_metadata["prompt_token_count"] = (
-                    usage_dict.get("prompt_token_count") or 0
-                )
-                aggregated_usage_metadata["total_token_count"] = (
-                    usage_dict.get("total_token_count") or 0
-                )
-            aggregated_usage_metadata["candidates_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
-            aggregated_usage_metadata["total_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
+        # Important: do all processing in a separate sync function, that is
+        # wrapped in @dont_throw. If we did it here, the @dont_throw on top of
+        # this function would not be able to catch the errors, as they are
+        # raised later, after the generator is returned, and when it is being
+        # consumed.
+        chunk_result = process_stream_chunk(
+            chunk,
+            role,
+            model_version,
+            aggregated_usage_metadata,
+            final_parts,
+        )
+        # even though process_stream_chunk can't return None, the result can be
+        # None, if the processing throws an error (see @dont_throw)
+        if chunk_result:
+            role = chunk_result["role"]
+            model_version = chunk_result["model_version"]
         yield chunk
 
-    compound_response = types.GenerateContentResponse(
-        candidates=[
-            {
-                "content": {
-                    "parts": final_parts,
-                    "role": role,
-                },
-            }
-        ],
-        usage_metadata=types.GenerateContentResponseUsageMetadataDict(
-            **aggregated_usage_metadata
-        ),
-        model_version=model_version,
-    )
-    if span.is_recording():
-        _set_response_attributes(span, compound_response)
-    span.end()
+    try:
+        compound_response = types.GenerateContentResponse(
+            candidates=[
+                {
+                    "content": {
+                        "parts": final_parts,
+                        "role": role,
+                    },
+                }
+            ],
+            usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+                **aggregated_usage_metadata
+            ),
+            model_version=model_version,
+        )
+        if span.is_recording():
+            _set_response_attributes(span, compound_response)
+    finally:
+        if span.is_recording():
+            span.end()
 
 
 @dont_throw
@@ -417,52 +414,45 @@ async def _abuild_from_streaming_response(
     aggregated_usage_metadata = defaultdict(int)
     model_version = None
     async for chunk in response:
-        if chunk.candidates:
-            # Currently gemini throws an error if you pass more than one candidate
-            # with streaming
-            if chunk.candidates and len(chunk.candidates) > 0:
-                final_parts += chunk.candidates[0].content.parts or []
-                role = chunk.candidates[0].content.role or role
-        if chunk.model_version:
-            model_version = chunk.model_version
-        if chunk.usage_metadata:
-            usage_dict = to_dict(chunk.usage_metadata)
-            # prompt token count is sent in every chunk
-            # (and is less by 1 in the last chunk, so we set it once);
-            # total token count in every chunk is greater by prompt token count than it should be,
-            # thus this awkward logic here
-            if aggregated_usage_metadata.get("prompt_token_count") is None:
-                aggregated_usage_metadata["prompt_token_count"] = usage_dict.get(
-                    "prompt_token_count"
-                )
-                aggregated_usage_metadata["total_token_count"] = usage_dict.get(
-                    "total_token_count"
-                )
-            aggregated_usage_metadata["candidates_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
-            aggregated_usage_metadata["total_token_count"] += (
-                usage_dict.get("candidates_token_count") or 0
-            )
+        # Important: do all processing in a separate sync function, that is
+        # wrapped in @dont_throw. If we did it here, the @dont_throw on top of
+        # this function would not be able to catch the errors, as they are
+        # raised later, after the generator is returned, and when it is being
+        # consumed.
+        chunk_result = process_stream_chunk(
+            chunk,
+            role,
+            model_version,
+            aggregated_usage_metadata,
+            final_parts,
+        )
+        # even though process_stream_chunk can't return None, the result can be
+        # None, if the processing throws an error (see @dont_throw)
+        if chunk_result:
+            role = chunk_result["role"]
+            model_version = chunk_result["model_version"]
         yield chunk
 
-    compound_response = types.GenerateContentResponse(
-        candidates=[
-            {
-                "content": {
-                    "parts": final_parts,
-                    "role": role,
-                },
-            }
-        ],
-        usage_metadata=types.GenerateContentResponseUsageMetadataDict(
-            **aggregated_usage_metadata
-        ),
-        model_version=model_version,
-    )
-    if span.is_recording():
-        _set_response_attributes(span, compound_response)
-    span.end()
+    try:
+        compound_response = types.GenerateContentResponse(
+            candidates=[
+                {
+                    "content": {
+                        "parts": final_parts,
+                        "role": role,
+                    },
+                }
+            ],
+            usage_metadata=types.GenerateContentResponseUsageMetadataDict(
+                **aggregated_usage_metadata
+            ),
+            model_version=model_version,
+        )
+        if span.is_recording():
+            _set_response_attributes(span, compound_response)
+    finally:
+        if span.is_recording():
+            span.end()
 
 
 @with_tracer_wrapper
@@ -499,7 +489,7 @@ def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
         span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
-        raise e
+        raise
 
 
 @with_tracer_wrapper
@@ -538,7 +528,7 @@ async def _awrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
         span.record_exception(e, attributes=attributes)
         span.set_status(Status(StatusCode.ERROR, str(e)))
         span.end()
-        raise e
+        raise
 
 
 class GoogleGenAiSdkInstrumentor(BaseInstrumentor):
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py CHANGED
@@ -10,9 +10,12 @@ DUMMY_CLIENT = BaseApiClient(api_key="dummy")
 
 def process_schema(schema: Any) -> dict[str, Any]:
     # The only thing we need from the client is the t_schema function
-    json_schema = t_schema(DUMMY_CLIENT, schema).json_schema.model_dump(
-        exclude_unset=True, exclude_none=True
-    )
+    try:
+        json_schema = t_schema(DUMMY_CLIENT, schema).json_schema.model_dump(
+            exclude_unset=True, exclude_none=True
+        )
+    except Exception:
+        json_schema = {}
     return json_schema
 
 
lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py CHANGED
@@ -1,6 +1,8 @@
 import base64
+from collections import defaultdict
 import logging
 import traceback
+from typing_extensions import TypedDict
 
 from .config import (
     Config,
@@ -33,10 +35,14 @@ class ProcessedContentPart(pydantic.BaseModel):
     image_url: ImageUrl | None = pydantic.Field(default=None)
 
 
-def set_span_attribute(span: Span, name: str, value: str):
-    if value is not None:
-        if value != "":
-            span.set_attribute(name, value)
+class ProcessChunkResult(TypedDict):
+    role: str
+    model_version: str | None
+
+
+def set_span_attribute(span: Span, name: str, value: Any):
+    if value is not None and value != "":
+        span.set_attribute(name, value)
     return
 
 
@@ -84,7 +90,7 @@ def get_content(
     content: (
         ProcessedContentPart | dict | list[ProcessedContentPart | dict] | str | None
     ),
-) -> list[Any] | None:
+) -> dict | list[dict] | None:
     if isinstance(content, dict):
         return content.get("content") or content.get("image_url")
     if isinstance(content, ProcessedContentPart):
@@ -98,7 +104,8 @@ def get_content(
         else:
             return None
     elif isinstance(content, list):
-        return [get_content(item) for item in content]
+        contents_list = [get_content(item) for item in content]
+        return [item for item in contents_list if item is not None]
     elif isinstance(content, str):
         return {
             "type": "text",
@@ -110,9 +117,6 @@
 
 
 def process_content_union(
     content: types.ContentUnion | types.ContentUnionDict,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
 ) -> ProcessedContentPart | dict | list[ProcessedContentPart | dict] | None:
     if isinstance(content, types.Content):
         parts = to_dict(content).get("parts", [])
@@ -123,25 +127,16 @@
         return _process_part_union(content)
     elif isinstance(content, dict):
         if "parts" in content:
-            return [
-                _process_part_union(
-                    item, trace_id, span_id, message_index, content_index
-                )
-                for content_index, item in enumerate(content.get("parts", []))
-            ]
+            return [_process_part_union(item) for item in content.get("parts", [])]
         else:
             # Assume it's PartDict
-            return _process_part_union(content, trace_id, span_id, message_index)
+            return _process_part_union(content)
     else:
         return None
 
 
 def _process_part_union(
     content: types.PartDict | types.File | types.Part | str,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
-    content_index: int = 0,
 ) -> ProcessedContentPart | dict | None:
     if isinstance(content, str):
         return ProcessedContentPart(content=content)
@@ -154,36 +149,31 @@
         )
         return ProcessedContentPart(content=f"files/{name}")
     elif isinstance(content, (types.Part, dict)):
-        return _process_part(content, trace_id, span_id, message_index, content_index)
+        return _process_part(content)
     else:
         return None
 
 
 def _process_part(
     content: types.Part,
-    trace_id: str | None = None,
-    span_id: str | None = None,
-    message_index: int = 0,
-    content_index: int = 0,
 ) -> ProcessedContentPart | dict | None:
     part_dict = to_dict(content)
     if part_dict.get("inline_data"):
         blob = to_dict(part_dict.get("inline_data"))
-        if blob.get("mime_type").startswith("image/"):
-            return _process_image_item(
-                blob, trace_id, span_id, message_index, content_index
-            )
+        if blob.get("mime_type", "").startswith("image/"):
+            return _process_image_item(blob)
         else:
             # currently, only images are supported
             return ProcessedContentPart(
                 content=blob.get("mime_type") or "unknown_media"
             )
-    elif part_dict.get("function_call"):
+    elif function_call := part_dict.get("function_call"):
+        function_call_dict = to_dict(function_call)
         return ProcessedContentPart(
             function_call=ToolCall(
-                name=part_dict.get("function_call").get("name"),
-                id=part_dict.get("function_call").get("id"),
-                arguments=part_dict.get("function_call").get("args", {}),
+                name=function_call_dict.get("name"),
+                id=function_call_dict.get("id"),
+                arguments=function_call_dict.get("args", {}),
             )
         )
     elif part_dict.get("text") is not None:
@@ -220,26 +210,71 @@ def with_tracer_wrapper(func):
     return _with_tracer
 
 
-def _process_image_item(
-    blob: dict[str, Any],
-    trace_id: str,
-    span_id: str,
-    message_index: int,
-    content_index: int,
-) -> ProcessedContentPart | dict | None:
+def _process_image_item(blob: dict[str, Any]) -> ProcessedContentPart | dict | None:
     # Convert to openai format, so backends can handle it
     data = blob.get("data")
     encoded_data = (
         base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data
     )
+    mime_type = blob.get("mime_type", "image/unknown")
+    image_type = mime_type.split("/")[1] if "/" in mime_type else "unknown"
+
     return (
         ProcessedContentPart(
             image_url=ImageUrl(
                 image_url=ImageUrlInner(
-                    url=f"data:image/{blob.get('mime_type').split('/')[1]};base64,{encoded_data}",
+                    url=f"data:image/{image_type};base64,{encoded_data}",
                 )
             )
         )
        if Config.convert_image_to_openai_format
        else blob
    )
+
+
+@dont_throw
+def process_stream_chunk(
+    chunk: types.GenerateContentResponse,
+    existing_role: str,
+    existing_model_version: str | None,
+    # ============================== #
+    # mutable states, passed by reference
+    aggregated_usage_metadata: defaultdict[str, int],
+    final_parts: list[types.Part | None],
+    # ============================== #
+) -> ProcessChunkResult:
+    role = existing_role
+    model_version = existing_model_version
+
+    if chunk.model_version:
+        model_version = chunk.model_version
+
+    # Currently gemini throws an error if you pass more than one candidate
+    # with streaming
+    if chunk.candidates and len(chunk.candidates) > 0 and chunk.candidates[0].content:
+        final_parts += chunk.candidates[0].content.parts or []
+        role = chunk.candidates[0].content.role or role
+    if chunk.usage_metadata:
+        usage_dict = to_dict(chunk.usage_metadata)
+        # prompt token count is sent in every chunk
+        # (and is less by 1 in the last chunk, so we set it once);
+        # total token count in every chunk is greater by prompt token count than it should be,
+        # thus this awkward logic here
+        if aggregated_usage_metadata.get("prompt_token_count") is None:
+            # or 0, not .get(key, 0), because sometimes the value is explicitly None
+            aggregated_usage_metadata["prompt_token_count"] = (
+                usage_dict.get("prompt_token_count") or 0
+            )
+            aggregated_usage_metadata["total_token_count"] = (
+                usage_dict.get("total_token_count") or 0
+            )
+        aggregated_usage_metadata["candidates_token_count"] += (
+            usage_dict.get("candidates_token_count") or 0
+        )
+        aggregated_usage_metadata["total_token_count"] += (
+            usage_dict.get("candidates_token_count") or 0
+        )
+    return ProcessChunkResult(
+        role=role,
+        model_version=model_version,
+    )
lmnr/version.py CHANGED
@@ -3,7 +3,7 @@ import httpx
 from packaging import version
 
 
-__version__ = "0.7.7"
+__version__ = "0.7.9"
 PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"
 
 
lmnr-0.7.9.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lmnr
-Version: 0.7.7
+Version: 0.7.9
 Summary: Python SDK for Laminar
 Author: lmnr.ai
 Author-email: lmnr.ai <founders@lmnr.ai>
lmnr-0.7.9.dist-info/RECORD CHANGED
@@ -13,10 +13,10 @@ lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py,sha
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py,sha256=7ca9f49e4d9a3bac292d13a8ee9827fdfb8a46d13ebdcbbfbac9c5584d11eaf3,13441
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py,sha256=0044f02da8b99322fdbf3f8f6663f04ff5d1295ddae92a635fd16eb685d5fbb6,5386
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py,sha256=5aacde4ca55ef50ed07a239ad8a86889e0621b1cc72be19bd93be7c9e20910a9,23
-lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=a47d4d1234e0278d1538748130a79c03d6cb3486976cb5d19578fe1b90f28e7b,20524
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=e0f5283e5d960e91766c2658af12837e43772d7c23f13ee44365a145e891e6c4,19453
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py,sha256=db9cdebc9ee0dccb493ffe608eede3047efec20ed26c3924b72b2e50edbd92c2,245
-lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py,sha256=857a6bc52f8bfd4da72786173615d31faaf3f9378f8f6150ffe8f6f9c4bb78f9,685
-lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=f1248196246826d899304e510c4c2df74088d8169d28f1d0aed578a7a6c3cbfd,7669
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py,sha256=b10619e76e5893f8b891f92531d29dcf6651e8f9a7dcbf81c3f35341ce311f6e,753
+lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=2e1dc91b04757f7f6c960e3f1c58bb9f7e735f0e1dcb811a08833faf18766d3b,9242
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py,sha256=1e98467711405e4ff8ccd0b53c002e7a676c581616ef015e8b6606bd7057478b,14986
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py,sha256=29d557d9dee56354e89634bdc3f4795f346ee67bbfec56184b4fb394e45a7e03,203
 lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py,sha256=1f07d78bf360832951c708fcb3737718e50d39ce05beb8adbf57e818b4873703,4481
@@ -93,8 +93,8 @@ lmnr/sdk/laminar.py,sha256=24d680407ce694f1a7ec0e9c0524eae3deb7d638ad5caff3a591d
 lmnr/sdk/log.py,sha256=9edfd83263f0d4845b1b2d1beeae2b4ed3f8628de941f371a893d72b79c348d4,2213
 lmnr/sdk/types.py,sha256=f8a8368e225c4d2f82df54d92f029065afb60c3eff494c77c6e574963ed524ff,13454
 lmnr/sdk/utils.py,sha256=0c5a81c305dcd3922f4b31c4f42cf83719c03888725838395adae167de92db76,5019
-lmnr/version.py,sha256=15856d7bd036a8e4e7b5b9efe650f00ffb9007781aa8eee0b0f52b05dc316841,1321
-lmnr-0.7.7.dist-info/WHEEL,sha256=ab6157bc637547491fb4567cd7ddf26b04d63382916ca16c29a5c8e94c9c9ef7,79
-lmnr-0.7.7.dist-info/entry_points.txt,sha256=abdf3411b7dd2d7329a241f2da6669bab4e314a747a586ecdb9f888f3035003c,39
-lmnr-0.7.7.dist-info/METADATA,sha256=889297ed4b003131bde03a5613f7305cecfa90fd22c006debad4c3ace5220e9a,14196
-lmnr-0.7.7.dist-info/RECORD,,
+lmnr/version.py,sha256=e9882c4c3fde6de255d516c74b58299c7fe25676f330f27a192a16c01106c4a5,1321
+lmnr-0.7.9.dist-info/WHEEL,sha256=ab6157bc637547491fb4567cd7ddf26b04d63382916ca16c29a5c8e94c9c9ef7,79
+lmnr-0.7.9.dist-info/entry_points.txt,sha256=abdf3411b7dd2d7329a241f2da6669bab4e314a747a586ecdb9f888f3035003c,39
+lmnr-0.7.9.dist-info/METADATA,sha256=c80f3d2e429443e6bfa13bf62b50521954560a51c88164ab4b83894e3b0ab27b,14196
+lmnr-0.7.9.dist-info/RECORD,,