lmnr 0.3.6__py3-none-any.whl → 0.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  from .sdk.client import Laminar
2
2
  from .sdk.decorators import observe, lmnr_context, wrap_llm_call
3
- from .sdk.interface import trace, TraceContext, SpanContext
3
+ from .sdk.interface import trace, TraceContext, SpanContext, initialize
4
4
  from .sdk.tracing_types import EvaluateEvent
5
5
  from .sdk.types import ChatMessage, PipelineRunError, PipelineRunResponse, NodeInput
6
6
 
lmnr/sdk/client.py CHANGED
@@ -119,8 +119,8 @@ class Laminar:
119
119
 
120
120
  def batch_post_traces(self, batch: list[Union[Span, Trace]]):
121
121
  log = logging.getLogger("laminar.client")
122
- url = self._base_url + "/v1/traces"
123
- data = json.dumps({"traces": [item.to_dict() for item in batch]})
122
+ url = self._base_url + "/v1/observations"
123
+ data = json.dumps({"observations": [item.to_dict() for item in batch]})
124
124
  log.debug(f"making request to {url}")
125
125
  headers = self._headers()
126
126
  res = requests.post(url, data=data, headers=headers)
lmnr/sdk/context.py CHANGED
@@ -34,6 +34,7 @@ class LaminarContextManager:
34
34
  project_api_key: str = None,
35
35
  threads: int = 1,
36
36
  max_task_queue_size: int = 1000,
37
+ env: dict[str, str] = {},
37
38
  ):
38
39
  self.project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
39
40
  if not self.project_api_key:
@@ -47,6 +48,7 @@ class LaminarContextManager:
47
48
  max_task_queue_size=max_task_queue_size,
48
49
  threads=threads,
49
50
  )
51
+ self.env = env
50
52
  # atexit executes functions last in first out, so we want to make sure
51
53
  # that we finalize the trace before thread manager is closed, so the updated
52
54
  # trace is sent to the server
@@ -60,7 +62,6 @@ class LaminarContextManager:
60
62
  metadata: Optional[dict[str, Any]] = None,
61
63
  attributes: Optional[dict[str, Any]] = None,
62
64
  span_type: Literal["DEFAULT", "LLM"] = "DEFAULT",
63
- check_event_names: list[str] = None,
64
65
  # trace attributes
65
66
  user_id: Optional[str] = None,
66
67
  session_id: Optional[str] = None,
@@ -89,7 +90,6 @@ class LaminarContextManager:
89
90
  attributes=attributes,
90
91
  parent_span_id=parent_span_id,
91
92
  span_type=span_type,
92
- check_event_names=check_event_names,
93
93
  )
94
94
  stack = _lmnr_stack_context.get()
95
95
  _lmnr_stack_context.set(stack + [span])
@@ -146,6 +146,7 @@ class LaminarContextManager:
146
146
  attributes = self._extract_llm_attributes_from_response(
147
147
  provider=provider, response=result
148
148
  )
149
+
149
150
  return self._finalize_span(
150
151
  span,
151
152
  provider=provider,
@@ -159,6 +160,7 @@ class LaminarContextManager:
159
160
  metadata: Optional[dict[str, Any]] = None,
160
161
  attributes: Optional[dict[str, Any]] = None,
161
162
  evaluate_events: list[EvaluateEvent] = None,
163
+ events: list[Event] = None,
162
164
  override: bool = False,
163
165
  ):
164
166
  stack = _lmnr_stack_context.get()
@@ -173,6 +175,7 @@ class LaminarContextManager:
173
175
  if override
174
176
  else span.evaluateEvents + (evaluate_events or [])
175
177
  )
178
+ new_events = events if override else span.events + (events or [])
176
179
  new_attributes = (
177
180
  attributes
178
181
  if override
@@ -182,6 +185,7 @@ class LaminarContextManager:
182
185
  span=span,
183
186
  metadata=new_metadata,
184
187
  evaluate_events=new_evaluate_events,
188
+ events=new_events,
185
189
  attributes=new_attributes,
186
190
  )
187
191
 
@@ -238,7 +242,8 @@ class LaminarContextManager:
238
242
  input: Optional[Any] = None,
239
243
  metadata: Optional[dict[str, Any]] = None,
240
244
  attributes: Optional[dict[str, Any]] = None,
241
- check_event_names: list[str] = None,
245
+ evaluate_events: Optional[list[EvaluateEvent]] = None,
246
+ events: Optional[list[Event]] = None,
242
247
  ) -> Span:
243
248
  """Internal method to create a span object. Use `ObservationContext.span` instead."""
244
249
  span = Span(
@@ -251,7 +256,8 @@ class LaminarContextManager:
251
256
  metadata=metadata,
252
257
  attributes=attributes,
253
258
  span_type=span_type,
254
- evaluate_events=check_event_names or [],
259
+ evaluate_events=evaluate_events or [],
260
+ events=events or [],
255
261
  )
256
262
  return span
257
263
 
@@ -265,6 +271,7 @@ class LaminarContextManager:
265
271
  metadata: Optional[dict[str, Any]] = None,
266
272
  attributes: Optional[dict[str, Any]] = None,
267
273
  evaluate_events: Optional[list[EvaluateEvent]] = None,
274
+ events: Optional[list[Event]] = None,
268
275
  override: bool = False,
269
276
  ) -> Span:
270
277
  """Internal method to update a span object. Use `SpanContext.update()` instead."""
@@ -275,6 +282,7 @@ class LaminarContextManager:
275
282
  metadata=metadata,
276
283
  attributes=attributes,
277
284
  evaluate_events=evaluate_events,
285
+ events=events,
278
286
  override=override,
279
287
  )
280
288
  if finalize:
@@ -287,10 +295,14 @@ class LaminarContextManager:
287
295
  value: Optional[Union[str, int, float, bool]] = None,
288
296
  timestamp: Optional[datetime.datetime] = None,
289
297
  ):
290
- span = _lmnr_stack_context.get()[-1] if _lmnr_stack_context.get() else None
291
- if not span or not isinstance(span, Span):
292
- self._log.warning(f"No active span to send event. Ignoring event. {name}")
298
+ stack = _lmnr_stack_context.get()
299
+ if not stack or not isinstance(stack[-1], Span):
300
+ self._log.warning(
301
+ f"No active span to add check event. Ignoring event. {name}"
302
+ )
293
303
  return
304
+
305
+ span = stack[-1]
294
306
  event = Event(
295
307
  name=name,
296
308
  span_id=span.id,
@@ -298,8 +310,9 @@ class LaminarContextManager:
298
310
  value=value,
299
311
  )
300
312
  span.add_event(event)
313
+ _lmnr_stack_context.set(stack)
301
314
 
302
- def evaluate_event(self, name: str, data: str):
315
+ def evaluate_event(self, name: str, evaluator: str, data: dict):
303
316
  stack = _lmnr_stack_context.get()
304
317
  if not stack or not isinstance(stack[-1], Span):
305
318
  self._log.warning(
@@ -309,10 +322,13 @@ class LaminarContextManager:
309
322
  stack[-1].evaluateEvents.append(
310
323
  EvaluateEvent(
311
324
  name=name,
325
+ evaluator=evaluator,
312
326
  data=data,
313
327
  timestamp=datetime.datetime.now(datetime.timezone.utc),
328
+ env=self.env,
314
329
  )
315
330
  )
331
+ _lmnr_stack_context.set(stack)
316
332
 
317
333
  def run_pipeline(
318
334
  self,
@@ -334,6 +350,9 @@ class LaminarContextManager:
334
350
  trace_id=trace_id,
335
351
  )
336
352
 
353
+ def set_env(self, env: dict[str, str]):
354
+ self.env = env
355
+
337
356
  def _force_finalize_trace(self):
338
357
  # TODO: flush in progress spans as error?
339
358
  pass
lmnr/sdk/decorators.py CHANGED
@@ -155,18 +155,21 @@ class LaminarDecorator:
155
155
  laminar = LaminarSingleton().get()
156
156
  laminar.event(name, value=value, timestamp=timestamp)
157
157
 
158
- def evaluate_event(self, name: str, data: str):
159
- """Evaluate an event with the given name and data. The event value will be assessed by the Laminar evaluation engine.
160
- Data is passed as an input to the agent, so you need to specify which data you want to evaluate. Most of the times,
161
- this is an output of the LLM generation, but sometimes, you may want to evaluate the input or both. In the latter case,
162
- concatenate the input and output annotating with natural language.
158
+ def evaluate_event(self, name: str, evaluator: str, data: dict):
159
+ """Evaluate an event with the given name by evaluator based on the given data.
160
+ Evaluator is the Laminar pipeline name.
161
 + Data is passed as an input to the evaluator pipeline, so you need to specify which data you want to evaluate. The prompt
162
+ of the evaluator will be templated with the keys of the data dictionary.
163
+
164
+ Usually, you would want to pass the output of LLM generation, users' messages, and some other surrounding data to 'data'.
163
165
 
164
166
  Args:
165
- name (str): Name of the event. Must be predefined in the Laminar events page.
166
- data (str): Data to be evaluated. Typically the output of the LLM generation.
167
+ name (str): Name of the event.
168
+ evaluator (str): Name of the evaluator pipeline.
169
+ data (dict): Data to be used when evaluating the event.
167
170
  """
168
171
  laminar = LaminarSingleton().get()
169
- laminar.evaluate_event(name, data)
172
+ laminar.evaluate_event(name, evaluator=evaluator, data=data)
170
173
 
171
174
  def run(
172
175
  self,
lmnr/sdk/interface.py CHANGED
@@ -98,7 +98,6 @@ class SpanContext(ObservationContext):
98
98
  output (Optional[Any], optional): output of the span. Defaults to None.
99
99
  metadata (Optional[dict[str, Any]], optional): any additional metadata to the span. Defaults to None.
100
100
  attributes (Optional[dict[str, Any]], optional): pre-defined attributes (see semantic-convention). Defaults to None.
101
- check_event_names (Optional[list[EvaluateEvent]], optional): List of events to evaluate for and tag. Defaults to None.
102
101
  override (bool, optional): override existing metadata fully. If False, metadata is merged. Defaults to False.
103
102
 
104
103
  Returns:
@@ -137,7 +136,6 @@ class SpanContext(ObservationContext):
137
136
  output (Optional[Any], optional): output of the span. Defaults to None.
138
137
  metadata (Optional[dict[str, Any]], optional): any additional metadata to the span. Defaults to None.
139
138
  attributes (Optional[dict[str, Any]], optional): pre-defined attributes (see semantic-convention). Defaults to None.
140
- check_event_names (Optional[list[EvaluateEvent]], optional): List of events to evaluate for and tag. Defaults to None.
141
139
  override (bool, optional): override existing metadata fully. If False, metadata is merged. Defaults to False.
142
140
 
143
141
  Returns:
@@ -178,15 +176,18 @@ class SpanContext(ObservationContext):
178
176
  self.observation.add_event(event)
179
177
  return self
180
178
 
181
- def evaluate_event(self, name: str, data: str) -> "SpanContext":
182
- """Evaluate an event with the given name and data. The event value will be assessed by the Laminar evaluation engine.
183
- Data is passed as an input to the agent, so you need to specify which data you want to evaluate. Most of the times,
184
- this is an output of the LLM generation, but sometimes, you may want to evaluate the input or both. In the latter case,
185
- concatenate the input and output annotating with natural language.
179
+ def evaluate_event(self, name: str, evaluator: str, data: dict) -> "SpanContext":
180
+ """Evaluate an event with the given name by evaluator based on the given data.
181
+ Evaluator is the Laminar pipeline name.
182
 + Data is passed as an input to the evaluator pipeline, so you need to specify which data you want to evaluate. The prompt
183
+ of the evaluator will be templated with the keys of the data dictionary.
184
+
185
+ Usually, you would want to pass the output of LLM generation, users' messages, and some other surrounding data to 'data'.
186
186
 
187
187
  Args:
188
- name (str): Name of the event. Must be predefined in the Laminar events page.
189
- data (str): Data to be evaluated. Typically the output of the LLM generation.
188
+ name (str): Name of the event.
189
+ evaluator (str): Name of the evaluator pipeline.
190
+ data (dict): Data to be used when evaluating the event.
190
191
 
191
192
  Returns:
192
193
  SpanContext: the updated span context
@@ -197,6 +198,7 @@ class SpanContext(ObservationContext):
197
198
  evaluate_events=[
198
199
  EvaluateEvent(
199
200
  name=name,
201
+ evaluator=evaluator,
200
202
  data=data,
201
203
  timestamp=datetime.datetime.now(datetime.timezone.utc),
202
204
  )
@@ -211,6 +213,7 @@ class SpanContext(ObservationContext):
211
213
  metadata: Optional[dict[str, Any]] = None,
212
214
  attributes: Optional[dict[str, Any]] = None,
213
215
  evaluate_events: Optional[list[EvaluateEvent]] = None,
216
+ events: Optional[list[Event]] = None,
214
217
  override: bool = False,
215
218
  finalize: bool = False,
216
219
  ) -> "SpanContext":
@@ -306,3 +309,8 @@ def trace(
306
309
  release=release,
307
310
  )
308
311
  return TraceContext(trace, None)
312
+
313
+
314
+ def initialize(env: dict[str, str]) -> None:
315
+ laminar = LaminarSingleton().get()
316
+ laminar.set_env(env)
lmnr/sdk/tracing_types.py CHANGED
@@ -8,9 +8,20 @@ from .utils import to_dict
8
8
 
9
9
 
10
10
  class EvaluateEvent(pydantic.BaseModel):
11
+ """
12
 + EvaluateEvent is an event which needs to be evaluated on the server.
13
+
14
+ Args:
15
+ env: dict[str, str]: Environment variables to be used during evaluation.
16
+ It is optional and can be left empty, because it will be merged with LaminarContextManager's env.
17
+ So you need to only set it once there.
18
+ """
19
+
11
20
  name: str
12
- data: str
21
+ evaluator: str
22
+ data: dict
13
23
  timestamp: Optional[datetime.datetime] = None
24
+ env: dict[str, str] = {}
14
25
 
15
26
 
16
27
  class Span(pydantic.BaseModel):
@@ -29,7 +40,7 @@ class Span(pydantic.BaseModel):
29
40
  output: Optional[Any] = None
30
41
  metadata: Optional[dict[str, Any]] = None
31
42
  evaluateEvents: list[EvaluateEvent] = []
32
- events: list["Event"] = None
43
+ events: list["Event"] = []
33
44
 
34
45
  def __init__(
35
46
  self,
@@ -44,6 +55,7 @@ class Span(pydantic.BaseModel):
44
55
  metadata: Optional[dict[str, Any]] = {},
45
56
  attributes: Optional[dict[str, Any]] = {},
46
57
  evaluate_events: list[EvaluateEvent] = [],
58
+ events: list["Event"] = [],
47
59
  ):
48
60
  super().__init__(
49
61
  version=version,
@@ -57,7 +69,7 @@ class Span(pydantic.BaseModel):
57
69
  metadata=metadata or {},
58
70
  attributes=attributes or {},
59
71
  evaluateEvents=evaluate_events,
60
- events=[],
72
+ events=events,
61
73
  )
62
74
 
63
75
  def update(
@@ -68,6 +80,7 @@ class Span(pydantic.BaseModel):
68
80
  metadata: Optional[dict[str, Any]] = None,
69
81
  attributes: Optional[dict[str, Any]] = None,
70
82
  evaluate_events: Optional[list[EvaluateEvent]] = None,
83
+ events: Optional[list["Event"]] = None,
71
84
  override: bool = False,
72
85
  ):
73
86
  self.endTime = end_time or datetime.datetime.now(datetime.timezone.utc)
@@ -86,9 +99,11 @@ class Span(pydantic.BaseModel):
86
99
  if override
87
100
  else self.evaluateEvents + (evaluate_events or [])
88
101
  )
102
+ new_events = events or [] if override else self.events + (events or [])
89
103
  self.metadata = new_metadata
90
104
  self.attributes = new_attributes
91
105
  self.evaluateEvents = new_evaluate_events
106
+ self.events = new_events
92
107
 
93
108
  def add_event(self, event: "Event"):
94
109
  self.events.append(event)
@@ -106,6 +121,7 @@ class Span(pydantic.BaseModel):
106
121
  if isinstance(value, pydantic.BaseModel)
107
122
  else value
108
123
  )
124
+
109
125
  obj = to_dict(obj)
110
126
  return obj
111
127
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lmnr
3
- Version: 0.3.6
3
+ Version: 0.3.7
4
4
  Summary: Python SDK for Laminar AI
5
5
  License: Apache-2.0
6
6
  Author: lmnr.ai
@@ -38,15 +38,9 @@ Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication)
38
38
 
39
39
  You can send events in two ways:
40
40
  - `.event(name, value)` – for a pre-defined event with one of possible values.
41
- - `.evaluate_event(name, data)` – for an event that our agent checks for and assigns a value from possible values.
41
+ - `.evaluate_event(name, evaluator, data)` – for an event that is evaluated by evaluator pipeline based on the data.
42
42
 
43
- There are 3 types of events:
44
- - Number - Numeric value.
45
- - String - Arbitrary string.
46
- - Boolean - Convenient to classify if something has took place or not.
47
-
48
- Important notes:
49
- - If event name does not match anything pre-defined in the UI, the event won't be saved.
43
+ Read our [docs](https://docs.lmnr.ai) to learn more about event types, how they are created and evaluated, etc.
50
44
 
51
45
  ## Instrumentation
52
46
 
@@ -70,9 +64,17 @@ You can also import `lmnr_context` in order to interact and have more control ov
70
64
  import os
71
65
  from openai import OpenAI
72
66
 
73
- from lmnr import observe, wrap_llm_call, lmnr_context
67
+ from lmnr import observe, wrap_llm_call, lmnr_context, initialize
74
68
  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
75
69
 
70
+ # add if your online evaluation pipelines need these keys
71
+ initialize(
72
+ {
73
+ "OPENAI_API_KEY": "sk-...",
74
+ "ANTHROPIC_API_KEY": "sk-...",
75
+ }
76
+ )
77
+
76
78
  @observe() # annotate all functions you want to trace
77
79
  def poem_writer(topic="turbulence"):
78
80
  prompt = f"write a poem about {topic}"
@@ -93,7 +95,7 @@ def poem_writer(topic="turbulence"):
93
95
  lmnr_context.event("topic_alignment", "good")
94
96
 
95
97
  # to trigger an automatic check for a possible event do:
96
- lmnr_context.evaluate_event("excessive_wordiness", poem)
98
+ lmnr_context.evaluate_event("excessive_wordiness", "wordiness_evaluator", {"poem": poem})
97
99
 
98
100
  return poem
99
101
 
@@ -119,7 +121,7 @@ Both `TraceContext` and `SpanContext` expose the following interfaces:
119
121
 
120
122
  In addition, `SpanContext` allows you to:
121
123
  - `event(name: str, value: str | int)` - emit a custom event at any point
122
- - `evaluate_event(name: str, data: str)` - register a possible event for automatic checking by Laminar.
124
+ - `evaluate_event(name: str, evaluator: str, data: dict)` - register a possible event for automatic checking by Laminar's evaluator pipeline.
123
125
  - `end(**kwargs)` – update the current span, and terminate it
124
126
 
125
127
  Example:
@@ -128,10 +130,18 @@ Example:
128
130
  import os
129
131
  from openai import OpenAI
130
132
 
131
- from lmnr import trace, TraceContext, SpanContext, EvaluateEvent
133
+ from lmnr import trace, TraceContext, SpanContext, EvaluateEvent, initialize
132
134
  from lmnr.semantic_conventions.gen_ai_spans import INPUT_TOKEN_COUNT, OUTPUT_TOKEN_COUNT, RESPONSE_MODEL, PROVIDER, STREAM
133
135
  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
134
136
 
137
+ # add if your online evaluation pipelines need these keys
138
+ initialize(
139
+ {
140
+ "OPENAI_API_KEY": "sk-...",
141
+ "ANTHROPIC_API_KEY": "sk-...",
142
+ }
143
+ )
144
+
135
145
  def poem_writer(t: TraceContext, topic = "turbulence"):
136
146
  span: SpanContext = t.span(name="poem_writer", input=topic)
137
147
 
@@ -154,11 +164,10 @@ def poem_writer(t: TraceContext, topic = "turbulence"):
154
164
  if topic in poem:
155
165
  llm_span.event("topic_alignment", "good") # send an event with a pre-defined name
156
166
 
157
- # note that you can register possible events here as well,
158
- # not only `llm_span.evaluate_event()`
167
+ llm_span.evaluate_event("positiveness", "positiveness_evaluator", {"poem": poem})
168
+
159
169
  llm_span.end(
160
170
  output=poem,
161
- evaluate_events=[EvaluateEvent(name="excessive_wordines", data=poem)],
162
171
  attributes={
163
172
  INPUT_TOKEN_COUNT: response.usage.prompt_tokens,
164
173
  OUTPUT_TOKEN_COUNT: response.usage.completion_tokens,
@@ -1,23 +1,23 @@
1
- lmnr/__init__.py,sha256=vUiBEqNVi-dWlFKqnyxt0387t1kzVXrwFp7ShhDJyXY,324
1
+ lmnr/__init__.py,sha256=hHpH10FQOUuTfwPDJW35ZWSYjLKNaLhm-xpW2-iIWdk,336
2
2
  lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- lmnr/sdk/client.py,sha256=e6cIvJq38a6XAU8FGWYtNXVAPlEoZEyKS7hC3M_6EkU,5749
3
+ lmnr/sdk/client.py,sha256=sFWORPHh0iPOOIMy35VpE7YaDW-oFcTKiP0wQi-Ay1I,5761
4
4
  lmnr/sdk/collector.py,sha256=6LRmPhOcmGplUDWm_sJh0dVrLTHknd_kmq7POGuAvoQ,5338
5
5
  lmnr/sdk/constants.py,sha256=USCfwuUqRx6_0xC8WUxqGj766dInqQkWJcf8U5vPK7s,34
6
- lmnr/sdk/context.py,sha256=4ngtUYhix91hUW4Kpo-WplDiNs-SCqz6LmHMl84StoA,15398
7
- lmnr/sdk/decorators.py,sha256=vEGXYJUKGNVAVEt3sRKAUTtGAdhfQbyA474kfHsZHTk,12024
8
- lmnr/sdk/interface.py,sha256=ugtNIcHZhHC_qSyu_-1Fm62_phOb4vW-Lo5VZ1WaMhc,12255
6
+ lmnr/sdk/context.py,sha256=erEdKFElK_I1aWNUbqlb7rJ7A_zHPX7aks52P9WXWZw,15926
7
+ lmnr/sdk/decorators.py,sha256=ACaspfBDVjHeBPp0-YNfyCy99SDZrF6klSG80Towrqg,12086
8
+ lmnr/sdk/interface.py,sha256=W5yq-Y9EsCoTBIWWwxV7w542wtIZ2b3f9VP6Bh7HDUw,12227
9
9
  lmnr/sdk/providers/__init__.py,sha256=wNCgQnt9-bnTNXLQWdPgyKhqA1ajiaEd1Rr2KPOpazM,54
10
10
  lmnr/sdk/providers/base.py,sha256=xc6iA8yY_VK6tbzswt-3naZ53aAXtOLur9j8eimC_ZA,1054
11
11
  lmnr/sdk/providers/fallback.py,sha256=9-srLJgDK5CWD8DIdnxo6jbSsnwDvdHC-vA06BfEkqQ,5431
12
12
  lmnr/sdk/providers/openai.py,sha256=9X2VWz4_EqQBEA1UEww2EKdqods6IzOEmaO6dnhY-Lw,5725
13
13
  lmnr/sdk/providers/utils.py,sha256=ROt82VrvezExYOxionAynD3dp6oX5JoPW6F1ayTm7q8,946
14
- lmnr/sdk/tracing_types.py,sha256=O6sfTBRKxz8WtRy4uEr-_wkSqu58mmEa1SXXJXDGwBI,6267
14
+ lmnr/sdk/tracing_types.py,sha256=hbxRSkC2PHjgqaL6RRqp1Ev1C5U5-o_sNZH5QqtDHAw,6858
15
15
  lmnr/sdk/types.py,sha256=5-Ft-l35wtmn2xxE8BTqsM3nx1zD799tRv4qiOkED50,2121
16
16
  lmnr/sdk/utils.py,sha256=9ScSKalwZbW5wAhGN_Mn26_LZewVBkTz_OG5uQi6Og4,3401
17
17
  lmnr/semantic_conventions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
18
18
  lmnr/semantic_conventions/gen_ai_spans.py,sha256=3s-2J5v3t5LcMKwK2DefPn56XpxN5oMEYtb9Mf9D_gA,1541
19
- lmnr-0.3.6.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
20
- lmnr-0.3.6.dist-info/METADATA,sha256=OcWavyUecfR_YwlTJF003WghB4mDqbvUg0ril4arC0k,9485
21
- lmnr-0.3.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
22
- lmnr-0.3.6.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
23
- lmnr-0.3.6.dist-info/RECORD,,
19
+ lmnr-0.3.7.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
20
+ lmnr-0.3.7.dist-info/METADATA,sha256=5QEiLgfp5HJ0EI67nKkQt_bcqkWyKjz8d-_Zb6rUc2g,9654
21
+ lmnr-0.3.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
22
+ lmnr-0.3.7.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
23
+ lmnr-0.3.7.dist-info/RECORD,,
File without changes
File without changes