lmnr 0.6.12__py3-none-any.whl → 0.6.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -347,7 +347,7 @@ try:
  self._process_response_choices(span, response_dict.get("choices"))

  except ImportError as e:
- logger.warning(f"LiteLLM callback unavailable: {e}")
+ logger.debug(f"LiteLLM callback unavailable: {e}")

  # Create a no-op logger when LiteLLM is not available
  class LaminarLiteLLMCallback:
@@ -12,7 +12,6 @@ from .config import (
  Config,
  )
  from .utils import (
- ProcessedContentPart,
  dont_throw,
  get_content,
  role_from_content_union,
@@ -130,27 +129,30 @@ def _set_request_attributes(span, args, kwargs):
  )

  tools: list[types.FunctionDeclaration] = []
- if kwargs.get("tools"):
- for tool in kwargs.get("tools"):
+ arg_tools = config_dict.get("tools", kwargs.get("tools"))
+ if arg_tools:
+ for tool in arg_tools:
  if isinstance(tool, types.Tool):
  tools += tool.function_declarations or []
  elif isinstance(tool, Callable):
  tools.append(types.FunctionDeclaration.from_callable(tool))
+
  for tool_num, tool in enumerate(tools):
+ tool_dict = to_dict(tool)
  set_span_attribute(
  span,
  f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.name",
- to_dict(tool).get("name"),
+ tool_dict.get("name"),
  )
  set_span_attribute(
  span,
  f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.description",
- to_dict(tool).get("description"),
+ tool_dict.get("description"),
  )
  set_span_attribute(
  span,
  f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{tool_num}.parameters",
- to_dict(tool).get("parameters"),
+ json.dumps(tool_dict.get("parameters")),
  )

  if should_send_prompts():
@@ -162,7 +164,9 @@ def _set_request_attributes(span, args, kwargs):
  set_span_attribute(
  span,
  f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
- (get_content(process_content_union(system_instruction)) or {}).get("text", ""),
+ (get_content(process_content_union(system_instruction)) or {}).get(
+ "text", ""
+ ),
  )
  set_span_attribute(
  span, f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role", "system"
@@ -174,6 +178,7 @@ def _set_request_attributes(span, args, kwargs):
  for content in contents:
  processed_content = process_content_union(content)
  content_str = get_content(processed_content)
+
  set_span_attribute(
  span,
  f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.content",
@@ -188,26 +193,35 @@ def _set_request_attributes(span, args, kwargs):
  if isinstance(processed_content, list)
  else [processed_content]
  )
- for j, block in enumerate(blocks):
+ tool_call_index = 0
+ for block in blocks:
  block_dict = to_dict(block)
+
  if not block_dict.get("function_call"):
  continue
  function_call = to_dict(block_dict.get("function_call", {}))
+
  set_span_attribute(
  span,
- f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.name",
+ f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.name",
  function_call.get("name"),
  )
  set_span_attribute(
  span,
- f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.id",
- function_call.get("id"),
+ f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.id",
+ (
+ function_call.get("id")
+ if function_call.get("id") is not None
+ else function_call.get("name")
+ ), # google genai doesn't support tool call ids
  )
  set_span_attribute(
  span,
- f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{j}.arguments",
+ f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.tool_calls.{tool_call_index}.arguments",
  json.dumps(function_call.get("arguments")),
  )
+ tool_call_index += 1
+
  set_span_attribute(
  span,
  f"{gen_ai_attributes.GEN_AI_PROMPT}.{i}.role",
@@ -258,21 +272,8 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
  candidates_list = candidates if isinstance(candidates, list) else [candidates]
  for i, candidate in enumerate(candidates_list):
  processed_content = process_content_union(candidate.content)
- if isinstance(processed_content, list):
- if all(
- isinstance(item, dict) and item.get("type") == "text"
- for item in processed_content
- ):
- content_str = processed_content[0]["text"]
- elif all(
- isinstance(item, ProcessedContentPart) and item.content
- for item in processed_content
- ):
- content_str = processed_content[0].content
- else:
- content_str = get_content(processed_content)
- else:
- content_str = get_content(processed_content)
+ content_str = get_content(processed_content)
+
  set_span_attribute(
  span, f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.role", "model"
  )
@@ -290,26 +291,33 @@ def _set_response_attributes(span, response: types.GenerateContentResponse):
  if isinstance(processed_content, list)
  else [processed_content]
  )
- for j, block in enumerate(blocks):
+
+ tool_call_index = 0
+ for block in blocks:
  block_dict = to_dict(block)
  if not block_dict.get("function_call"):
  continue
  function_call = to_dict(block_dict.get("function_call", {}))
  set_span_attribute(
  span,
- f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.name",
+ f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.name",
  function_call.get("name"),
  )
  set_span_attribute(
  span,
- f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.id",
- function_call.get("id"),
+ f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.id",
+ (
+ function_call.get("id")
+ if function_call.get("id") is not None
+ else function_call.get("name")
+ ), # google genai doesn't support tool call ids
  )
  set_span_attribute(
  span,
- f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{j}.arguments",
+ f"{gen_ai_attributes.GEN_AI_COMPLETION}.{i}.tool_calls.{tool_call_index}.arguments",
  json.dumps(function_call.get("arguments")),
  )
+ tool_call_index += 1


  @dont_throw
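In both the request and response hunks above, the per-block loop index `j` is replaced by a dedicated `tool_call_index` that only advances for blocks carrying a `function_call`, so the emitted `tool_calls.{n}` attribute indices stay contiguous even when text blocks are interleaved, and a missing tool-call id falls back to the function name. A minimal standalone sketch of that indexing pattern, using simplified dict blocks and literal attribute keys rather than the instrumentation's real types:

    # Simplified sketch: keep tool-call indices contiguous while skipping text blocks.
    blocks = [
        {"text": "Let me check the weather."},
        {"function_call": {"name": "get_weather", "arguments": {"city": "Paris"}}},
        {"text": "And the time."},
        {"function_call": {"name": "get_time", "arguments": {"tz": "CET"}}},
    ]

    attributes = {}
    tool_call_index = 0
    for block in blocks:
        call = block.get("function_call")
        if not call:
            continue  # text blocks do not consume a tool-call slot
        attributes[f"gen_ai.prompt.0.tool_calls.{tool_call_index}.name"] = call["name"]
        # Fall back to the name when no id is present (Google GenAI has no tool-call ids).
        attributes[f"gen_ai.prompt.0.tool_calls.{tool_call_index}.id"] = call.get("id") or call["name"]
        tool_call_index += 1

    print(attributes)  # indices 0 and 1, with no gap left by the interleaved text blocks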
@@ -1,3 +1,4 @@
+ import base64
  import logging
  import traceback

@@ -74,7 +75,8 @@ def to_dict(obj: BaseModel | pydantic.BaseModel | dict) -> dict[str, Any]:
  return obj
  else:
  return dict(obj)
- except Exception:
+ except Exception as e:
+ logging.error(f"Error converting to dict: {obj}, error: {e}")
  return dict(obj)


@@ -96,7 +98,7 @@ def get_content(
  else:
  return None
  elif isinstance(content, list):
- return [get_content(item) or "" for item in content if item is not None]
+ return [get_content(item) for item in content]
  elif isinstance(content, str):
  return {
  "type": "text",
@@ -226,11 +228,15 @@ def _process_image_item(
  content_index: int,
  ) -> ProcessedContentPart | dict | None:
  # Convert to openai format, so backends can handle it
+ data = blob.get("data")
+ encoded_data = (
+ base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data
+ )
  return (
  ProcessedContentPart(
  image_url=ImageUrl(
  image_url=ImageUrlInner(
- url=f"data:image/{blob.get('mime_type').split('/')[1]};base64,{blob.get('data')}",
+ url=f"data:image/{blob.get('mime_type').split('/')[1]};base64,{encoded_data}",
  )
  )
  )
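The `_process_image_item` change base64-encodes raw `bytes` blob data before embedding it in an OpenAI-style data URL, while data that is already a string passes through unchanged. A small standalone sketch of the same conversion, with an assumed blob shape:

    import base64

    def to_data_url(blob: dict) -> str:
        # Assumed blob shape: {"mime_type": "image/png", "data": <bytes or base64 str>}
        data = blob.get("data")
        encoded = base64.b64encode(data).decode("utf-8") if isinstance(data, bytes) else data
        subtype = blob.get("mime_type").split("/")[1]
        return f"data:image/{subtype};base64,{encoded}"

    # Bytes get encoded here; a pre-encoded string would be used as-is.
    print(to_data_url({"mime_type": "image/png", "data": b"\x89PNG\r\n"}))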
@@ -15,13 +15,14 @@ class AsyncEvals(BaseAsyncResource):
  """Resource for interacting with Laminar evaluations API."""

  async def init(
- self, name: str | None = None, group_name: str | None = None
+ self, name: str | None = None, group_name: str | None = None, metadata: dict[str, Any] | None = None
  ) -> InitEvaluationResponse:
  """Initialize a new evaluation.

  Args:
  name (str | None, optional): Name of the evaluation. Defaults to None.
  group_name (str | None, optional): Group name for the evaluation. Defaults to None.
+ metadata (dict[str, Any] | None, optional): Metadata to associate with. Defaults to None.

  Returns:
  InitEvaluationResponse: The response from the initialization request.
@@ -31,6 +32,7 @@ class AsyncEvals(BaseAsyncResource):
  json={
  "name": name,
  "groupName": group_name,
+ "metadata": metadata,
  },
  headers=self._headers(),
  )
@@ -45,6 +47,7 @@ class AsyncEvals(BaseAsyncResource):
  self,
  name: str | None = None,
  group_name: str | None = None,
+ metadata: dict[str, Any] | None = None,
  ) -> uuid.UUID:
  """
  Create a new evaluation and return its ID.
@@ -52,11 +55,12 @@ class AsyncEvals(BaseAsyncResource):
  Parameters:
  name (str | None, optional): Optional name of the evaluation.
  group_name (str | None, optional): An identifier to group evaluations.
-
+ metadata (dict[str, Any] | None, optional): Metadata to associate with. Defaults to None.
+
  Returns:
  uuid.UUID: The evaluation ID.
  """
- evaluation = await self.init(name=name, group_name=group_name)
+ evaluation = await self.init(name=name, group_name=group_name, metadata=metadata)
  return evaluation.id

  async def create_datapoint(
@@ -17,13 +17,14 @@ class Evals(BaseResource):
  """Resource for interacting with Laminar evaluations API."""

  def init(
- self, name: str | None = None, group_name: str | None = None
+ self, name: str | None = None, group_name: str | None = None, metadata: dict[str, Any] | None = None
  ) -> InitEvaluationResponse:
  """Initialize a new evaluation.

  Args:
  name (str | None, optional): Name of the evaluation. Defaults to None.
  group_name (str | None, optional): Group name for the evaluation. Defaults to None.
+ metadata (dict[str, Any] | None, optional): Metadata to associate with. Defaults to None.

  Returns:
  InitEvaluationResponse: The response from the initialization request.
@@ -33,6 +34,7 @@ class Evals(BaseResource):
  json={
  "name": name,
  "groupName": group_name,
+ "metadata": metadata,
  },
  headers=self._headers(),
  )
@@ -47,6 +49,7 @@ class Evals(BaseResource):
  self,
  name: str | None = None,
  group_name: str | None = None,
+ metadata: dict[str, Any] | None = None,
  ) -> uuid.UUID:
  """
  Create a new evaluation and return its ID.
@@ -54,11 +57,12 @@ class Evals(BaseResource):
  Parameters:
  name (str | None, optional): Optional name of the evaluation.
  group_name (str | None, optional): An identifier to group evaluations.
-
+ metadata (dict[str, Any] | None, optional): Metadata to associate with. Defaults to None.
+
  Returns:
  uuid.UUID: The evaluation ID.
  """
- evaluation = self.init(name=name, group_name=group_name)
+ evaluation = self.init(name=name, group_name=group_name, metadata=metadata)
  return evaluation.id

  def create_datapoint(
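Both `AsyncEvals` and `Evals` now accept an optional `metadata` dict on `init()` and `create()`, forwarded as the `metadata` field of the request body. A hedged usage sketch; the `LaminarClient` constructor and its `project_api_key` argument are assumptions about the SDK's client setup and do not appear in this diff:

    from lmnr import LaminarClient  # assumed export; adjust to your client setup

    client = LaminarClient(project_api_key="lmnr-project-key")  # hypothetical construction

    # metadata is new in 0.6.14 and is sent as the "metadata" field of the init request.
    eval_id = client.evals.create(
        name="prompt-regression",
        group_name="nightly",
        metadata={"git_sha": "abc123", "dataset_version": "v2"},
    )
    print(eval_id)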
lmnr/sdk/evaluations.py CHANGED
@@ -104,6 +104,7 @@ class Evaluation:
  evaluators: dict[str, EvaluatorFunction | HumanEvaluator],
  name: str | None = None,
  group_name: str | None = None,
+ metadata: dict[str, Any] | None = None,
  concurrency_limit: int = DEFAULT_BATCH_SIZE,
  project_api_key: str | None = None,
  base_url: str | None = None,
@@ -143,6 +144,7 @@ class Evaluation:
  evaluations. Only evaluations within the same group_name can be\
  visually compared. If not provided, "default" is assigned.
  Defaults to None
+ metadata (dict[str, Any] | None): optional metadata to associate with\
  concurrency_limit (int, optional): The concurrency limit for\
  evaluation. This many data points will be evaluated in parallel\
  with a pool of workers.
@@ -192,6 +194,7 @@ class Evaluation:
  self.evaluators = evaluators
  self.group_name = group_name
  self.name = name
+ self.metadata = metadata
  self.concurrency_limit = concurrency_limit
  self.batch_size = concurrency_limit
  self._logger = get_default_logger(self.__class__.__name__)
@@ -242,7 +245,7 @@ class Evaluation:
  self.reporter.start(len(self.data))
  try:
  evaluation = await self.client.evals.init(
- name=self.name, group_name=self.group_name
+ name=self.name, group_name=self.group_name, metadata=self.metadata
  )
  result_datapoints = await self._evaluate_in_batches(evaluation.id)

@@ -409,6 +412,7 @@ def evaluate(
  evaluators: dict[str, EvaluatorFunction | HumanEvaluator],
  name: str | None = None,
  group_name: str | None = None,
+ metadata: dict[str, Any] | None = None,
  concurrency_limit: int = DEFAULT_BATCH_SIZE,
  project_api_key: str | None = None,
  base_url: str | None = None,
@@ -452,6 +456,7 @@ def evaluate(
  Only evaluations within the same group_name can be visually compared.\
  If not provided, set to "default".
  Defaults to None
+ metadata (dict[str, Any] | None, optional): Optional metadata to associate with\
  concurrency_limit (int, optional): The concurrency limit for evaluation.
  Defaults to DEFAULT_BATCH_SIZE.
  project_api_key (str | None, optional): The project API key.
@@ -478,6 +483,7 @@ def evaluate(
  executor=executor,
  evaluators=evaluators,
  group_name=group_name,
+ metadata=metadata,
  name=name,
  concurrency_limit=concurrency_limit,
  project_api_key=project_api_key,
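`evaluate()` and the `Evaluation` class pass the same `metadata` through to `client.evals.init(...)`, so run-level metadata can now be attached from the public entry point. A sketch of a call using the new parameter; the datapoint shape and the evaluator signatures shown here are simplified assumptions, not taken from this diff:

    from lmnr import evaluate  # assumed top-level export

    evaluate(
        data=[{"data": {"question": "2 + 2?"}, "target": "4"}],           # hypothetical datapoint shape
        executor=lambda data: "4",                                         # hypothetical executor
        evaluators={"exact": lambda output, target: int(output == target)},
        group_name="math-suite",
        metadata={"run": "ci", "model": "gemini-2.0-flash"},               # new in 0.6.14
    )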
lmnr/version.py CHANGED
@@ -3,7 +3,7 @@ import httpx
  from packaging import version


- __version__ = "0.6.12"
+ __version__ = "0.6.14"
  PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"

@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: lmnr
- Version: 0.6.12
+ Version: 0.6.14
  Summary: Python SDK for Laminar
  License: Apache-2.0
  Author: lmnr.ai
@@ -3,11 +3,11 @@ lmnr/cli.py,sha256=uHgLUfN_6eINtUlcQdOtODf2tI9AiwmlhojQF4UMB5Y,6047
  lmnr/opentelemetry_lib/.flake8,sha256=bCxuDlGx3YQ55QHKPiGJkncHanh9qGjQJUujcFa3lAU,150
  lmnr/opentelemetry_lib/__init__.py,sha256=aWKsqRXUhVhu2BS555nO2JhZSsK8bTUylAVwWybquGE,2160
  lmnr/opentelemetry_lib/decorators/__init__.py,sha256=45HVoYnHC1Y9D_VSkioDbqD3gm4RPC5sKoztomBI5j8,8496
- lmnr/opentelemetry_lib/litellm/__init__.py,sha256=PcZtTdoHZ0NyylKB-07FZwcQ02Jou3R0esk9_soNxJ0,14860
+ lmnr/opentelemetry_lib/litellm/__init__.py,sha256=wjo46It5GdhmxPCaiA8kaKyaz3VsuRDRYUCCstvK0-Y,14858
  lmnr/opentelemetry_lib/litellm/utils.py,sha256=2ozwVT-C3HIDEJ8Rekx7QYXouvNMqtEteCOHVRUgGic,539
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=6Fvkc_zZEX1lk8g6ZGFrADLNOL055pkMdO-hEef8qBY,18525
+ lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py,sha256=mltiTDVCCyMuhQNuoLHvblg9O5X0ncG6xN3f1opSeQU,18613
  lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py,sha256=25zevJ7g3MtJP_5gju3jBH7-wg7SbDkktysuUO29ksI,245
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=ICQENOiICTKodjZVHhq3H5RIRY5bbuWp_KmzkDNgDRM,7471
+ lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py,sha256=8SSBliRoJtiZME5RDEwt90CI2BadKPHQrtV4p6bDy_0,7669
  lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py,sha256=Jyv9koZdGA4-oTaB7ATB7DaX7aNOY-3YOGL4wX0c7PM,3107
  lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py,sha256=nf9sJZXnnts4gYZortEiDvwYjYqYJZTAT0zutuP_R6Y,1512
  lmnr/opentelemetry_lib/tracing/__init__.py,sha256=27QogAe-aHyrVr6b56-DduUm0KEE24K1aV2e1nouTNg,6007
@@ -35,26 +35,26 @@ lmnr/sdk/client/asynchronous/resources/__init__.py,sha256=9fkjlVJS8zhnCTITjhow17
  lmnr/sdk/client/asynchronous/resources/agent.py,sha256=Ong3K2KRLN7agx1_-aZxMGcT_OGF3_ZGtFLm8aPMbYw,17788
  lmnr/sdk/client/asynchronous/resources/base.py,sha256=aJ43Q1rltg23IQaI4eeaZKckxVTgDUbCJrChhQCUEoE,986
  lmnr/sdk/client/asynchronous/resources/browser_events.py,sha256=T-DUbbAfMQ2VqiVfgVplxuTaJZuoNcC1O6RCxdfw7UQ,1163
- lmnr/sdk/client/asynchronous/resources/evals.py,sha256=dYFuHmXW_FFNsmKC7_NuhxowzCJVUrRmrxeAJ_7EzOA,5420
+ lmnr/sdk/client/asynchronous/resources/evals.py,sha256=jG-AlpFmV-8mlGOy0FhXldnO2tBWoEerzeY2X_CzIL0,5761
  lmnr/sdk/client/asynchronous/resources/tags.py,sha256=VbsBMp120d_8drGFr1Obp4xSRktzPC-3kOYcblZnvKA,2565
  lmnr/sdk/client/synchronous/resources/__init__.py,sha256=hDGyNARdG3J25lLAP8JnlER7r8JL-JQuPN1xdheiCw4,318
  lmnr/sdk/client/synchronous/resources/agent.py,sha256=mnTu6toN2LbgmEhQ-mdZ0CzNAnkrGiksrys0AyMwz2A,17809
  lmnr/sdk/client/synchronous/resources/base.py,sha256=ne1ZZ10UmNkMrECVvClcEJfcFJlSGvaXOC8K6mZTPdY,971
  lmnr/sdk/client/synchronous/resources/browser_events.py,sha256=9rFYWZesXQomnFgbZ590tGFMTaNj0OAzT9RcFwD8q_Y,1135
- lmnr/sdk/client/synchronous/resources/evals.py,sha256=odN9ZfZnUXKzFZJ6AQDrIjEljqnj8aQKP1ivY188WGo,6667
+ lmnr/sdk/client/synchronous/resources/evals.py,sha256=QV_v4jRRn4r_sk2FjvqdbAc1-Wa2GUl3qWrCzhbQZsA,7008
  lmnr/sdk/client/synchronous/resources/tags.py,sha256=cNMEzMDhlBNpI7J4x6xkFAANiNSq-Vuu_zi5NPk2kcA,2485
  lmnr/sdk/client/synchronous/sync_client.py,sha256=IIzj-mAwHHoRuUX9KkJtrzTGi5UOygbA8wiA9Aqzf2E,4907
  lmnr/sdk/datasets.py,sha256=P9hRxfl7-I6qhLFFGgU-r_I7RJfLtF6sL56g5fKIbAA,1708
  lmnr/sdk/decorators.py,sha256=1uu9xxBYgblFqlhQqH17cZYq7babAmB1lEtvBgTsP0E,4468
  lmnr/sdk/eval_control.py,sha256=KROUrDhcZTrptRZ-hxvr60_o_Gt_8u045jb4cBXcuoY,184
- lmnr/sdk/evaluations.py,sha256=fMUDueAgGv9fyTuX7n0DsS8lOzrbZNMgPorA037tgDU,21458
+ lmnr/sdk/evaluations.py,sha256=rUDGpI1eNKoz89mtgFFPIFXn6zYLggaXvWy4KKY2RiU,21806
  lmnr/sdk/laminar.py,sha256=oOVco_c9ZstT71HsquGsgbtFumXd2Ejz0rl_qpmMlTU,33996
  lmnr/sdk/log.py,sha256=nt_YMmPw1IRbGy0b7q4rTtP4Yo3pQfNxqJPXK3nDSNQ,2213
  lmnr/sdk/types.py,sha256=ZQp5SeYJNZsK3KrbSeXPY_xn6mGjW5mSw_i0Rd_Oa4k,12328
  lmnr/sdk/utils.py,sha256=yrcHIhoADf9lWH9qJWZMmkRWYvd0DuxPSLP3mY6YFw0,4327
- lmnr/version.py,sha256=3yq9kXsV09LEVOD6hyJwTYbh8vgoF0ejCPZdrS2JiGs,1322
- lmnr-0.6.12.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
- lmnr-0.6.12.dist-info/METADATA,sha256=UFjXPicsNBWXgXPxgeBuvtw4d-8dq1qJ8H-H9LeTprY,15186
- lmnr-0.6.12.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- lmnr-0.6.12.dist-info/entry_points.txt,sha256=K1jE20ww4jzHNZLnsfWBvU3YKDGBgbOiYG5Y7ivQcq4,37
- lmnr-0.6.12.dist-info/RECORD,,
+ lmnr/version.py,sha256=R9BOGoH8CfmmKLkCU4zXkOvGWsR9Tz0zP4HbcoSRTAA,1322
+ lmnr-0.6.14.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+ lmnr-0.6.14.dist-info/METADATA,sha256=lrCS9CWz7RZEh0JdrfuBdO4J3W5sBw_l1ElZbxm1c7E,15186
+ lmnr-0.6.14.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ lmnr-0.6.14.dist-info/entry_points.txt,sha256=K1jE20ww4jzHNZLnsfWBvU3YKDGBgbOiYG5Y7ivQcq4,37
+ lmnr-0.6.14.dist-info/RECORD,,