lmnr 0.3.7__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/sdk/tracing_types.py DELETED
@@ -1,210 +0,0 @@
- from typing import Any, Literal, Optional, Union
- import datetime
- import pydantic
- import uuid
-
- from .constants import CURRENT_TRACING_VERSION
- from .utils import to_dict
-
-
- class EvaluateEvent(pydantic.BaseModel):
-     """
-     EvaluateEvent is an event which need to be evaluated on the server.
-
-     Args:
-         env: dict[str, str]: Environment variables to be used during evaluation.
-             It is optional and can be left empty, because it will be merged with LaminarContextManager's env.
-             So you need to only set it once there.
-     """
-
-     name: str
-     evaluator: str
-     data: dict
-     timestamp: Optional[datetime.datetime] = None
-     env: dict[str, str] = {}
-
-
- class Span(pydantic.BaseModel):
-     version: str = CURRENT_TRACING_VERSION
-     spanType: Literal["DEFAULT", "LLM"] = "DEFAULT"
-     id: uuid.UUID
-     traceId: uuid.UUID
-     parentSpanId: Optional[uuid.UUID] = None
-     name: str
-     # generated at start of span, so required
-     startTime: datetime.datetime
-     # generated at end of span, optional when span is still running
-     endTime: Optional[datetime.datetime] = None
-     attributes: dict[str, Any] = {}
-     input: Optional[Any] = None
-     output: Optional[Any] = None
-     metadata: Optional[dict[str, Any]] = None
-     evaluateEvents: list[EvaluateEvent] = []
-     events: list["Event"] = []
-
-     def __init__(
-         self,
-         name: str,
-         trace_id: uuid.UUID,
-         start_time: Optional[datetime.datetime] = None,
-         version: str = CURRENT_TRACING_VERSION,
-         span_type: Literal["DEFAULT", "LLM"] = "DEFAULT",
-         id: Optional[uuid.UUID] = None,
-         parent_span_id: Optional[uuid.UUID] = None,
-         input: Optional[Any] = None,
-         metadata: Optional[dict[str, Any]] = {},
-         attributes: Optional[dict[str, Any]] = {},
-         evaluate_events: list[EvaluateEvent] = [],
-         events: list["Event"] = [],
-     ):
-         super().__init__(
-             version=version,
-             spanType=span_type,
-             id=id or uuid.uuid4(),
-             traceId=trace_id,
-             parentSpanId=parent_span_id,
-             name=name,
-             startTime=start_time or datetime.datetime.now(datetime.timezone.utc),
-             input=input,
-             metadata=metadata or {},
-             attributes=attributes or {},
-             evaluateEvents=evaluate_events,
-             events=events,
-         )
-
-     def update(
-         self,
-         end_time: Optional[datetime.datetime],
-         input: Optional[Any] = None,
-         output: Optional[Any] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         attributes: Optional[dict[str, Any]] = None,
-         evaluate_events: Optional[list[EvaluateEvent]] = None,
-         events: Optional[list["Event"]] = None,
-         override: bool = False,
-     ):
-         self.endTime = end_time or datetime.datetime.now(datetime.timezone.utc)
-         self.input = input
-         self.output = output
-         new_metadata = (
-             metadata if override else {**(self.metadata or {}), **(metadata or {})}
-         )
-         new_attributes = (
-             attributes or {}
-             if override
-             else {**(self.attributes or {}), **(attributes or {})}
-         )
-         new_evaluate_events = (
-             evaluate_events or []
-             if override
-             else self.evaluateEvents + (evaluate_events or [])
-         )
-         new_events = events or [] if override else self.events + (events or [])
-         self.metadata = new_metadata
-         self.attributes = new_attributes
-         self.evaluateEvents = new_evaluate_events
-         self.events = new_events
-
-     def add_event(self, event: "Event"):
-         self.events.append(event)
-
-     def to_dict(self) -> dict[str, Any]:
-         try:
-             obj = self.model_dump()
-         except TypeError:
-             # if inner values are pydantic models, we need to call model_dump on them
-             # see: https://github.com/pydantic/pydantic/issues/7713
-             obj = {}
-             for key, value in self.__dict__.items():
-                 obj[key] = (
-                     value.model_dump()
-                     if isinstance(value, pydantic.BaseModel)
-                     else value
-                 )
-
-         obj = to_dict(obj)
-         return obj
-
-
- class Trace(pydantic.BaseModel):
-     id: uuid.UUID
-     version: str = CURRENT_TRACING_VERSION
-     success: bool = True
-     userId: Optional[str] = None # provided by user or null
-     sessionId: Optional[str] = None # provided by user or uuid()
-     release: Optional[str] = None
-     metadata: Optional[dict[str, Any]] = None
-
-     def __init__(
-         self,
-         success: bool = True,
-         id: Optional[uuid.UUID] = None,
-         user_id: Optional[str] = None,
-         session_id: Optional[str] = None,
-         release: Optional[str] = None,
-         metadata: Optional[dict[str, Any]] = None,
-     ):
-         id_ = id or uuid.uuid4()
-         super().__init__(
-             id=id_,
-             success=success,
-             userId=user_id,
-             sessionId=session_id,
-             release=release,
-             metadata=metadata or {},
-         )
-
-     def to_dict(self) -> dict[str, Any]:
-         try:
-             obj = self.model_dump()
-         except TypeError:
-             # if inner values are pydantic models, we need to call model_dump on them
-             # see: https://github.com/pydantic/pydantic/issues/7713
-             obj = {}
-             for key, value in self.__dict__.items():
-                 obj[key] = (
-                     value.model_dump()
-                     if isinstance(value, pydantic.BaseModel)
-                     else value
-                 )
-         obj = to_dict(obj)
-         return obj
-
-
- class Event(pydantic.BaseModel):
-     id: uuid.UUID
-     templateName: str
-     timestamp: datetime.datetime
-     spanId: uuid.UUID
-     value: Optional[Union[int, str, float, bool]] = None
-
-     def __init__(
-         self,
-         name: str,
-         span_id: uuid.UUID,
-         timestamp: Optional[datetime.datetime] = None,
-         value: Optional[Union[int, str, float, bool]] = None,
-     ):
-         super().__init__(
-             id=uuid.uuid4(),
-             templateName=name,
-             spanId=span_id,
-             timestamp=timestamp or datetime.datetime.now(datetime.timezone.utc),
-             value=value,
-         )
-
-     def to_dict(self) -> dict[str, Any]:
-         try:
-             obj = self.model_dump()
-         except TypeError:
-             # if inner values are pydantic models, we need to call model_dump on them
-             # see: https://github.com/pydantic/pydantic/issues/7713
-             obj = {}
-             for key, value in self.__dict__.items():
-                 obj[key] = (
-                     value.model_dump()
-                     if isinstance(value, pydantic.BaseModel)
-                     else value
-                 )
-         obj = to_dict(obj)
-         return obj
lmnr/semantic_conventions/gen_ai_spans.py DELETED
@@ -1,48 +0,0 @@
- # source: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md
- # last updated: 2024-08-26
-
- REQUEST_MODEL: str = "gen_ai.request.model"
- RESPONSE_MODEL: str = "gen_ai.response.model"
- PROVIDER: str = "gen_ai.system"
- INPUT_TOKEN_COUNT: str = "gen_ai.usage.input_tokens"
- OUTPUT_TOKEN_COUNT: str = "gen_ai.usage.output_tokens"
- TOTAL_TOKEN_COUNT: str = "gen_ai.usage.total_tokens" # custom, not in the spec
- # https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/semcov/__init__.py
- COST: str = "gen_ai.usage.cost"
-
- OPERATION: str = "gen_ai.operation.name"
-
- FREQUENCY_PENALTY: str = "gen_ai.request.frequency_penalty"
- TEMPERATURE: str = "gen_ai.request.temperature"
- MAX_TOKENS: str = "gen_ai.request.max_tokens"
- PRESENCE_PENALTY: str = "gen_ai.request.presence_penalty"
- STOP_SEQUENCES: str = "gen_ai.request.stop_sequences"
- TEMPERATURE: str = "gen_ai.request.temperature"
- TOP_P: str = "gen_ai.request.top_p"
- TOP_K: str = "gen_ai.request.top_k"
-
- # https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/semcov/__init__.py
- STREAM: str = "gen_ai.request.is_stream"
-
- FINISH_REASONS = "gen_ai.response.finish_reasons"
-
- __all__ = [
-     "REQUEST_MODEL",
-     "RESPONSE_MODEL",
-     "PROVIDER",
-     "INPUT_TOKEN_COUNT",
-     "OUTPUT_TOKEN_COUNT",
-     "TOTAL_TOKEN_COUNT",
-     "COST",
-     "OPERATION",
-     "FREQUENCY_PENALTY",
-     "TEMPERATURE",
-     "MAX_TOKENS",
-     "PRESENCE_PENALTY",
-     "STOP_SEQUENCES",
-     "TEMPERATURE",
-     "TOP_P",
-     "TOP_K",
-     "STREAM",
-     "FINISH_REASONS",
- ]
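The constants above are attribute keys meant to be attached to spans created with `span_type="LLM"`, as the README later in this diff shows with `RESPONSE_MODEL`, `PROVIDER`, and the token counts. A minimal sketch of recording request-time parameters on such a span, assuming a `span` object obtained from `TraceContext.span(...)` as in the manual-instrumentation example; the model name and parameter values are illustrative:

```python
from lmnr.semantic_conventions.gen_ai_spans import (
    MAX_TOKENS,
    REQUEST_MODEL,
    STREAM,
    TEMPERATURE,
)

# span_type="LLM" is what gives these gen_ai.* attributes their semantics
llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")
llm_span.update(
    attributes={
        REQUEST_MODEL: "gpt-4o-mini",  # gen_ai.request.model (illustrative)
        TEMPERATURE: 0.7,              # gen_ai.request.temperature (illustrative)
        MAX_TOKENS: 256,               # gen_ai.request.max_tokens (illustrative)
        STREAM: False,                 # gen_ai.request.is_stream
    }
)
```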
lmnr-0.3.7.dist-info/METADATA DELETED
@@ -1,266 +0,0 @@
- Metadata-Version: 2.1
- Name: lmnr
- Version: 0.3.7
- Summary: Python SDK for Laminar AI
- License: Apache-2.0
- Author: lmnr.ai
- Requires-Python: >=3.9,<4.0
- Classifier: License :: OSI Approved :: Apache Software License
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
- Requires-Dist: backoff (>=2.2.1,<3.0.0)
- Requires-Dist: openai (>=1.41.1,<2.0.0)
- Requires-Dist: pydantic (>=2.7.4,<3.0.0)
- Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
- Requires-Dist: requests (>=2.32.3,<3.0.0)
- Description-Content-Type: text/markdown
-
- # Laminar AI
-
- This repo provides the core for code generation, the Laminar CLI, and the Laminar SDK.
-
- ## Quickstart
- ```sh
- python3 -m venv .myenv
- source .myenv/bin/activate # or use your favorite env management tool
-
- pip install lmnr
- ```
-
- Create a .env file at the root and add the `LMNR_PROJECT_API_KEY` value to it.
-
- Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `LMNR_PROJECT_API_KEY`.
-
- ## Sending events
-
- You can send events in two ways:
- - `.event(name, value)` – for a pre-defined event with one of the possible values.
- - `.evaluate_event(name, evaluator, data)` – for an event that is evaluated by an evaluator pipeline based on the data.
-
- Read our [docs](https://docs.lmnr.ai) to learn more about event types, how they are created and evaluated, etc.
-
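As a hedged illustration of the two calls just described (the names `observe`, `lmnr_context.event`, and `lmnr_context.evaluate_event` come from the examples later in this README; the event names, evaluator name, payload, and the `summarize` function are placeholders):

```python
from lmnr import lmnr_context, observe

@observe()
def summarize(text: str) -> str:
    summary = text[:100]  # stand-in for a real LLM call

    # pre-defined event with one of the possible values
    lmnr_context.event("summary_quality", "good")

    # register an event to be evaluated by an evaluator pipeline on the server
    lmnr_context.evaluate_event(
        "hallucination", "hallucination_evaluator", {"summary": summary}
    )
    return summary
```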
- ## Instrumentation
-
- We provide two ways to instrument your Python code:
- - With `@observe()` decorators and `wrap_llm_call` helpers
- - Manually
-
- It is important not to mix the two styles of instrumentation, as this can lead to unpredictable results.
-
- ## Decorator instrumentation example
-
- For easy automatic instrumentation, we provide two simple primitives:
-
- - `observe` - a multi-purpose automatic decorator that starts traces and spans when functions are entered, and finishes them when functions return
- - `wrap_llm_call` - a function that takes in your LLM call and returns a "decorated" version of it. This does all the same things as `observe`, plus
-   a few utilities around LLM-specific things, such as counting tokens and recording model params.
-
- You can also import `lmnr_context` to interact with and have more control over the context of the current span.
-
- ```python
- import os
- from openai import OpenAI
-
- from lmnr import observe, wrap_llm_call, lmnr_context, initialize
- client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
-
- # add if your online evaluation pipelines need these keys
- initialize(
-     {
-         "OPENAI_API_KEY": "sk-...",
-         "ANTHROPIC_API_KEY": "sk-...",
-     }
- )
-
- @observe() # annotate all functions you want to trace
- def poem_writer(topic="turbulence"):
-     prompt = f"write a poem about {topic}"
-
-     # wrap the actual final call to LLM with `wrap_llm_call`
-     response = wrap_llm_call(client.chat.completions.create)(
-         model="gpt-4o",
-         messages=[
-             {"role": "system", "content": "You are a helpful assistant."},
-             {"role": "user", "content": prompt},
-         ],
-     )
-
-     poem = response.choices[0].message.content
-
-     if topic in poem:
-         # send an event with a pre-defined name
-         lmnr_context.event("topic_alignment", "good")
-
-     # to trigger an automatic check for a possible event do:
-     lmnr_context.evaluate_event("excessive_wordiness", "wordiness_evaluator", {"poem": poem})
-
-     return poem
-
- if __name__ == "__main__":
-     print(poem_writer(topic="laminar flow"))
- ```
-
- This gives the advantage of quick instrumentation, but it is somewhat limited in flexibility and does not work as expected with threading.
- This is because we use `contextvars.ContextVar` for this, and because of how Python manages them between threads.
-
- If you want to instrument your code manually, read on to the next section.
-
- ## Manual instrumentation example
-
- For manual instrumentation you will need to import the following:
- - `trace` - this is a function to start a trace. It returns a `TraceContext`
- - `TraceContext` - a pointer to the current trace that you can pass around functions as you want.
- - `SpanContext` - a pointer to the current span that you can pass around functions as you want.
-
- Both `TraceContext` and `SpanContext` expose the following interfaces:
- - `span(name: str, **kwargs)` - create a child span within the current context. Returns `SpanContext`
- - `update(**kwargs)` - update the current trace or span and return it. Returns `TraceContext` or `SpanContext`. Useful when some metadata becomes known later during program execution.
-
- In addition, `SpanContext` allows you to:
- - `event(name: str, value: str | int)` - emit a custom event at any point
- - `evaluate_event(name: str, evaluator: str, data: dict)` - register a possible event for automatic checking by Laminar's evaluator pipeline.
- - `end(**kwargs)` – update the current span and terminate it
-
- Example:
-
- ```python
- import os
- from openai import OpenAI
-
- from lmnr import trace, TraceContext, SpanContext, EvaluateEvent, initialize
- from lmnr.semantic_conventions.gen_ai_spans import INPUT_TOKEN_COUNT, OUTPUT_TOKEN_COUNT, RESPONSE_MODEL, PROVIDER, STREAM
- client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
-
- # add if your online evaluation pipelines need these keys
- initialize(
-     {
-         "OPENAI_API_KEY": "sk-...",
-         "ANTHROPIC_API_KEY": "sk-...",
-     }
- )
-
- def poem_writer(t: TraceContext, topic = "turbulence"):
-     span: SpanContext = t.span(name="poem_writer", input=topic)
-
-     prompt = f"write a poem about {topic}"
-     messages = [
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": prompt},
-     ]
-     # create a child span within the current `poem_writer` span.
-     llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")
-
-     response = client.chat.completions.create(
-         model="gpt-4o-mini",
-         messages=[
-             {"role": "system", "content": "You are a helpful assistant."},
-             {"role": "user", "content": "Hello. What is the capital of France?"},
-         ],
-     )
-     poem = response.choices[0].message.content
-     if topic in poem:
-         llm_span.event("topic_alignment", "good") # send an event with a pre-defined name
-
-     llm_span.evaluate_event("positiveness", "positiveness_evaluator", {"poem": poem})
-
-     llm_span.end(
-         output=poem,
-         attributes={
-             INPUT_TOKEN_COUNT: response.usage.prompt_tokens,
-             OUTPUT_TOKEN_COUNT: response.usage.completion_tokens,
-             RESPONSE_MODEL: response.model,
-             PROVIDER: 'openai',
-             STREAM: False
-         }
-     )
-     span.end(output=poem)
-     return poem
-
-
- t: TraceContext = trace(user_id="user123", session_id="session123", release="release")
- poem_writer(t, topic="laminar flow")
- ```
-
- ## Manual attributes
-
- You can specify span attributes when creating/updating/ending spans.
-
- If you use [decorator instrumentation](#decorator-instrumentation-example), `wrap_llm_call` handles all of this for you.
-
- Example usage:
-
- ```python
- from lmnr.semantic_conventions.gen_ai_spans import REQUEST_MODEL
-
- # span_type = LLM is important for correct attribute semantics
- llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")
- llm_span.update(
-     attributes={REQUEST_MODEL: "gpt-4o-mini"}
- )
- response = client.chat.completions.create(
-     model="gpt-4o-mini",
-     messages=[
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": "Hello. What is the capital of France?"},
-     ],
- )
- ```
-
- Semantics:
-
- Check for the available semantic conventions in `lmnr.semantic_conventions.gen_ai_spans`.
-
- You can specify the cost with `COST`. Otherwise, the cost will be calculated
- on the Laminar servers, provided the following are specified:
-
- - span_type is `"LLM"`
- - Model provider: `PROVIDER`, e.g. 'openai', 'anthropic'
- - Output tokens: `OUTPUT_TOKEN_COUNT`
- - Input tokens: `INPUT_TOKEN_COUNT`*
- - Model. We look at `RESPONSE_MODEL` first, and then, if it is not present, we take the value of `REQUEST_MODEL`
-
- \* Also, when `PROVIDER` is `"openai"`, `STREAM` is set to `True`, and `INPUT_TOKEN_COUNT` is not set, we will calculate
- the number of input tokens on the server using [tiktoken](https://github.com/zurawiki/tiktoken-rs) and
- use it in the cost calculation.
- This is done because OpenAI does not stream the usage back
- when streaming is enabled. Output token count is (approximately) equal to the number of streaming
- events sent by OpenAI, but there is no way to calculate the input token count other than by re-tokenizing.
-
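As a sketch of the two options described above (the attribute keys come from `lmnr.semantic_conventions.gen_ai_spans`; `llm_span`, `poem`, and `response` are as in the manual-instrumentation example, and the explicit cost value is purely illustrative):

```python
from lmnr.semantic_conventions.gen_ai_spans import (
    COST,
    INPUT_TOKEN_COUNT,
    OUTPUT_TOKEN_COUNT,
    PROVIDER,
    RESPONSE_MODEL,
)

# Let the Laminar servers compute the cost of an LLM span by reporting
# the provider, the model, and the token counts.
llm_span.end(
    output=poem,
    attributes={
        PROVIDER: "openai",
        RESPONSE_MODEL: response.model,
        INPUT_TOKEN_COUNT: response.usage.prompt_tokens,
        OUTPUT_TOKEN_COUNT: response.usage.completion_tokens,
        # Alternatively, skip the server-side calculation and report the
        # cost directly, e.g. COST: 0.00042 (illustrative value).
    },
)
```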
- ## Making Laminar pipeline calls
-
- When you are ready to use your pipeline in your code, deploy it in Laminar by selecting the target version for the pipeline.
-
- Once your pipeline target is set, you can call it from Python in just a few lines.
-
- Example use:
-
- ```python
- from lmnr import Laminar
-
- # for decorator instrumentation, do: `from lmnr import lmnr_context`
-
- l = Laminar('<YOUR_PROJECT_API_KEY>')
- result = l.run( # lmnr_context.run( for decorator instrumentation
-     pipeline = 'my_pipeline_name',
-     inputs = {'input_node_name': 'some_value'},
-     # all environment variables
-     env = {'OPENAI_API_KEY': 'sk-some-key'},
-     # any metadata to attach to this run's trace
-     metadata = {'session_id': 'your_custom_session_id'}
- )
- ```
-
- Resulting in:
-
- ```python
- >>> result
- PipelineRunResponse(
-     outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
-     # useful to locate your trace
-     run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
- )
- ```
-
lmnr-0.3.7.dist-info/RECORD DELETED
@@ -1,23 +0,0 @@
- lmnr/__init__.py,sha256=hHpH10FQOUuTfwPDJW35ZWSYjLKNaLhm-xpW2-iIWdk,336
- lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lmnr/sdk/client.py,sha256=sFWORPHh0iPOOIMy35VpE7YaDW-oFcTKiP0wQi-Ay1I,5761
- lmnr/sdk/collector.py,sha256=6LRmPhOcmGplUDWm_sJh0dVrLTHknd_kmq7POGuAvoQ,5338
- lmnr/sdk/constants.py,sha256=USCfwuUqRx6_0xC8WUxqGj766dInqQkWJcf8U5vPK7s,34
- lmnr/sdk/context.py,sha256=erEdKFElK_I1aWNUbqlb7rJ7A_zHPX7aks52P9WXWZw,15926
- lmnr/sdk/decorators.py,sha256=ACaspfBDVjHeBPp0-YNfyCy99SDZrF6klSG80Towrqg,12086
- lmnr/sdk/interface.py,sha256=W5yq-Y9EsCoTBIWWwxV7w542wtIZ2b3f9VP6Bh7HDUw,12227
- lmnr/sdk/providers/__init__.py,sha256=wNCgQnt9-bnTNXLQWdPgyKhqA1ajiaEd1Rr2KPOpazM,54
- lmnr/sdk/providers/base.py,sha256=xc6iA8yY_VK6tbzswt-3naZ53aAXtOLur9j8eimC_ZA,1054
- lmnr/sdk/providers/fallback.py,sha256=9-srLJgDK5CWD8DIdnxo6jbSsnwDvdHC-vA06BfEkqQ,5431
- lmnr/sdk/providers/openai.py,sha256=9X2VWz4_EqQBEA1UEww2EKdqods6IzOEmaO6dnhY-Lw,5725
- lmnr/sdk/providers/utils.py,sha256=ROt82VrvezExYOxionAynD3dp6oX5JoPW6F1ayTm7q8,946
- lmnr/sdk/tracing_types.py,sha256=hbxRSkC2PHjgqaL6RRqp1Ev1C5U5-o_sNZH5QqtDHAw,6858
- lmnr/sdk/types.py,sha256=5-Ft-l35wtmn2xxE8BTqsM3nx1zD799tRv4qiOkED50,2121
- lmnr/sdk/utils.py,sha256=9ScSKalwZbW5wAhGN_Mn26_LZewVBkTz_OG5uQi6Og4,3401
- lmnr/semantic_conventions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lmnr/semantic_conventions/gen_ai_spans.py,sha256=3s-2J5v3t5LcMKwK2DefPn56XpxN5oMEYtb9Mf9D_gA,1541
- lmnr-0.3.7.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
- lmnr-0.3.7.dist-info/METADATA,sha256=5QEiLgfp5HJ0EI67nKkQt_bcqkWyKjz8d-_Zb6rUc2g,9654
- lmnr-0.3.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- lmnr-0.3.7.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
- lmnr-0.3.7.dist-info/RECORD,,