lmnr 0.4.39b1__tar.gz → 0.4.42__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. {lmnr-0.4.39b1 → lmnr-0.4.42}/PKG-INFO +62 -54
  2. {lmnr-0.4.39b1 → lmnr-0.4.42}/README.md +35 -27
  3. {lmnr-0.4.39b1 → lmnr-0.4.42}/pyproject.toml +9 -8
  4. lmnr-0.4.42/src/lmnr/cli.py +53 -0
  5. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/tracing/tracing.py +80 -33
  6. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/utils/package_check.py +1 -0
  7. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/datasets.py +2 -4
  8. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/decorators.py +1 -9
  9. lmnr-0.4.42/src/lmnr/sdk/eval_control.py +4 -0
  10. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/evaluations.py +11 -28
  11. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/laminar.py +123 -61
  12. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/types.py +31 -2
  13. lmnr-0.4.39b1/src/lmnr/cli.py +0 -39
  14. {lmnr-0.4.39b1 → lmnr-0.4.42}/LICENSE +0 -0
  15. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/__init__.py +0 -0
  16. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/.flake8 +0 -0
  17. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/.python-version +0 -0
  18. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/__init__.py +0 -0
  19. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/config/__init__.py +0 -0
  20. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/decorators/__init__.py +0 -0
  21. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/decorators/base.py +0 -0
  22. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/instruments.py +0 -0
  23. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/tracing/__init__.py +0 -0
  24. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/tracing/attributes.py +0 -0
  25. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -0
  26. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/tracing/context_manager.py +0 -0
  27. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/utils/__init__.py +0 -0
  28. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -0
  29. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/utils/json_encoder.py +0 -0
  30. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/openllmetry_sdk/version.py +0 -0
  31. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/__init__.py +0 -0
  32. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/log.py +0 -0
  33. {lmnr-0.4.39b1 → lmnr-0.4.42}/src/lmnr/sdk/utils.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lmnr
3
- Version: 0.4.39b1
3
+ Version: 0.4.42
4
4
  Summary: Python SDK for Laminar AI
5
5
  License: Apache-2.0
6
6
  Author: lmnr.ai
@@ -13,11 +13,11 @@ Classifier: Programming Language :: Python :: 3.11
13
13
  Classifier: Programming Language :: Python :: 3.12
14
14
  Classifier: Programming Language :: Python :: 3.13
15
15
  Provides-Extra: alephalpha
16
+ Provides-Extra: all
16
17
  Provides-Extra: anthropic
17
18
  Provides-Extra: bedrock
18
19
  Provides-Extra: chromadb
19
20
  Provides-Extra: cohere
20
- Provides-Extra: full
21
21
  Provides-Extra: google-generativeai
22
22
  Provides-Extra: groq
23
23
  Provides-Extra: haystack
@@ -38,6 +38,7 @@ Provides-Extra: transformers
38
38
  Provides-Extra: vertexai
39
39
  Provides-Extra: watsonx
40
40
  Provides-Extra: weaviate
41
+ Requires-Dist: aiohttp (>=3.0,<4.0)
41
42
  Requires-Dist: argparse (>=1.0,<2.0)
42
43
  Requires-Dist: backoff (>=2.0,<3.0)
43
44
  Requires-Dist: deprecated (>=1.0,<2.0)
@@ -45,35 +46,35 @@ Requires-Dist: jinja2 (>=3.0,<4.0)
45
46
  Requires-Dist: opentelemetry-api (>=1.28.0)
46
47
  Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.28.0)
47
48
  Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.28.0)
48
- Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.33.12) ; extra == "full" or extra == "alephalpha"
49
- Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.33.12) ; extra == "full" or extra == "anthropic"
50
- Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.33.12) ; extra == "full" or extra == "bedrock"
51
- Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.33.12) ; extra == "full" or extra == "chromadb"
52
- Requires-Dist: opentelemetry-instrumentation-cohere (>=0.33.12) ; extra == "full" or extra == "cohere"
53
- Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.33.12) ; extra == "full" or extra == "google-generativeai"
54
- Requires-Dist: opentelemetry-instrumentation-groq (>=0.33.12) ; extra == "full" or extra == "groq"
55
- Requires-Dist: opentelemetry-instrumentation-haystack (>=0.33.12) ; extra == "full" or extra == "haystack"
56
- Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.33.12) ; extra == "full" or extra == "lancedb"
57
- Requires-Dist: opentelemetry-instrumentation-langchain (>=0.33.12) ; extra == "full" or extra == "langchain"
58
- Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.33.12) ; extra == "full" or extra == "llamaindex"
59
- Requires-Dist: opentelemetry-instrumentation-marqo (>=0.33.12) ; extra == "full" or extra == "marqo"
60
- Requires-Dist: opentelemetry-instrumentation-milvus (>=0.33.12) ; extra == "full" or extra == "milvus"
61
- Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.33.12) ; extra == "full" or extra == "mistralai"
62
- Requires-Dist: opentelemetry-instrumentation-ollama (>=0.33.12) ; extra == "full" or extra == "ollama"
63
- Requires-Dist: opentelemetry-instrumentation-openai (>=0.33.12) ; extra == "full" or extra == "openai"
64
- Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.33.12) ; extra == "full" or extra == "pinecone"
65
- Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.33.12) ; extra == "full" or extra == "qdrant"
66
- Requires-Dist: opentelemetry-instrumentation-replicate (>=0.33.12) ; extra == "full" or extra == "replicate"
49
+ Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.33.12) ; extra == "all" or extra == "alephalpha"
50
+ Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.33.12) ; extra == "all" or extra == "anthropic"
51
+ Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.33.12) ; extra == "all" or extra == "bedrock"
52
+ Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.33.12) ; extra == "all" or extra == "chromadb"
53
+ Requires-Dist: opentelemetry-instrumentation-cohere (>=0.33.12) ; extra == "all" or extra == "cohere"
54
+ Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.33.12) ; extra == "all" or extra == "google-generativeai"
55
+ Requires-Dist: opentelemetry-instrumentation-groq (>=0.33.12) ; extra == "all" or extra == "groq"
56
+ Requires-Dist: opentelemetry-instrumentation-haystack (>=0.33.12) ; extra == "all" or extra == "haystack"
57
+ Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.33.12) ; extra == "all" or extra == "lancedb"
58
+ Requires-Dist: opentelemetry-instrumentation-langchain (>=0.33.12) ; extra == "all" or extra == "langchain"
59
+ Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.33.12) ; extra == "all" or extra == "llamaindex"
60
+ Requires-Dist: opentelemetry-instrumentation-marqo (>=0.33.12) ; extra == "all" or extra == "marqo"
61
+ Requires-Dist: opentelemetry-instrumentation-milvus (>=0.33.12) ; extra == "all" or extra == "milvus"
62
+ Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.33.12) ; extra == "all" or extra == "mistralai"
63
+ Requires-Dist: opentelemetry-instrumentation-ollama (>=0.33.12) ; extra == "all" or extra == "ollama"
64
+ Requires-Dist: opentelemetry-instrumentation-openai (>=0.33.12) ; extra == "all" or extra == "openai"
65
+ Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.33.12) ; extra == "all" or extra == "pinecone"
66
+ Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.33.12) ; extra == "all" or extra == "qdrant"
67
+ Requires-Dist: opentelemetry-instrumentation-replicate (>=0.33.12) ; extra == "all" or extra == "replicate"
67
68
  Requires-Dist: opentelemetry-instrumentation-requests (>=0.49b0,<0.50)
68
- Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.33.12) ; extra == "full" or extra == "sagemaker"
69
+ Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.33.12) ; extra == "all" or extra == "sagemaker"
69
70
  Requires-Dist: opentelemetry-instrumentation-sqlalchemy (>=0.49b0,<0.50)
70
71
  Requires-Dist: opentelemetry-instrumentation-threading (>=0.49b0,<0.50)
71
- Requires-Dist: opentelemetry-instrumentation-together (>=0.33.12) ; extra == "full" or extra == "together"
72
- Requires-Dist: opentelemetry-instrumentation-transformers (>=0.33.12) ; extra == "full" or extra == "transformers"
72
+ Requires-Dist: opentelemetry-instrumentation-together (>=0.33.12) ; extra == "all" or extra == "together"
73
+ Requires-Dist: opentelemetry-instrumentation-transformers (>=0.33.12) ; extra == "all" or extra == "transformers"
73
74
  Requires-Dist: opentelemetry-instrumentation-urllib3 (>=0.49b0,<0.50)
74
- Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.33.12) ; extra == "full" or extra == "vertexai"
75
- Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.33.12) ; extra == "full" or extra == "watsonx"
76
- Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.33.12) ; extra == "full" or extra == "weaviate"
75
+ Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.33.12) ; extra == "all" or extra == "vertexai"
76
+ Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.33.12) ; extra == "all" or extra == "watsonx"
77
+ Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.33.12) ; extra == "all" or extra == "weaviate"
77
78
  Requires-Dist: opentelemetry-sdk (>=1.28.0)
78
79
  Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.2)
79
80
  Requires-Dist: pydantic (>=2.7,<3.0)
@@ -98,23 +99,28 @@ Check our [open-source repo](https://github.com/lmnr-ai/lmnr) and don't forget t
98
99
 
99
100
  ## Quickstart
100
101
 
101
- First, install the package:
102
+ First, install the package, specifying the instrumentations you want to use.
103
+
104
+ For example, to install the package with OpenAI and Anthropic instrumentations:
102
105
 
103
106
  ```sh
104
- pip install lmnr
107
+ pip install 'lmnr[anthropic,openai]'
108
+ ```
109
+
110
+ To install all possible instrumentations, use the following command:
111
+
112
+ ```sh
113
+ pip install 'lmnr[all]'
105
114
  ```
106
115
 
107
- And then in the code
116
+ Initialize Laminar in your code:
108
117
 
109
118
  ```python
110
- from lmnr import Laminar as L
119
+ from lmnr import Laminar
111
120
 
112
- L.initialize(project_api_key="<PROJECT_API_KEY>")
121
+ Laminar.initialize(project_api_key="<PROJECT_API_KEY>")
113
122
  ```
114
123
 
115
- This will automatically instrument most of the LLM, Vector DB, and related
116
- calls with OpenTelemetry-compatible instrumentation.
117
-
118
124
  Note that you only need to initialize Laminar once in your application.
119
125
 
120
126
  ## Instrumentation
@@ -127,9 +133,9 @@ This can be useful if you want to trace a request handler or a function which co
127
133
  ```python
128
134
  import os
129
135
  from openai import OpenAI
130
- from lmnr import Laminar as L, Instruments
136
+ from lmnr import Laminar
131
137
 
132
- L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
138
+ Laminar.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
133
139
 
134
140
  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
135
141
 
@@ -152,9 +158,7 @@ def poem_writer(topic: str):
152
158
  @observe()
153
159
  def generate_poems():
154
160
  poem1 = poem_writer(topic="laminar flow")
155
- L.event("is_poem_generated", True)
156
161
  poem2 = poem_writer(topic="turbulence")
157
- L.event("is_poem_generated", True)
158
162
  poems = f"{poem1}\n\n---\n\n{poem2}"
159
163
  return poems
160
164
  ```
@@ -163,18 +167,10 @@ Also, you can use `Laminar.start_as_current_span` if you want to record a chunk
163
167
 
164
168
  ```python
165
169
  def handle_user_request(topic: str):
166
- with L.start_as_current_span(name="poem_writer", input=topic):
167
- ...
168
-
170
+ with Laminar.start_as_current_span(name="poem_writer", input=topic):
169
171
  poem = poem_writer(topic=topic)
170
-
171
- ...
172
-
173
- # while within the span, you can attach laminar events to it
174
- L.event("is_poem_generated", True)
175
-
176
172
  # Use set_span_output to record the output of the span
177
- L.set_span_output(poem)
173
+ Laminar.set_span_output(poem)
178
174
  ```
179
175
 
180
176
  ### Automatic instrumentation
@@ -189,9 +185,9 @@ calls with OpenTelemetry-compatible instrumentation, then pass the appropriate i
189
185
  For example, if you want to only instrument OpenAI and Anthropic, then do the following:
190
186
 
191
187
  ```python
192
- from lmnr import Laminar as L, Instruments
188
+ from lmnr import Laminar, Instruments
193
189
 
194
- L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
190
+ Laminar.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
195
191
  ```
196
192
 
197
193
  If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
@@ -272,11 +268,11 @@ Once your pipeline target is set, you can call it from Python in just a few line
272
268
  Example use:
273
269
 
274
270
  ```python
275
- from lmnr import Laminar as L
271
+ from lmnr import Laminar
276
272
 
277
- L.initialize('<YOUR_PROJECT_API_KEY>', instruments=set())
273
+ Laminar.initialize('<YOUR_PROJECT_API_KEY>', instruments=set())
278
274
 
279
- result = l.run(
275
+ result = Laminar.run(
280
276
  pipeline = 'my_pipeline_name',
281
277
  inputs = {'input_node_name': 'some_value'},
282
278
  # all environment variables
@@ -295,3 +291,15 @@ PipelineRunResponse(
295
291
  )
296
292
  ```
297
293
 
294
+ ## Semantic search
295
+
296
+ You can perform a semantic search on a dataset in Laminar by calling `Laminar.semantic_search`.
297
+
298
+ ```python
299
+ response = Laminar.semantic_search(
300
+ query="Greatest Chinese architectural wonders",
301
+ dataset_id=uuid.UUID("413f8404-724c-4aa4-af16-714d84fd7958"),
302
+ )
303
+ ```
304
+
305
+ [Read more](https://docs.lmnr.ai/datasets/indexing) about indexing and semantic search.
@@ -13,23 +13,28 @@ Check our [open-source repo](https://github.com/lmnr-ai/lmnr) and don't forget t
13
13
 
14
14
  ## Quickstart
15
15
 
16
- First, install the package:
16
+ First, install the package, specifying the instrumentations you want to use.
17
+
18
+ For example, to install the package with OpenAI and Anthropic instrumentations:
17
19
 
18
20
  ```sh
19
- pip install lmnr
21
+ pip install 'lmnr[anthropic,openai]'
22
+ ```
23
+
24
+ To install all possible instrumentations, use the following command:
25
+
26
+ ```sh
27
+ pip install 'lmnr[all]'
20
28
  ```
21
29
 
22
- And then in the code
30
+ Initialize Laminar in your code:
23
31
 
24
32
  ```python
25
- from lmnr import Laminar as L
33
+ from lmnr import Laminar
26
34
 
27
- L.initialize(project_api_key="<PROJECT_API_KEY>")
35
+ Laminar.initialize(project_api_key="<PROJECT_API_KEY>")
28
36
  ```
29
37
 
30
- This will automatically instrument most of the LLM, Vector DB, and related
31
- calls with OpenTelemetry-compatible instrumentation.
32
-
33
38
  Note that you only need to initialize Laminar once in your application.
34
39
 
35
40
  ## Instrumentation
@@ -42,9 +47,9 @@ This can be useful if you want to trace a request handler or a function which co
42
47
  ```python
43
48
  import os
44
49
  from openai import OpenAI
45
- from lmnr import Laminar as L, Instruments
50
+ from lmnr import Laminar
46
51
 
47
- L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
52
+ Laminar.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
48
53
 
49
54
  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
50
55
 
@@ -67,9 +72,7 @@ def poem_writer(topic: str):
67
72
  @observe()
68
73
  def generate_poems():
69
74
  poem1 = poem_writer(topic="laminar flow")
70
- L.event("is_poem_generated", True)
71
75
  poem2 = poem_writer(topic="turbulence")
72
- L.event("is_poem_generated", True)
73
76
  poems = f"{poem1}\n\n---\n\n{poem2}"
74
77
  return poems
75
78
  ```
@@ -78,18 +81,10 @@ Also, you can use `Laminar.start_as_current_span` if you want to record a chunk
78
81
 
79
82
  ```python
80
83
  def handle_user_request(topic: str):
81
- with L.start_as_current_span(name="poem_writer", input=topic):
82
- ...
83
-
84
+ with Laminar.start_as_current_span(name="poem_writer", input=topic):
84
85
  poem = poem_writer(topic=topic)
85
-
86
- ...
87
-
88
- # while within the span, you can attach laminar events to it
89
- L.event("is_poem_generated", True)
90
-
91
86
  # Use set_span_output to record the output of the span
92
- L.set_span_output(poem)
87
+ Laminar.set_span_output(poem)
93
88
  ```
94
89
 
95
90
  ### Automatic instrumentation
@@ -104,9 +99,9 @@ calls with OpenTelemetry-compatible instrumentation, then pass the appropriate i
104
99
  For example, if you want to only instrument OpenAI and Anthropic, then do the following:
105
100
 
106
101
  ```python
107
- from lmnr import Laminar as L, Instruments
102
+ from lmnr import Laminar, Instruments
108
103
 
109
- L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
104
+ Laminar.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
110
105
  ```
111
106
 
112
107
  If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
@@ -187,11 +182,11 @@ Once your pipeline target is set, you can call it from Python in just a few line
187
182
  Example use:
188
183
 
189
184
  ```python
190
- from lmnr import Laminar as L
185
+ from lmnr import Laminar
191
186
 
192
- L.initialize('<YOUR_PROJECT_API_KEY>', instruments=set())
187
+ Laminar.initialize('<YOUR_PROJECT_API_KEY>', instruments=set())
193
188
 
194
- result = l.run(
189
+ result = Laminar.run(
195
190
  pipeline = 'my_pipeline_name',
196
191
  inputs = {'input_node_name': 'some_value'},
197
192
  # all environment variables
@@ -209,3 +204,16 @@ PipelineRunResponse(
209
204
  run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
210
205
  )
211
206
  ```
207
+
208
+ ## Semantic search
209
+
210
+ You can perform a semantic search on a dataset in Laminar by calling `Laminar.semantic_search`.
211
+
212
+ ```python
213
+ response = Laminar.semantic_search(
214
+ query="Greatest Chinese architectural wonders",
215
+ dataset_id=uuid.UUID("413f8404-724c-4aa4-af16-714d84fd7958"),
216
+ )
217
+ ```
218
+
219
+ [Read more](https://docs.lmnr.ai/datasets/indexing) about indexing and semantic search.
@@ -6,7 +6,7 @@
6
6
 
7
7
  [project]
8
8
  name = "lmnr"
9
- version = "0.4.39b1"
9
+ version = "0.4.42"
10
10
  description = "Python SDK for Laminar AI"
11
11
  authors = [
12
12
  { name = "lmnr.ai", email = "founders@lmnr.ai" }
@@ -17,7 +17,7 @@ license = "Apache-2.0"
17
17
 
18
18
  [tool.poetry]
19
19
  name = "lmnr"
20
- version = "0.4.39b1"
20
+ version = "0.4.42"
21
21
  description = "Python SDK for Laminar AI"
22
22
  authors = ["lmnr.ai"]
23
23
  readme = "README.md"
@@ -43,6 +43,7 @@ jinja2 = "~=3.0"
43
43
  deprecated = "~=1.0"
44
44
  tqdm = "~=4.0"
45
45
  argparse = "~=1.0"
46
+ aiohttp = "~=3.0"
46
47
  opentelemetry-instrumentation-alephalpha = {version = ">=0.33.12", optional = true}
47
48
  opentelemetry-instrumentation-anthropic = {version = ">=0.33.12", optional = true}
48
49
  opentelemetry-instrumentation-bedrock = {version = ">=0.33.12", optional = true}
@@ -69,17 +70,17 @@ opentelemetry-instrumentation-vertexai = {version = ">=0.33.12", optional = true
69
70
  opentelemetry-instrumentation-watsonx = {version = ">=0.33.12", optional = true}
70
71
  opentelemetry-instrumentation-weaviate = {version = ">=0.33.12", optional = true}
71
72
 
73
+ [tool.poetry.extras]
72
74
  # List of all possible extras. You can specify one or more of these extras
73
- # when installing the package, like
75
+ # when installing the package, using any of the following:
74
76
  # `pip install 'lmnr[anthropic,openai]'`
75
- # `uv pip install lmnr[anthropic,openai]`
77
+ # `uv pip install 'lmnr[anthropic,openai]'`
76
78
  # `uv add lmnr --extra anthropic --extra openai`
77
- # `poetry add lmnr[anthropic,openai]`
79
+ # `poetry add 'lmnr[anthropic,openai]'`
78
80
 
79
- # `full` is the group added for convenience, if you want to install all
81
+ # `all` is the group added for convenience, if you want to install all
80
82
  # the instrumentations.
81
- [tool.poetry.extras]
82
- full = [
83
+ all = [
83
84
  "opentelemetry-instrumentation-alephalpha",
84
85
  "opentelemetry-instrumentation-anthropic",
85
86
  "opentelemetry-instrumentation-bedrock",
@@ -0,0 +1,53 @@
1
+ from argparse import ArgumentParser
2
+ import asyncio
3
+ import importlib.util
4
+ import os
5
+ import sys
6
+
7
+ from .sdk.eval_control import PREPARE_ONLY, EVALUATION_INSTANCE
8
+
9
+
10
+ async def run_evaluation(args):
11
+ sys.path.append(os.getcwd())
12
+
13
+ prep_token = PREPARE_ONLY.set(True)
14
+ try:
15
+ file = os.path.abspath(args.file)
16
+ name = "user_module"
17
+
18
+ spec = importlib.util.spec_from_file_location(name, file)
19
+ if spec is None or spec.loader is None:
20
+ raise ImportError(f"Could not load module specification from {file}")
21
+ mod = importlib.util.module_from_spec(spec)
22
+ sys.modules[name] = mod
23
+
24
+ spec.loader.exec_module(mod)
25
+ evaluation = EVALUATION_INSTANCE.get()
26
+ if evaluation is None:
27
+ raise RuntimeError("Evaluation instance not found")
28
+
29
+ await evaluation.run()
30
+ finally:
31
+ PREPARE_ONLY.reset(prep_token)
32
+
33
+
34
+ def cli():
35
+ parser = ArgumentParser(
36
+ prog="lmnr",
37
+ description="CLI for Laminar",
38
+ )
39
+
40
+ subparsers = parser.add_subparsers(title="subcommands", dest="subcommand")
41
+
42
+ parser_eval = subparsers.add_parser(
43
+ "eval",
44
+ description="Run an evaluation",
45
+ help="Run an evaluation",
46
+ )
47
+ parser_eval.add_argument("file", help="A file containing the evaluation to run")
48
+
49
+ parsed = parser.parse_args()
50
+ if parsed.subcommand == "eval":
51
+ asyncio.run(run_evaluation(parsed))
52
+ else:
53
+ parser.print_help()