lmnr 0.4.65__tar.gz → 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56) hide show
  1. {lmnr-0.4.65 → lmnr-0.5.0}/PKG-INFO +88 -38
  2. {lmnr-0.4.65 → lmnr-0.5.0}/README.md +78 -28
  3. {lmnr-0.4.65 → lmnr-0.5.0}/pyproject.toml +12 -12
  4. lmnr-0.5.0/src/lmnr/__init__.py +46 -0
  5. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/__init__.py +4 -15
  6. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/tracing/attributes.py +0 -1
  7. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/tracing/tracing.py +24 -9
  8. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/browser/browser_use_otel.py +11 -12
  9. lmnr-0.5.0/src/lmnr/sdk/browser/playwright_otel.py +295 -0
  10. lmnr-0.4.65/src/lmnr/sdk/browser/playwright_otel.py → lmnr-0.5.0/src/lmnr/sdk/browser/pw_utils.py +108 -129
  11. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/browser/utils.py +18 -53
  12. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/async_client.py +157 -0
  13. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/__init__.py +13 -0
  14. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/agent.py +215 -0
  15. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/base.py +32 -0
  16. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/browser_events.py +40 -0
  17. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/evals.py +64 -0
  18. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/pipeline.py +89 -0
  19. lmnr-0.5.0/src/lmnr/sdk/client/asynchronous/resources/semantic_search.py +60 -0
  20. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/__init__.py +7 -0
  21. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/agent.py +209 -0
  22. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/base.py +32 -0
  23. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
  24. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/evals.py +102 -0
  25. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/pipeline.py +89 -0
  26. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/resources/semantic_search.py +60 -0
  27. lmnr-0.5.0/src/lmnr/sdk/client/synchronous/sync_client.py +170 -0
  28. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/datasets.py +7 -2
  29. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/evaluations.py +53 -27
  30. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/laminar.py +22 -175
  31. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/types.py +121 -23
  32. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/utils.py +10 -0
  33. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/version.py +6 -6
  34. lmnr-0.4.65/src/lmnr/__init__.py +0 -16
  35. lmnr-0.4.65/src/lmnr/sdk/client.py +0 -313
  36. {lmnr-0.4.65 → lmnr-0.5.0}/LICENSE +0 -0
  37. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/cli.py +0 -0
  38. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/.flake8 +0 -0
  39. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/config/__init__.py +0 -0
  40. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/decorators/__init__.py +0 -0
  41. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/decorators/base.py +0 -0
  42. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/instruments.py +0 -0
  43. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/tracing/__init__.py +0 -0
  44. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -0
  45. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/tracing/context_manager.py +0 -0
  46. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/utils/__init__.py +0 -0
  47. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -0
  48. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/utils/json_encoder.py +0 -0
  49. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/openllmetry_sdk/utils/package_check.py +0 -0
  50. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/py.typed +0 -0
  51. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/__init__.py +0 -0
  52. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/browser/__init__.py +0 -0
  53. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/browser/rrweb/rrweb.min.js +0 -0
  54. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/decorators.py +0 -0
  55. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/eval_control.py +0 -0
  56. {lmnr-0.4.65 → lmnr-0.5.0}/src/lmnr/sdk/log.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: lmnr
3
- Version: 0.4.65
3
+ Version: 0.5.0
4
4
  Summary: Python SDK for Laminar
5
5
  License: Apache-2.0
6
6
  Author: lmnr.ai
@@ -39,12 +39,12 @@ Provides-Extra: transformers
39
39
  Provides-Extra: vertexai
40
40
  Provides-Extra: watsonx
41
41
  Provides-Extra: weaviate
42
- Requires-Dist: aiohttp (>=3.0)
43
42
  Requires-Dist: argparse (>=1.0)
44
43
  Requires-Dist: grpcio (<1.68.0)
45
- Requires-Dist: opentelemetry-api (>=1.28.0)
46
- Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.28.0)
47
- Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.28.0)
44
+ Requires-Dist: httpx (>=0.28.1)
45
+ Requires-Dist: opentelemetry-api (>=1.31.1)
46
+ Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.31.1)
47
+ Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.31.1)
48
48
  Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.38.12) ; extra == "alephalpha"
49
49
  Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.38.12) ; extra == "all"
50
50
  Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.38.12) ; extra == "all"
@@ -83,27 +83,26 @@ Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.38.12) ; extra == "all"
83
83
  Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.38.12) ; extra == "qdrant"
84
84
  Requires-Dist: opentelemetry-instrumentation-replicate (>=0.38.12) ; extra == "all"
85
85
  Requires-Dist: opentelemetry-instrumentation-replicate (>=0.38.12) ; extra == "replicate"
86
- Requires-Dist: opentelemetry-instrumentation-requests (>=0.50b0)
86
+ Requires-Dist: opentelemetry-instrumentation-requests (>=0.52b0)
87
87
  Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.38.12) ; extra == "all"
88
88
  Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.38.12) ; extra == "sagemaker"
89
- Requires-Dist: opentelemetry-instrumentation-sqlalchemy (>=0.50b0)
90
- Requires-Dist: opentelemetry-instrumentation-threading (>=0.50b0)
89
+ Requires-Dist: opentelemetry-instrumentation-sqlalchemy (>=0.52b0)
90
+ Requires-Dist: opentelemetry-instrumentation-threading (>=0.52b0)
91
91
  Requires-Dist: opentelemetry-instrumentation-together (>=0.38.12) ; extra == "all"
92
92
  Requires-Dist: opentelemetry-instrumentation-together (>=0.38.12) ; extra == "together"
93
93
  Requires-Dist: opentelemetry-instrumentation-transformers (>=0.38.12) ; extra == "all"
94
94
  Requires-Dist: opentelemetry-instrumentation-transformers (>=0.38.12) ; extra == "transformers"
95
- Requires-Dist: opentelemetry-instrumentation-urllib3 (>=0.50b0)
95
+ Requires-Dist: opentelemetry-instrumentation-urllib3 (>=0.52b0)
96
96
  Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.38.12) ; extra == "all"
97
97
  Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.38.12) ; extra == "vertexai"
98
98
  Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.38.12) ; extra == "all"
99
99
  Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.38.12) ; extra == "watsonx"
100
100
  Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.38.12) ; extra == "all"
101
101
  Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.38.12) ; extra == "weaviate"
102
- Requires-Dist: opentelemetry-sdk (>=1.28.0)
102
+ Requires-Dist: opentelemetry-sdk (>=1.31.1)
103
103
  Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.2)
104
104
  Requires-Dist: pydantic (>=2.0.3)
105
105
  Requires-Dist: python-dotenv (>=1.0)
106
- Requires-Dist: requests (>=2.0)
107
106
  Requires-Dist: tenacity (>=8.0)
108
107
  Requires-Dist: tqdm (>=4.0)
109
108
  Description-Content-Type: text/markdown
@@ -145,7 +144,28 @@ from lmnr import Laminar
145
144
  Laminar.initialize(project_api_key="<PROJECT_API_KEY>")
146
145
  ```
147
146
 
148
- Note that you need to only initialize Laminar once in your application.
147
+ You can also skip passing the `project_api_key`, in which case it will be looked up
148
+ in the environment (or local .env file) by the key `LMNR_PROJECT_API_KEY`.
149
+
150
+ Note that you need to only initialize Laminar once in your application. You should
151
+ try to do that as early as possible in your application, e.g. at server startup.
152
+
153
+ ## Set-up for self-hosting
154
+
155
+ If you self-host a Laminar instance, the default connection settings to it are
156
+ `http://localhost:8000` for HTTP and `http://localhost:8001` for gRPC. Initialize
157
+ the SDK accordingly:
158
+
159
+ ```python
160
+ from lmnr import Laminar
161
+
162
+ Laminar.initialize(
163
+ project_api_key="<PROJECT_API_KEY>",
164
+ base_url="http://localhost",
165
+ http_port=8000,
166
+ grpc_port=8001,
167
+ )
168
+ ```
149
169
 
150
170
  ## Instrumentation
151
171
 
@@ -281,49 +301,79 @@ You can run evaluations locally by providing executor (part of the logic used in
281
301
 
282
302
  Read the [docs](https://docs.lmnr.ai/evaluations/introduction) to learn more about evaluations.
283
303
 
284
- ## Laminar pipelines as prompt chain managers
304
+ ## Client for HTTP operations
285
305
 
286
- You can create Laminar pipelines in the UI and manage chains of LLM calls there.
306
+ Various interactions with Laminar [API](https://docs.lmnr.ai/api-reference/) are available in `LaminarClient`
307
+ and its asynchronous version `AsyncLaminarClient`.
287
308
 
288
- After you are ready to use your pipeline in your code, deploy it in Laminar by selecting the target version for the pipeline.
309
+ ### Agent
289
310
 
290
- Once your pipeline target is set, you can call it from Python in just a few lines.
291
-
292
- Example use:
311
+ To run Laminar agent, you can invoke `client.agent.run`
293
312
 
294
313
  ```python
295
- from lmnr import Laminar
314
+ from lmnr import LaminarClient
296
315
 
297
- Laminar.initialize('<YOUR_PROJECT_API_KEY>', instruments=set())
316
+ client = LaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
298
317
 
299
- result = Laminar.run(
300
- pipeline = 'my_pipeline_name',
301
- inputs = {'input_node_name': 'some_value'},
302
- # all environment variables
303
- env = {'OPENAI_API_KEY': 'sk-some-key'},
318
+ response = client.agent.run(
319
+ prompt="What is the weather in London today?"
304
320
  )
321
+
322
+ print(response.result.content)
305
323
  ```
306
324
 
307
- Resulting in:
325
+ #### Streaming
326
+
327
+ Agent run supports streaming as well.
308
328
 
309
329
  ```python
310
- >>> result
311
- PipelineRunResponse(
312
- outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
313
- # useful to locate your trace
314
- run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
315
- )
330
+ from lmnr import LaminarClient
331
+
332
+ client = LaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
333
+
334
+ for chunk in client.agent.run(
335
+ prompt="What is the weather in London today?",
336
+ stream=True
337
+ ):
338
+ if chunk.chunkType == 'step':
339
+ print(chunk.summary)
340
+ elif chunk.chunkType == 'finalOutput':
341
+ print(chunk.content.result.content)
316
342
  ```
317
343
 
318
- ## Semantic search
344
+ #### Async mode
345
+
346
+ ```python
347
+ from lmnr import AsyncLaminarClient
348
+
349
+ client = AsyncLaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
319
350
 
320
- You can perform a semantic search on a dataset in Laminar by calling `Laminar.semantic_search`.
351
+ response = await client.agent.run(
352
+ prompt="What is the weather in London today?"
353
+ )
354
+
355
+ print(response.result.content)
356
+ ```
357
+
358
+ #### Async mode with streaming
321
359
 
322
360
  ```python
323
- response = Laminar.semantic_search(
324
- query="Greatest Chinese architectural wonders",
325
- dataset_id=uuid.UUID("413f8404-724c-4aa4-af16-714d84fd7958"),
361
+ from lmnr import AsyncLaminarClient
362
+
363
+ client = AsyncLaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
364
+
365
+ # Note that you need to await the operation even though we use `async for` below
366
+ response = await client.agent.run(
367
+ prompt="What is the weather in London today?",
368
+ stream=True
326
369
  )
370
+ async for chunk in client.agent.run(
371
+ prompt="What is the weather in London today?",
372
+ stream=True
373
+ ):
374
+ if chunk.chunkType == 'step':
375
+ print(chunk.summary)
376
+ elif chunk.chunkType == 'finalOutput':
377
+ print(chunk.content.result.content)
327
378
  ```
328
379
 
329
- [Read more](https://docs.lmnr.ai/datasets/indexing) about indexing and semantic search.
@@ -35,7 +35,28 @@ from lmnr import Laminar
35
35
  Laminar.initialize(project_api_key="<PROJECT_API_KEY>")
36
36
  ```
37
37
 
38
- Note that you need to only initialize Laminar once in your application.
38
+ You can also skip passing the `project_api_key`, in which case it will be looked up
39
+ in the environment (or local .env file) by the key `LMNR_PROJECT_API_KEY`.
40
+
41
+ Note that you need to only initialize Laminar once in your application. You should
42
+ try to do that as early as possible in your application, e.g. at server startup.
43
+
44
+ ## Set-up for self-hosting
45
+
46
+ If you self-host a Laminar instance, the default connection settings to it are
47
+ `http://localhost:8000` for HTTP and `http://localhost:8001` for gRPC. Initialize
48
+ the SDK accordingly:
49
+
50
+ ```python
51
+ from lmnr import Laminar
52
+
53
+ Laminar.initialize(
54
+ project_api_key="<PROJECT_API_KEY>",
55
+ base_url="http://localhost",
56
+ http_port=8000,
57
+ grpc_port=8001,
58
+ )
59
+ ```
39
60
 
40
61
  ## Instrumentation
41
62
 
@@ -171,49 +192,78 @@ You can run evaluations locally by providing executor (part of the logic used in
171
192
 
172
193
  Read the [docs](https://docs.lmnr.ai/evaluations/introduction) to learn more about evaluations.
173
194
 
174
- ## Laminar pipelines as prompt chain managers
195
+ ## Client for HTTP operations
175
196
 
176
- You can create Laminar pipelines in the UI and manage chains of LLM calls there.
197
+ Various interactions with Laminar [API](https://docs.lmnr.ai/api-reference/) are available in `LaminarClient`
198
+ and its asynchronous version `AsyncLaminarClient`.
177
199
 
178
- After you are ready to use your pipeline in your code, deploy it in Laminar by selecting the target version for the pipeline.
200
+ ### Agent
179
201
 
180
- Once your pipeline target is set, you can call it from Python in just a few lines.
181
-
182
- Example use:
202
+ To run Laminar agent, you can invoke `client.agent.run`
183
203
 
184
204
  ```python
185
- from lmnr import Laminar
205
+ from lmnr import LaminarClient
186
206
 
187
- Laminar.initialize('<YOUR_PROJECT_API_KEY>', instruments=set())
207
+ client = LaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
188
208
 
189
- result = Laminar.run(
190
- pipeline = 'my_pipeline_name',
191
- inputs = {'input_node_name': 'some_value'},
192
- # all environment variables
193
- env = {'OPENAI_API_KEY': 'sk-some-key'},
209
+ response = client.agent.run(
210
+ prompt="What is the weather in London today?"
194
211
  )
212
+
213
+ print(response.result.content)
195
214
  ```
196
215
 
197
- Resulting in:
216
+ #### Streaming
217
+
218
+ Agent run supports streaming as well.
198
219
 
199
220
  ```python
200
- >>> result
201
- PipelineRunResponse(
202
- outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
203
- # useful to locate your trace
204
- run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
205
- )
221
+ from lmnr import LaminarClient
222
+
223
+ client = LaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
224
+
225
+ for chunk in client.agent.run(
226
+ prompt="What is the weather in London today?",
227
+ stream=True
228
+ ):
229
+ if chunk.chunkType == 'step':
230
+ print(chunk.summary)
231
+ elif chunk.chunkType == 'finalOutput':
232
+ print(chunk.content.result.content)
206
233
  ```
207
234
 
208
- ## Semantic search
209
-
210
- You can perform a semantic search on a dataset in Laminar by calling `Laminar.semantic_search`.
235
+ #### Async mode
211
236
 
212
237
  ```python
213
- response = Laminar.semantic_search(
214
- query="Greatest Chinese architectural wonders",
215
- dataset_id=uuid.UUID("413f8404-724c-4aa4-af16-714d84fd7958"),
238
+ from lmnr import AsyncLaminarClient
239
+
240
+ client = AsyncLaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
241
+
242
+ response = await client.agent.run(
243
+ prompt="What is the weather in London today?"
216
244
  )
245
+
246
+ print(response.result.content)
217
247
  ```
218
248
 
219
- [Read more](https://docs.lmnr.ai/datasets/indexing) about indexing and semantic search.
249
+ #### Async mode with streaming
250
+
251
+ ```python
252
+ from lmnr import AsyncLaminarClient
253
+
254
+ client = AsyncLaminarClient(project_api_key="<YOUR_PROJECT_API_KEY>")
255
+
256
+ # Note that you need to await the operation even though we use `async for` below
257
+ response = await client.agent.run(
258
+ prompt="What is the weather in London today?",
259
+ stream=True
260
+ )
261
+ async for chunk in client.agent.run(
262
+ prompt="What is the weather in London today?",
263
+ stream=True
264
+ ):
265
+ if chunk.chunkType == 'step':
266
+ print(chunk.summary)
267
+ elif chunk.chunkType == 'finalOutput':
268
+ print(chunk.content.result.content)
269
+ ```
@@ -6,7 +6,7 @@
6
6
 
7
7
  [project]
8
8
  name = "lmnr"
9
- version = "0.4.65"
9
+ version = "0.5.0"
10
10
  description = "Python SDK for Laminar"
11
11
  authors = [
12
12
  { name = "lmnr.ai", email = "founders@lmnr.ai" }
@@ -16,20 +16,18 @@ requires-python = ">=3.9,<4"
16
16
  license = "Apache-2.0"
17
17
  dependencies = [
18
18
  "pydantic (>=2.0.3)",
19
- "requests (>=2.0)",
20
19
  "python-dotenv (>=1.0)",
21
- "opentelemetry-api (>=1.28.0)",
22
- "opentelemetry-sdk (>=1.28.0)",
23
- "opentelemetry-exporter-otlp-proto-http (>=1.28.0)",
24
- "opentelemetry-exporter-otlp-proto-grpc (>=1.28.0)",
25
- "opentelemetry-instrumentation-requests (>=0.50b0)",
26
- "opentelemetry-instrumentation-sqlalchemy (>=0.50b0)",
27
- "opentelemetry-instrumentation-urllib3 (>=0.50b0)",
28
- "opentelemetry-instrumentation-threading (>=0.50b0)",
20
+ "opentelemetry-api (>=1.31.1)",
21
+ "opentelemetry-sdk (>=1.31.1)",
22
+ "opentelemetry-exporter-otlp-proto-http (>=1.31.1)",
23
+ "opentelemetry-exporter-otlp-proto-grpc (>=1.31.1)",
24
+ "opentelemetry-instrumentation-requests (>=0.52b0)",
25
+ "opentelemetry-instrumentation-sqlalchemy (>=0.52b0)",
26
+ "opentelemetry-instrumentation-urllib3 (>=0.52b0)",
27
+ "opentelemetry-instrumentation-threading (>=0.52b0)",
29
28
  "opentelemetry-semantic-conventions-ai (>=0.4.2)",
30
29
  "tqdm (>=4.0)",
31
30
  "argparse (>=1.0)",
32
- "aiohttp (>=3.0)",
33
31
  "tenacity (>=8.0)",
34
32
  # explicitly freeze grpcio. Since 1.68.0, grpcio writes a warning message
35
33
  # that looks scary, but is harmless.
@@ -40,6 +38,7 @@ dependencies = [
40
38
  # https://discuss.ai.google.dev/t/warning-all-log-messages-before-absl-initializelog-is-called-are-written-to-stderr-e0000-001731955515-629532-17124-init-cc-229-grpc-wait-for-shutdown-with-timeout-timed-out/50020
41
39
  # https://github.com/grpc/grpc/issues/38490
42
40
  "grpcio<1.68.0",
41
+ "httpx>=0.28.1",
43
42
  ]
44
43
 
45
44
  [project.scripts]
@@ -114,7 +113,8 @@ dev = [
114
113
  "flake8",
115
114
  "pytest>=8.3.4",
116
115
  "pytest-sugar",
117
- "pytest-asyncio>=0.25.2"
116
+ "pytest-asyncio>=0.25.2",
117
+ "playwright>=1.51.0"
118
118
  ]
119
119
 
120
120
  [build-system]
@@ -0,0 +1,46 @@
1
+ from .sdk.client.synchronous.sync_client import LaminarClient
2
+ from .sdk.client.asynchronous.async_client import AsyncLaminarClient
3
+ from .sdk.datasets import EvaluationDataset, LaminarDataset
4
+ from .sdk.evaluations import evaluate
5
+ from .sdk.laminar import Laminar
6
+ from .sdk.types import (
7
+ AgentOutput,
8
+ FinalOutputChunkContent,
9
+ ChatMessage,
10
+ HumanEvaluator,
11
+ NodeInput,
12
+ PipelineRunError,
13
+ PipelineRunResponse,
14
+ RunAgentResponseChunk,
15
+ StepChunkContent,
16
+ TracingLevel,
17
+ )
18
+ from .sdk.decorators import observe
19
+ from .sdk.types import LaminarSpanContext
20
+ from .openllmetry_sdk import Instruments
21
+ from .openllmetry_sdk.tracing.attributes import Attributes
22
+ from opentelemetry.trace import use_span
23
+
24
+ __all__ = [
25
+ "AgentOutput",
26
+ "AsyncLaminarClient",
27
+ "Attributes",
28
+ "ChatMessage",
29
+ "EvaluationDataset",
30
+ "FinalOutputChunkContent",
31
+ "HumanEvaluator",
32
+ "Instruments",
33
+ "Laminar",
34
+ "LaminarClient",
35
+ "LaminarDataset",
36
+ "LaminarSpanContext",
37
+ "NodeInput",
38
+ "PipelineRunError",
39
+ "PipelineRunResponse",
40
+ "RunAgentResponseChunk",
41
+ "StepChunkContent",
42
+ "TracingLevel",
43
+ "evaluate",
44
+ "observe",
45
+ "use_span",
46
+ ]
@@ -16,7 +16,7 @@ from lmnr.openllmetry_sdk.tracing.tracing import TracerWrapper
16
16
  from typing import Dict
17
17
 
18
18
 
19
- class Traceloop:
19
+ class TracerManager:
20
20
  __tracer_wrapper: TracerWrapper
21
21
 
22
22
  @staticmethod
@@ -44,17 +44,6 @@ class Traceloop:
44
44
  if isinstance(headers, str):
45
45
  headers = parse_env_headers(headers)
46
46
 
47
- if (
48
- not exporter
49
- and not processor
50
- and api_endpoint == "https://api.lmnr.ai"
51
- and not api_key
52
- ):
53
- print(
54
- "Set the LMNR_PROJECT_API_KEY environment variable to your project API key"
55
- )
56
- return
57
-
58
47
  if api_key and not exporter and not processor and not headers:
59
48
  headers = {
60
49
  "Authorization": f"Bearer {api_key}",
@@ -65,7 +54,7 @@ class Traceloop:
65
54
  TracerWrapper.set_static_params(
66
55
  resource_attributes, enable_content_tracing, api_endpoint, headers
67
56
  )
68
- Traceloop.__tracer_wrapper = TracerWrapper(
57
+ TracerManager.__tracer_wrapper = TracerWrapper(
69
58
  disable_batch=disable_batch,
70
59
  processor=processor,
71
60
  propagator=propagator,
@@ -79,5 +68,5 @@ class Traceloop:
79
68
 
80
69
  @staticmethod
81
70
  def flush():
82
- if Traceloop.__tracer_wrapper:
83
- Traceloop.__tracer_wrapper.flush()
71
+ if getattr(TracerManager, "__tracer_wrapper", None):
72
+ TracerManager.__tracer_wrapper.flush()
@@ -9,7 +9,6 @@ SPAN_IDS_PATH = "lmnr.span.ids_path"
9
9
  SPAN_INSTRUMENTATION_SOURCE = "lmnr.span.instrumentation_source"
10
10
  SPAN_SDK_VERSION = "lmnr.span.sdk_version"
11
11
  SPAN_LANGUAGE_VERSION = "lmnr.span.language_version"
12
- OVERRIDE_PARENT_SPAN = "lmnr.internal.override_parent_span"
13
12
 
14
13
  ASSOCIATION_PROPERTIES = "lmnr.association.properties"
15
14
  SESSION_ID = "session_id"
@@ -4,6 +4,8 @@ import logging
4
4
  import uuid
5
5
 
6
6
  from contextvars import Context
7
+ from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
8
+ from lmnr.sdk.client.synchronous.sync_client import LaminarClient
7
9
  from lmnr.sdk.log import VerboseColorfulFormatter
8
10
  from lmnr.openllmetry_sdk.instruments import Instruments
9
11
  from lmnr.openllmetry_sdk.tracing.attributes import (
@@ -41,7 +43,7 @@ from opentelemetry.trace import get_tracer_provider, ProxyTracerProvider
41
43
 
42
44
  from typing import Dict, Optional, Set
43
45
 
44
- from lmnr.version import SDK_VERSION, PYTHON_VERSION
46
+ from lmnr.version import __version__, PYTHON_VERSION
45
47
 
46
48
  module_logger = logging.getLogger(__name__)
47
49
  console_log_handler = logging.StreamHandler()
@@ -80,6 +82,8 @@ class TracerWrapper(object):
80
82
  __logger: logging.Logger = None
81
83
  __span_id_to_path: dict[int, list[str]] = {}
82
84
  __span_id_lists: dict[int, list[str]] = {}
85
+ __client: LaminarClient = None
86
+ __async_client: AsyncLaminarClient = None
83
87
 
84
88
  def __new__(
85
89
  cls,
@@ -99,6 +103,15 @@ class TracerWrapper(object):
99
103
  if not TracerWrapper.endpoint:
100
104
  return obj
101
105
 
106
+ obj.__client = LaminarClient(
107
+ base_url=base_http_url,
108
+ project_api_key=project_api_key,
109
+ )
110
+ obj.__async_client = AsyncLaminarClient(
111
+ base_url=base_http_url,
112
+ project_api_key=project_api_key,
113
+ )
114
+
102
115
  obj.__resource = Resource(attributes=TracerWrapper.resource_attributes)
103
116
  obj.__tracer_provider = init_tracer_provider(resource=obj.__resource)
104
117
  if processor:
@@ -135,8 +148,8 @@ class TracerWrapper(object):
135
148
  instrument_set = init_instrumentations(
136
149
  should_enrich_metrics,
137
150
  instruments,
138
- base_http_url=base_http_url,
139
- project_api_key=project_api_key,
151
+ client=obj.__client,
152
+ async_client=obj.__async_client,
140
153
  )
141
154
 
142
155
  if not instrument_set:
@@ -184,7 +197,7 @@ class TracerWrapper(object):
184
197
  self.__span_id_lists[span.get_span_context().span_id] = span_ids_path
185
198
 
186
199
  span.set_attribute(SPAN_INSTRUMENTATION_SOURCE, "python")
187
- span.set_attribute(SPAN_SDK_VERSION, SDK_VERSION)
200
+ span.set_attribute(SPAN_SDK_VERSION, __version__)
188
201
  span.set_attribute(SPAN_LANGUAGE_VERSION, f"python@{PYTHON_VERSION}")
189
202
 
190
203
  association_properties = get_value("association_properties")
@@ -320,8 +333,8 @@ def init_instrumentations(
320
333
  should_enrich_metrics: bool,
321
334
  instruments: Optional[Set[Instruments]] = None,
322
335
  block_instruments: Optional[Set[Instruments]] = None,
323
- base_http_url: Optional[str] = None,
324
- project_api_key: Optional[str] = None,
336
+ client: Optional[LaminarClient] = None,
337
+ async_client: Optional[AsyncLaminarClient] = None,
325
338
  ):
326
339
  block_instruments = block_instruments or set()
327
340
  # These libraries are not instrumented by default,
@@ -434,7 +447,7 @@ def init_instrumentations(
434
447
  if init_weaviate_instrumentor():
435
448
  instrument_set = True
436
449
  elif instrument == Instruments.PLAYWRIGHT:
437
- if init_playwright_instrumentor():
450
+ if init_playwright_instrumentor(client, async_client):
438
451
  instrument_set = True
439
452
  elif instrument == Instruments.BROWSER_USE:
440
453
  if init_browser_use_instrumentor():
@@ -465,12 +478,14 @@ def init_browser_use_instrumentor():
465
478
  return False
466
479
 
467
480
 
468
- def init_playwright_instrumentor():
481
+ def init_playwright_instrumentor(
482
+ client: LaminarClient, async_client: AsyncLaminarClient
483
+ ):
469
484
  try:
470
485
  if is_package_installed("playwright"):
471
486
  from lmnr.sdk.browser.playwright_otel import PlaywrightInstrumentor
472
487
 
473
- instrumentor = PlaywrightInstrumentor()
488
+ instrumentor = PlaywrightInstrumentor(client, async_client)
474
489
  instrumentor.instrument()
475
490
  return True
476
491
  except Exception as e:
@@ -1,7 +1,7 @@
1
1
  from lmnr.openllmetry_sdk.decorators.base import json_dumps
2
- from lmnr.sdk.browser.utils import _with_tracer_wrapper
2
+ from lmnr.sdk.browser.utils import with_tracer_wrapper
3
3
  from lmnr.sdk.utils import get_input_from_func_args
4
- from lmnr.version import SDK_VERSION
4
+ from lmnr.version import __version__
5
5
 
6
6
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
7
7
  from opentelemetry.instrumentation.utils import unwrap
@@ -35,7 +35,7 @@ WRAPPED_METHODS = [
35
35
  "object": "Controller",
36
36
  "method": "act",
37
37
  "span_name": "controller.act",
38
- "ignore_input": False,
38
+ "ignore_input": True,
39
39
  "ignore_output": False,
40
40
  "span_type": "DEFAULT",
41
41
  },
@@ -50,7 +50,7 @@ WRAPPED_METHODS = [
50
50
  ]
51
51
 
52
52
 
53
- @_with_tracer_wrapper
53
+ @with_tracer_wrapper
54
54
  async def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
55
55
  span_name = to_wrap.get("span_name")
56
56
  attributes = {
@@ -58,13 +58,12 @@ async def _wrap(tracer: Tracer, to_wrap, wrapped, instance, args, kwargs):
58
58
  }
59
59
  if to_wrap.get("method") == "execute_action":
60
60
  span_name = args[0] if len(args) > 0 else kwargs.get("action_name", "action")
61
- elif to_wrap.get("method") == "execute_action":
62
- attributes["lmnr.span.input"] = {
63
- "action": span_name,
64
- "params": json_dumps(
65
- args[1] if len(args) > 1 else kwargs.get("params", {})
66
- ),
67
- }
61
+ attributes["lmnr.span.input"] = json_dumps(
62
+ {
63
+ "action": span_name,
64
+ "params": args[1] if len(args) > 1 else kwargs.get("params", {}),
65
+ }
66
+ )
68
67
  else:
69
68
  if not to_wrap.get("ignore_input"):
70
69
  attributes["lmnr.span.input"] = json_dumps(
@@ -87,7 +86,7 @@ class BrowserUseInstrumentor(BaseInstrumentor):
87
86
 
88
87
  def _instrument(self, **kwargs):
89
88
  tracer_provider = kwargs.get("tracer_provider")
90
- tracer = get_tracer(__name__, SDK_VERSION, tracer_provider)
89
+ tracer = get_tracer(__name__, __version__, tracer_provider)
91
90
 
92
91
  for wrapped_method in WRAPPED_METHODS:
93
92
  wrap_package = wrapped_method.get("package")