lmnr 0.5.1a0__py3-none-any.whl → 0.5.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. lmnr/__init__.py +2 -10
  2. lmnr/cli.py +10 -8
  3. lmnr/{openllmetry_sdk → opentelemetry_lib}/__init__.py +8 -36
  4. lmnr/{openllmetry_sdk → opentelemetry_lib}/decorators/base.py +27 -20
  5. lmnr/{openllmetry_sdk → opentelemetry_lib}/instruments.py +2 -0
  6. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +454 -0
  7. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
  8. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +216 -0
  9. lmnr/opentelemetry_lib/tracing/__init__.py +1 -0
  10. lmnr/opentelemetry_lib/tracing/context_manager.py +13 -0
  11. lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/tracing.py +253 -257
  12. lmnr/sdk/browser/browser_use_otel.py +20 -3
  13. lmnr/sdk/browser/patchright_otel.py +177 -0
  14. lmnr/sdk/browser/playwright_otel.py +55 -62
  15. lmnr/sdk/browser/pw_utils.py +122 -116
  16. lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +98 -0
  17. lmnr/sdk/client/asynchronous/async_client.py +0 -34
  18. lmnr/sdk/client/asynchronous/resources/__init__.py +0 -4
  19. lmnr/sdk/client/asynchronous/resources/agent.py +115 -6
  20. lmnr/sdk/client/synchronous/resources/__init__.py +1 -3
  21. lmnr/sdk/client/synchronous/resources/agent.py +112 -6
  22. lmnr/sdk/client/synchronous/sync_client.py +0 -36
  23. lmnr/sdk/decorators.py +19 -5
  24. lmnr/sdk/eval_control.py +3 -2
  25. lmnr/sdk/evaluations.py +8 -14
  26. lmnr/sdk/laminar.py +10 -10
  27. lmnr/sdk/types.py +86 -170
  28. lmnr/sdk/utils.py +8 -1
  29. lmnr/version.py +1 -1
  30. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/METADATA +58 -58
  31. lmnr-0.5.3.dist-info/RECORD +55 -0
  32. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/WHEEL +1 -1
  33. lmnr/openllmetry_sdk/tracing/__init__.py +0 -0
  34. lmnr/sdk/browser/rrweb/rrweb.min.js +0 -18
  35. lmnr/sdk/client/asynchronous/resources/pipeline.py +0 -89
  36. lmnr/sdk/client/asynchronous/resources/semantic_search.py +0 -60
  37. lmnr/sdk/client/synchronous/resources/pipeline.py +0 -89
  38. lmnr/sdk/client/synchronous/resources/semantic_search.py +0 -60
  39. lmnr-0.5.1a0.dist-info/RECORD +0 -54
  40. /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
  41. /lmnr/{openllmetry_sdk → opentelemetry_lib}/config/__init__.py +0 -0
  42. /lmnr/{openllmetry_sdk → opentelemetry_lib}/decorators/__init__.py +0 -0
  43. /lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +0 -0
  44. /lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/content_allow_list.py +0 -0
  45. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
  46. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/in_memory_span_exporter.py +0 -0
  47. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
  48. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/package_check.py +0 -0
  49. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/LICENSE +0 -0
  50. {lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/entry_points.txt +0 -0
lmnr/sdk/eval_control.py CHANGED
@@ -1,4 +1,5 @@
  from contextvars import ContextVar
 
- PREPARE_ONLY = ContextVar("__lmnr_prepare_only", default=False)
- EVALUATION_INSTANCE = ContextVar("__lmnr_evaluation_instance", default=None)
+
+ PREPARE_ONLY: ContextVar[bool] = ContextVar("__lmnr_prepare_only", default=False)
+ EVALUATION_INSTANCE = ContextVar("__lmnr_evaluation_instance")
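Worth noting in this hunk: `EVALUATION_INSTANCE` loses its `default=None`. With no default, `ContextVar.get()` raises `LookupError` until the variable has been set, so callers must set it (or pass an explicit fallback) instead of silently receiving `None`. A minimal sketch of that standard-library behavior (not package code):

```python
from contextvars import ContextVar

# Mirrors the new declaration: no default value.
EVALUATION_INSTANCE = ContextVar("__lmnr_evaluation_instance")

try:
    EVALUATION_INSTANCE.get()          # raises: nothing has been set yet
except LookupError:
    print("evaluation instance not set")

token = EVALUATION_INSTANCE.set("current-evaluation")
print(EVALUATION_INSTANCE.get())       # "current-evaluation"
EVALUATION_INSTANCE.reset(token)       # restore the unset state

print(EVALUATION_INSTANCE.get(None))   # an explicit fallback still works -> None
```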
lmnr/sdk/evaluations.py CHANGED
@@ -5,8 +5,8 @@ import dotenv
  from tqdm import tqdm
  from typing import Any, Awaitable, Optional, Set, Union
 
- from lmnr.openllmetry_sdk.instruments import Instruments
- from lmnr.openllmetry_sdk.tracing.attributes import SPAN_TYPE
+ from lmnr.opentelemetry_lib.instruments import Instruments
+ from lmnr.opentelemetry_lib.tracing.attributes import SPAN_TYPE
 
  from lmnr.sdk.client.asynchronous.async_client import AsyncLaminarClient
  from lmnr.sdk.client.synchronous.sync_client import LaminarClient
@@ -176,7 +176,6 @@ class Evaluation:
 
  base_url = base_url or from_env("LMNR_BASE_URL") or "https://api.lmnr.ai"
 
- self.is_finished = False
  self.reporter = EvaluationReporter(base_url)
  if isinstance(data, list):
  self.data = [
@@ -196,12 +195,7 @@ class Evaluation:
  self.upload_tasks = []
  self.base_http_url = f"{base_url}:{http_port or 443}"
 
- api_key = project_api_key
- if not api_key:
- dotenv_path = dotenv.find_dotenv(usecwd=True)
- api_key = dotenv.get_key(
- dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
- )
+ api_key = project_api_key or from_env("LMNR_PROJECT_API_KEY")
  if not api_key:
  raise ValueError(
  "Please initialize the Laminar object with"
@@ -225,8 +219,6 @@ class Evaluation:
  )
 
  async def run(self) -> Awaitable[None]:
- if self.is_finished:
- raise Exception("Evaluation is already finished")
  return await self._run()
 
  async def _run(self) -> None:
@@ -253,17 +245,19 @@ class Evaluation:
  self._logger.debug("All upload tasks completed")
  except Exception as e:
  self.reporter.stopWithError(e)
- self.is_finished = True
  await self._shutdown()
  return
 
  average_scores = get_average_scores(result_datapoints)
  self.reporter.stop(average_scores, evaluation.projectId, evaluation.id)
- self.is_finished = True
  await self._shutdown()
 
  async def _shutdown(self):
- L.shutdown()
+ # We use flush() instead of shutdown() because multiple evaluations
+ # can be run sequentially in the same process. `shutdown()` would
+ # close the OTLP exporter and we wouldn't be able to export traces in
+ # the next evaluation.
+ L.flush()
  await self.client.close()
  if isinstance(self.data, LaminarDataset) and self.data.client:
  self.data.client.close()
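The flush-versus-shutdown distinction the new comment describes maps directly onto the underlying OpenTelemetry tracer provider: `force_flush()` drains queued spans but leaves the exporter usable, while `shutdown()` closes it for good. A hedged sketch in plain OpenTelemetry terms (illustrative only; assuming `L` above is the `Laminar` facade that wraps this):

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
tracer = provider.get_tracer("evaluation-demo")

for name in ("evaluation-1", "evaluation-2"):   # sequential evaluations, one process
    with tracer.start_as_current_span(name):
        pass                                    # ... run datapoints, record scores ...
    provider.force_flush()                      # export what's pending, keep exporter alive

provider.shutdown()                             # only once, at real process teardown
```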
lmnr/sdk/laminar.py CHANGED
@@ -1,15 +1,15 @@
  from contextlib import contextmanager
  from contextvars import Context
- from lmnr.openllmetry_sdk import TracerManager
- from lmnr.openllmetry_sdk.instruments import Instruments
- from lmnr.openllmetry_sdk import get_tracer
- from lmnr.openllmetry_sdk.tracing.attributes import (
+ from lmnr.opentelemetry_lib import TracerManager
+ from lmnr.opentelemetry_lib.instruments import Instruments
+ from lmnr.opentelemetry_lib.tracing import get_tracer
+ from lmnr.opentelemetry_lib.tracing.attributes import (
  ASSOCIATION_PROPERTIES,
  Attributes,
  SPAN_TYPE,
  )
- from lmnr.openllmetry_sdk.config import MAX_MANUAL_SPAN_PAYLOAD_SIZE
- from lmnr.openllmetry_sdk.decorators.base import json_dumps
+ from lmnr.opentelemetry_lib.config import MAX_MANUAL_SPAN_PAYLOAD_SIZE
+ from lmnr.opentelemetry_lib.decorators.base import json_dumps
  from opentelemetry import context as context_api, trace
  from opentelemetry.context import attach, detach
  from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
@@ -28,13 +28,13 @@ import os
  import re
  import uuid
 
- from lmnr.openllmetry_sdk.tracing.attributes import (
+ from lmnr.opentelemetry_lib.tracing.attributes import (
  SESSION_ID,
  SPAN_INPUT,
  SPAN_OUTPUT,
  TRACE_TYPE,
  )
- from lmnr.openllmetry_sdk.tracing.tracing import (
+ from lmnr.opentelemetry_lib.tracing.tracing import (
  get_association_properties,
  remove_association_properties,
  set_association_properties,
@@ -652,8 +652,8 @@ class Laminar:
 
  @classmethod
  def shutdown(cls):
- cls.__initialized = False
- return TracerManager.shutdown()
+ if cls.is_initialized():
+ TracerManager.shutdown()
 
  @classmethod
  def set_session(
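The practical upshot of the `openllmetry_sdk` → `opentelemetry_lib` rename is a new import path for everything shown above; the symbols themselves keep their names. A before/after sketch, relevant only if you were importing these internal modules directly:

```python
# 0.5.1a0 (old internal package name)
from lmnr.openllmetry_sdk.instruments import Instruments
from lmnr.openllmetry_sdk.tracing.attributes import SPAN_TYPE

# 0.5.3 (new internal package name, same symbols)
from lmnr.opentelemetry_lib.instruments import Instruments
from lmnr.opentelemetry_lib.tracing.attributes import SPAN_TYPE
```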
lmnr/sdk/types.py CHANGED
@@ -1,118 +1,20 @@
  import logging
  import datetime
  from enum import Enum
- import httpx
  import json
  from opentelemetry.trace import SpanContext, TraceFlags
  import pydantic
  from typing import Any, Awaitable, Callable, Literal, Optional, Union
  import uuid
 
- from .utils import serialize
-
-
- class ChatMessage(pydantic.BaseModel):
- role: str
- content: str
-
+ import pydantic.alias_generators
 
- class ConditionedValue(pydantic.BaseModel):
- condition: str
- value: "NodeInput"
+ from .utils import serialize
 
 
  Numeric = Union[int, float]
  NumericTypes = (int, float) # for use with isinstance
 
- NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
- PipelineOutput = Union[NodeInput]
-
-
- class PipelineRunRequest(pydantic.BaseModel):
- inputs: dict[str, NodeInput]
- pipeline: str
- env: dict[str, str] = pydantic.Field(default_factory=dict)
- metadata: dict[str, str] = pydantic.Field(default_factory=dict)
- stream: bool = pydantic.Field(default=False)
- parent_span_id: Optional[uuid.UUID] = pydantic.Field(default=None)
- trace_id: Optional[uuid.UUID] = pydantic.Field(default=None)
-
- # uuid is not serializable by default, so we need to convert it to a string
- def to_dict(self):
- return {
- "inputs": {
- k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
- for k, v in self.inputs.items()
- },
- "pipeline": self.pipeline,
- "env": self.env,
- "metadata": self.metadata,
- "stream": self.stream,
- "parentSpanId": str(self.parent_span_id) if self.parent_span_id else None,
- "traceId": str(self.trace_id) if self.trace_id else None,
- }
-
-
- class PipelineRunResponse(pydantic.BaseModel):
- outputs: dict[str, dict[str, PipelineOutput]]
- run_id: str
-
-
- class SemanticSearchRequest(pydantic.BaseModel):
- query: str
- dataset_id: uuid.UUID
- limit: Optional[int] = pydantic.Field(default=None)
- threshold: Optional[float] = pydantic.Field(default=None, ge=0.0, le=1.0)
-
- def to_dict(self):
- res = {
- "query": self.query,
- "datasetId": str(self.dataset_id),
- }
- if self.limit is not None:
- res["limit"] = self.limit
- if self.threshold is not None:
- res["threshold"] = self.threshold
- return res
-
-
- class SemanticSearchResult(pydantic.BaseModel):
- dataset_id: uuid.UUID
- score: float
- data: dict[str, Any]
- content: str
-
-
- class SemanticSearchResponse(pydantic.BaseModel):
- results: list[SemanticSearchResult]
-
-
- class PipelineRunError(Exception):
- error_code: str
- error_message: str
-
- def __init__(self, response: httpx.Response):
- try:
- resp_json = response.json()
- try:
- resp_dict = dict(resp_json)
- except Exception:
- resp_dict = {}
- self.error_code = resp_dict.get("error_code")
- self.error_message = resp_dict.get("error_message")
- super().__init__(self.error_message)
- except Exception:
- super().__init__(response.text)
-
- def __str__(self) -> str:
- try:
- return str(
- {"error_code": self.error_code, "error_message": self.error_message}
- )
- except Exception:
- return super().__str__()
-
-
  EvaluationDatapointData = Any # non-null, must be JSON-serializable
  EvaluationDatapointTarget = Optional[Any] # must be JSON-serializable
  EvaluationDatapointMetadata = Optional[Any] # must be JSON-serializable
@@ -322,52 +224,18 @@ class LaminarSpanContext(pydantic.BaseModel):
  class ModelProvider(str, Enum):
  ANTHROPIC = "anthropic"
  BEDROCK = "bedrock"
-
-
- # class AgentChatMessageContentTextBlock(pydantic.BaseModel):
- # type: Literal["text"]
- # text: str
-
-
- # class AgentChatMessageImageUrlBlock(pydantic.BaseModel):
- # type: Literal["image"]
- # imageUrl: str
-
-
- # class AgentChatMessageImageBase64Block(pydantic.BaseModel):
- # type: Literal["image"]
- # imageB64: str
-
-
- # class AgentChatMessageImageBlock(pydantic.RootModel):
- # root: Union[AgentChatMessageImageUrlBlock, AgentChatMessageImageBase64Block]
-
-
- # class AgentChatMessageContentBlock(pydantic.RootModel):
- # root: Union[AgentChatMessageContentTextBlock, AgentChatMessageImageBlock]
-
-
- # class AgentChatMessageContent(pydantic.RootModel):
- # root: Union[str, list[AgentChatMessageContentBlock]]
-
-
- # class AgentChatMessage(pydantic.BaseModel):
- # role: str
- # content: AgentChatMessageContent
- # name: Optional[str] = None
- # toolCallId: Optional[str] = None
- # isStateMessage: bool = False
-
-
- # class AgentState(pydantic.BaseModel):
- # messages: str = pydantic.Field(default="")
- # messages: list[AgentChatMessage] = pydantic.Field(default_factory=list)
- # browser_state: Optional[BrowserState] = None
+ OPENAI = "openai"
+ GEMINI = "gemini"
 
 
  class RunAgentRequest(pydantic.BaseModel):
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel,
+ populate_by_name=True,
+ )
  prompt: str
- state: Optional[str] = pydantic.Field(default=None)
+ storage_state: Optional[str] = pydantic.Field(default=None)
+ agent_state: Optional[str] = pydantic.Field(default=None)
  parent_span_context: Optional[str] = pydantic.Field(default=None)
  model_provider: Optional[ModelProvider] = pydantic.Field(default=None)
  model: Optional[str] = pydantic.Field(default=None)
@@ -375,50 +243,98 @@ class RunAgentRequest(pydantic.BaseModel):
  enable_thinking: bool = pydantic.Field(default=True)
  cdp_url: Optional[str] = pydantic.Field(default=None)
  return_screenshots: bool = pydantic.Field(default=False)
-
- def to_dict(self):
- result = {
- "prompt": self.prompt,
- "stream": self.stream,
- "enableThinking": self.enable_thinking,
- "returnScreenshots": self.return_screenshots,
- }
- if self.state:
- result["state"] = self.state
- if self.parent_span_context:
- result["parentSpanContext"] = self.parent_span_context
- if self.model_provider:
- result["modelProvider"] = self.model_provider.value
- if self.model:
- result["model"] = self.model
- if self.cdp_url:
- result["cdpUrl"] = self.cdp_url
- return result
+ return_storage_state: bool = pydantic.Field(default=False)
+ return_agent_state: bool = pydantic.Field(default=False)
+ timeout: Optional[int] = pydantic.Field(default=None)
+ max_steps: Optional[int] = pydantic.Field(default=None)
+ thinking_token_budget: Optional[int] = pydantic.Field(default=None)
+ start_url: Optional[str] = pydantic.Field(default=None)
+ disable_give_control: bool = pydantic.Field(default=False)
+ user_agent: Optional[str] = pydantic.Field(default=None)
 
 
  class ActionResult(pydantic.BaseModel):
- isDone: bool = pydantic.Field(default=False)
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel
+ )
+ is_done: bool = pydantic.Field(default=False)
  content: Optional[str] = pydantic.Field(default=None)
  error: Optional[str] = pydantic.Field(default=None)
 
 
  class AgentOutput(pydantic.BaseModel):
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel
+ )
  result: ActionResult = pydantic.Field(default_factory=ActionResult)
+ # Browser state with data related to auth, such as cookies.
+ # A stringified JSON object.
+ # Only returned if return_storage_state is True.
+ # CAUTION: This object may become large. It also may contain sensitive data.
+ storage_state: Optional[str] = pydantic.Field(default=None)
+ # Agent state with data related to the agent's state, such as the chat history.
+ # A stringified JSON object.
+ # Only returned if return_agent_state is True.
+ # CAUTION: This object is large.
+ agent_state: Optional[str] = pydantic.Field(default=None)
 
 
  class StepChunkContent(pydantic.BaseModel):
- chunkType: Literal["step"]
- messageId: uuid.UUID
- actionResult: ActionResult
- summary: str
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel
+ )
+ chunk_type: Literal["step"] = pydantic.Field(default="step")
+ message_id: uuid.UUID = pydantic.Field()
+ action_result: ActionResult = pydantic.Field()
+ summary: str = pydantic.Field()
+ screenshot: Optional[str] = pydantic.Field(default=None)
+
+
+ class TimeoutChunkContent(pydantic.BaseModel):
+ """Chunk content to indicate that timeout has been hit. The only difference from a regular step
+ is the chunk type. This is the last chunk in the stream.
+ """
+
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel
+ )
+ chunk_type: Literal["timeout"] = pydantic.Field(default="timeout")
+ message_id: uuid.UUID = pydantic.Field()
+ summary: str = pydantic.Field()
  screenshot: Optional[str] = pydantic.Field(default=None)
 
 
  class FinalOutputChunkContent(pydantic.BaseModel):
- chunkType: Literal["finalOutput"]
- messageId: uuid.UUID
- content: AgentOutput
+ """Chunk content to indicate that the agent has finished executing. This
+ is the last chunk in the stream.
+ """
+
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel
+ )
+
+ chunk_type: Literal["finalOutput"] = pydantic.Field(default="finalOutput")
+ message_id: uuid.UUID = pydantic.Field()
+ content: AgentOutput = pydantic.Field()
+
+
+ class ErrorChunkContent(pydantic.BaseModel):
+ """Chunk content to indicate that an error has occurred. Typically, this
+ is the last chunk in the stream.
+ """
+
+ model_config = pydantic.ConfigDict(
+ alias_generator=pydantic.alias_generators.to_camel
+ )
+ chunk_type: Literal["error"] = pydantic.Field(default="error")
+ message_id: uuid.UUID = pydantic.Field()
+ error: str = pydantic.Field()
 
 
  class RunAgentResponseChunk(pydantic.RootModel):
- root: Union[StepChunkContent, FinalOutputChunkContent]
+ root: Union[
+ StepChunkContent,
+ FinalOutputChunkContent,
+ ErrorChunkContent,
+ TimeoutChunkContent,
+ ]
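All of these models now share the same pydantic v2 configuration: `alias_generator=to_camel` keeps the wire format camelCase while the Python attributes become snake_case (which is why the README examples further down switch from `chunk.chunkType` to `chunk.chunk_type`). A small illustrative sketch of that mechanism, using a hypothetical model rather than one from the SDK:

```python
import pydantic
import pydantic.alias_generators


class Chunk(pydantic.BaseModel):
    model_config = pydantic.ConfigDict(
        alias_generator=pydantic.alias_generators.to_camel,
        populate_by_name=True,   # accept snake_case too, as RunAgentRequest does
    )
    chunk_type: str
    message_id: str


# Incoming JSON stays camelCase ...
chunk = Chunk.model_validate({"chunkType": "step", "messageId": "abc"})
print(chunk.chunk_type)                  # "step" (snake_case attribute access)

# ... and serializing by alias reproduces the camelCase wire format.
print(chunk.model_dump(by_alias=True))   # {'chunkType': 'step', 'messageId': 'abc'}
```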
lmnr/sdk/utils.py CHANGED
@@ -88,13 +88,20 @@ def get_input_from_func_args(
  is_method: bool = False,
  func_args: list[typing.Any] = [],
  func_kwargs: dict[str, typing.Any] = {},
+ ignore_inputs: typing.Optional[list[str]] = None,
  ) -> dict[str, typing.Any]:
  # Remove implicitly passed "self" or "cls" argument for
  # instance or class methods
- res = func_kwargs.copy()
+ res = {
+ k: v
+ for k, v in func_kwargs.items()
+ if not (ignore_inputs and k in ignore_inputs)
+ }
  for i, k in enumerate(inspect.signature(func).parameters.keys()):
  if is_method and k in ["self", "cls"]:
  continue
+ if ignore_inputs and k in ignore_inputs:
+ continue
  # If param has default value, then it's not present in func args
  if i < len(func_args):
  res[k] = func_args[i]
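The new `ignore_inputs` parameter lets callers drop named arguments (secrets, large payloads) from the recorded span input; judging by the companion change in `lmnr/sdk/decorators.py`, it is presumably threaded through from the `observe` decorator. A simplified re-implementation of just the filtering logic, for illustration (not the SDK function itself):

```python
import inspect
import typing


def capture_inputs(
    func: typing.Callable,
    func_args: list[typing.Any],
    func_kwargs: dict[str, typing.Any],
    ignore_inputs: typing.Optional[list[str]] = None,
) -> dict[str, typing.Any]:
    ignored = set(ignore_inputs or [])
    # Keyword arguments, minus anything explicitly ignored.
    res = {k: v for k, v in func_kwargs.items() if k not in ignored}
    # Positional arguments, matched to parameter names, minus ignored ones.
    for i, name in enumerate(inspect.signature(func).parameters):
        if name in ignored or i >= len(func_args):
            continue
        res[name] = func_args[i]
    return res


def query(prompt: str, api_key: str, temperature: float = 0.0): ...


print(capture_inputs(query, ["hi", "sk-secret"], {"temperature": 0.2},
                     ignore_inputs=["api_key"]))
# -> {'temperature': 0.2, 'prompt': 'hi'}   (api_key never reaches the trace)
```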
lmnr/version.py CHANGED
@@ -3,7 +3,7 @@ import httpx
  from packaging import version
 
 
- __version__ = "0.5.1a0"
+ __version__ = "0.5.3"
  PYTHON_VERSION = f"{sys.version_info.major}.{sys.version_info.minor}"
 
 
{lmnr-0.5.1a0.dist-info → lmnr-0.5.3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: lmnr
- Version: 0.5.1a0
+ Version: 0.5.3
  Summary: Python SDK for Laminar
  License: Apache-2.0
  Author: lmnr.ai
@@ -41,67 +41,67 @@ Provides-Extra: watsonx
  Provides-Extra: weaviate
  Requires-Dist: argparse (>=1.0)
  Requires-Dist: grpcio (<1.68.0)
- Requires-Dist: httpx (>=0.28.1)
+ Requires-Dist: httpx (>=0.25.0)
  Requires-Dist: opentelemetry-api (>=1.31.1)
  Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.31.1)
  Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.31.1)
- Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.38.12) ; extra == "alephalpha"
- Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.38.12) ; extra == "anthropic"
- Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.38.12) ; extra == "bedrock"
- Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.38.12) ; extra == "chromadb"
- Requires-Dist: opentelemetry-instrumentation-cohere (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-cohere (>=0.38.12) ; extra == "cohere"
- Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.38.12) ; extra == "google-generativeai"
- Requires-Dist: opentelemetry-instrumentation-groq (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-groq (>=0.38.12) ; extra == "groq"
- Requires-Dist: opentelemetry-instrumentation-haystack (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-haystack (>=0.38.12) ; extra == "haystack"
- Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.38.12) ; extra == "lancedb"
- Requires-Dist: opentelemetry-instrumentation-langchain (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-langchain (>=0.38.12) ; extra == "langchain"
- Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.38.12) ; extra == "llamaindex"
- Requires-Dist: opentelemetry-instrumentation-marqo (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-marqo (>=0.38.12) ; extra == "marqo"
- Requires-Dist: opentelemetry-instrumentation-milvus (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-milvus (>=0.38.12) ; extra == "milvus"
- Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.38.12) ; extra == "mistralai"
- Requires-Dist: opentelemetry-instrumentation-ollama (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-ollama (>=0.38.12) ; extra == "ollama"
- Requires-Dist: opentelemetry-instrumentation-openai (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-openai (>=0.38.12) ; extra == "openai"
- Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.38.12) ; extra == "pinecone"
- Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.38.12) ; extra == "qdrant"
- Requires-Dist: opentelemetry-instrumentation-replicate (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-replicate (>=0.38.12) ; extra == "replicate"
+ Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.39.2) ; extra == "alephalpha"
+ Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.39.2) ; extra == "anthropic"
+ Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.39.2) ; extra == "bedrock"
+ Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.39.2) ; extra == "chromadb"
+ Requires-Dist: opentelemetry-instrumentation-cohere (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-cohere (>=0.39.2) ; extra == "cohere"
+ Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.39.2) ; extra == "google-generativeai"
+ Requires-Dist: opentelemetry-instrumentation-groq (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-groq (>=0.39.2) ; extra == "groq"
+ Requires-Dist: opentelemetry-instrumentation-haystack (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-haystack (>=0.39.2) ; extra == "haystack"
+ Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.39.2) ; extra == "lancedb"
+ Requires-Dist: opentelemetry-instrumentation-langchain (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-langchain (>=0.39.2) ; extra == "langchain"
+ Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.39.2) ; extra == "llamaindex"
+ Requires-Dist: opentelemetry-instrumentation-marqo (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-marqo (>=0.39.2) ; extra == "marqo"
+ Requires-Dist: opentelemetry-instrumentation-milvus (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-milvus (>=0.39.2) ; extra == "milvus"
+ Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.39.2) ; extra == "mistralai"
+ Requires-Dist: opentelemetry-instrumentation-ollama (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-ollama (>=0.39.2) ; extra == "ollama"
+ Requires-Dist: opentelemetry-instrumentation-openai (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-openai (>=0.39.2) ; extra == "openai"
+ Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.39.2) ; extra == "pinecone"
+ Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.39.2) ; extra == "qdrant"
+ Requires-Dist: opentelemetry-instrumentation-replicate (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-replicate (>=0.39.2) ; extra == "replicate"
  Requires-Dist: opentelemetry-instrumentation-requests (>=0.52b0)
- Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.38.12) ; extra == "sagemaker"
+ Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.39.2) ; extra == "sagemaker"
  Requires-Dist: opentelemetry-instrumentation-sqlalchemy (>=0.52b0)
  Requires-Dist: opentelemetry-instrumentation-threading (>=0.52b0)
- Requires-Dist: opentelemetry-instrumentation-together (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-together (>=0.38.12) ; extra == "together"
- Requires-Dist: opentelemetry-instrumentation-transformers (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-transformers (>=0.38.12) ; extra == "transformers"
+ Requires-Dist: opentelemetry-instrumentation-together (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-together (>=0.39.2) ; extra == "together"
+ Requires-Dist: opentelemetry-instrumentation-transformers (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-transformers (>=0.39.2) ; extra == "transformers"
  Requires-Dist: opentelemetry-instrumentation-urllib3 (>=0.52b0)
- Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.38.12) ; extra == "vertexai"
- Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.38.12) ; extra == "watsonx"
- Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.38.12) ; extra == "all"
- Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.38.12) ; extra == "weaviate"
+ Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.39.2) ; extra == "vertexai"
+ Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.39.2) ; extra == "watsonx"
+ Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.39.2) ; extra == "all"
+ Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.39.2) ; extra == "weaviate"
  Requires-Dist: opentelemetry-sdk (>=1.31.1)
  Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.2)
- Requires-Dist: pydantic (>=2.0.3)
+ Requires-Dist: pydantic (>=2.0.3,<3.0.0)
  Requires-Dist: python-dotenv (>=1.0)
  Requires-Dist: tenacity (>=8.0)
  Requires-Dist: tqdm (>=4.0)
@@ -222,7 +222,7 @@ def handle_user_request(topic: str):
  Laminar allows you to automatically instrument majority of the most popular LLM, Vector DB, database, requests, and other libraries.
 
  If you want to automatically instrument a default set of libraries, then simply do NOT pass `instruments` argument to `.initialize()`.
- See the full list of available instrumentations in the [enum](https://github.com/lmnr-ai/lmnr-python/blob/main/src/lmnr/openllmetry_sdk/instruments.py).
+ See the full list of available instrumentations in the [enum](https://github.com/lmnr-ai/lmnr-python/blob/main/src/lmnr/opentelemetry_lib/instruments.py).
 
  If you want to automatically instrument only specific LLM, Vector DB, or other
  calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
@@ -335,9 +335,9 @@ for chunk in client.agent.run(
  prompt="What is the weather in London today?",
  stream=True
  ):
- if chunk.chunkType == 'step':
+ if chunk.chunk_type == 'step':
  print(chunk.summary)
- elif chunk.chunkType == 'finalOutput':
+ elif chunk.chunk_type == 'finalOutput':
  print(chunk.content.result.content)
  ```
 
@@ -371,9 +371,9 @@ async for chunk in client.agent.run(
  prompt="What is the weather in London today?",
  stream=True
  ):
- if chunk.chunkType == 'step':
+ if chunk.chunk_type == 'step':
  print(chunk.summary)
- elif chunk.chunkType == 'finalOutput':
+ elif chunk.chunk_type == 'finalOutput':
  print(chunk.content.result.content)
  ```
379