lmnr 0.4.53.dev0__py3-none-any.whl → 0.7.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. lmnr/__init__.py +32 -11
  2. lmnr/cli/__init__.py +270 -0
  3. lmnr/cli/datasets.py +371 -0
  4. lmnr/cli/evals.py +111 -0
  5. lmnr/cli/rules.py +42 -0
  6. lmnr/opentelemetry_lib/__init__.py +70 -0
  7. lmnr/opentelemetry_lib/decorators/__init__.py +337 -0
  8. lmnr/opentelemetry_lib/litellm/__init__.py +685 -0
  9. lmnr/opentelemetry_lib/litellm/utils.py +100 -0
  10. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/__init__.py +849 -0
  11. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/config.py +13 -0
  12. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_emitter.py +211 -0
  13. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/event_models.py +41 -0
  14. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/span_utils.py +401 -0
  15. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/streaming.py +425 -0
  16. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/utils.py +332 -0
  17. lmnr/opentelemetry_lib/opentelemetry/instrumentation/anthropic/version.py +1 -0
  18. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/__init__.py +451 -0
  19. lmnr/opentelemetry_lib/opentelemetry/instrumentation/claude_agent/proxy.py +144 -0
  20. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_agent/__init__.py +100 -0
  21. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/__init__.py +476 -0
  22. lmnr/opentelemetry_lib/opentelemetry/instrumentation/cua_computer/utils.py +12 -0
  23. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +599 -0
  24. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +9 -0
  25. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/schema_utils.py +26 -0
  26. lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +330 -0
  27. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/__init__.py +488 -0
  28. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/config.py +8 -0
  29. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_emitter.py +143 -0
  30. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/event_models.py +41 -0
  31. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/span_utils.py +229 -0
  32. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/utils.py +92 -0
  33. lmnr/opentelemetry_lib/opentelemetry/instrumentation/groq/version.py +1 -0
  34. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/__init__.py +381 -0
  35. lmnr/opentelemetry_lib/opentelemetry/instrumentation/kernel/utils.py +36 -0
  36. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +121 -0
  37. lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +60 -0
  38. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/__init__.py +61 -0
  39. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/__init__.py +472 -0
  40. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +1185 -0
  41. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +305 -0
  42. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/config.py +16 -0
  43. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/embeddings_wrappers.py +312 -0
  44. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_emitter.py +100 -0
  45. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/event_models.py +41 -0
  46. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/shared/image_gen_wrappers.py +68 -0
  47. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/utils.py +197 -0
  48. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v0/__init__.py +176 -0
  49. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/__init__.py +368 -0
  50. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/assistant_wrappers.py +325 -0
  51. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/event_handler_wrapper.py +135 -0
  52. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +786 -0
  53. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openai/version.py +1 -0
  54. lmnr/opentelemetry_lib/opentelemetry/instrumentation/openhands_ai/__init__.py +388 -0
  55. lmnr/opentelemetry_lib/opentelemetry/instrumentation/opentelemetry/__init__.py +69 -0
  56. lmnr/opentelemetry_lib/opentelemetry/instrumentation/skyvern/__init__.py +191 -0
  57. lmnr/opentelemetry_lib/opentelemetry/instrumentation/threading/__init__.py +197 -0
  58. lmnr/opentelemetry_lib/tracing/__init__.py +263 -0
  59. lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +516 -0
  60. lmnr/{openllmetry_sdk → opentelemetry_lib}/tracing/attributes.py +21 -8
  61. lmnr/opentelemetry_lib/tracing/context.py +200 -0
  62. lmnr/opentelemetry_lib/tracing/exporter.py +153 -0
  63. lmnr/opentelemetry_lib/tracing/instruments.py +140 -0
  64. lmnr/opentelemetry_lib/tracing/processor.py +193 -0
  65. lmnr/opentelemetry_lib/tracing/span.py +398 -0
  66. lmnr/opentelemetry_lib/tracing/tracer.py +57 -0
  67. lmnr/opentelemetry_lib/tracing/utils.py +62 -0
  68. lmnr/opentelemetry_lib/utils/package_check.py +18 -0
  69. lmnr/opentelemetry_lib/utils/wrappers.py +11 -0
  70. lmnr/sdk/browser/__init__.py +0 -0
  71. lmnr/sdk/browser/background_send_events.py +158 -0
  72. lmnr/sdk/browser/browser_use_cdp_otel.py +100 -0
  73. lmnr/sdk/browser/browser_use_otel.py +142 -0
  74. lmnr/sdk/browser/bubus_otel.py +71 -0
  75. lmnr/sdk/browser/cdp_utils.py +518 -0
  76. lmnr/sdk/browser/inject_script.js +514 -0
  77. lmnr/sdk/browser/patchright_otel.py +151 -0
  78. lmnr/sdk/browser/playwright_otel.py +322 -0
  79. lmnr/sdk/browser/pw_utils.py +363 -0
  80. lmnr/sdk/browser/recorder/record.umd.min.cjs +84 -0
  81. lmnr/sdk/browser/utils.py +70 -0
  82. lmnr/sdk/client/asynchronous/async_client.py +180 -0
  83. lmnr/sdk/client/asynchronous/resources/__init__.py +6 -0
  84. lmnr/sdk/client/asynchronous/resources/base.py +32 -0
  85. lmnr/sdk/client/asynchronous/resources/browser_events.py +41 -0
  86. lmnr/sdk/client/asynchronous/resources/datasets.py +131 -0
  87. lmnr/sdk/client/asynchronous/resources/evals.py +266 -0
  88. lmnr/sdk/client/asynchronous/resources/evaluators.py +85 -0
  89. lmnr/sdk/client/asynchronous/resources/tags.py +83 -0
  90. lmnr/sdk/client/synchronous/resources/__init__.py +6 -0
  91. lmnr/sdk/client/synchronous/resources/base.py +32 -0
  92. lmnr/sdk/client/synchronous/resources/browser_events.py +40 -0
  93. lmnr/sdk/client/synchronous/resources/datasets.py +131 -0
  94. lmnr/sdk/client/synchronous/resources/evals.py +263 -0
  95. lmnr/sdk/client/synchronous/resources/evaluators.py +85 -0
  96. lmnr/sdk/client/synchronous/resources/tags.py +83 -0
  97. lmnr/sdk/client/synchronous/sync_client.py +191 -0
  98. lmnr/sdk/datasets/__init__.py +94 -0
  99. lmnr/sdk/datasets/file_utils.py +91 -0
  100. lmnr/sdk/decorators.py +163 -26
  101. lmnr/sdk/eval_control.py +3 -2
  102. lmnr/sdk/evaluations.py +403 -191
  103. lmnr/sdk/laminar.py +1080 -549
  104. lmnr/sdk/log.py +7 -2
  105. lmnr/sdk/types.py +246 -134
  106. lmnr/sdk/utils.py +151 -7
  107. lmnr/version.py +46 -0
  108. {lmnr-0.4.53.dev0.dist-info → lmnr-0.7.26.dist-info}/METADATA +152 -106
  109. lmnr-0.7.26.dist-info/RECORD +116 -0
  110. lmnr-0.7.26.dist-info/WHEEL +4 -0
  111. lmnr-0.7.26.dist-info/entry_points.txt +3 -0
  112. lmnr/cli.py +0 -101
  113. lmnr/openllmetry_sdk/.python-version +0 -1
  114. lmnr/openllmetry_sdk/__init__.py +0 -72
  115. lmnr/openllmetry_sdk/config/__init__.py +0 -9
  116. lmnr/openllmetry_sdk/decorators/base.py +0 -185
  117. lmnr/openllmetry_sdk/instruments.py +0 -38
  118. lmnr/openllmetry_sdk/tracing/__init__.py +0 -1
  119. lmnr/openllmetry_sdk/tracing/content_allow_list.py +0 -24
  120. lmnr/openllmetry_sdk/tracing/context_manager.py +0 -13
  121. lmnr/openllmetry_sdk/tracing/tracing.py +0 -884
  122. lmnr/openllmetry_sdk/utils/in_memory_span_exporter.py +0 -61
  123. lmnr/openllmetry_sdk/utils/package_check.py +0 -7
  124. lmnr/openllmetry_sdk/version.py +0 -1
  125. lmnr/sdk/datasets.py +0 -55
  126. lmnr-0.4.53.dev0.dist-info/LICENSE +0 -75
  127. lmnr-0.4.53.dev0.dist-info/RECORD +0 -33
  128. lmnr-0.4.53.dev0.dist-info/WHEEL +0 -4
  129. lmnr-0.4.53.dev0.dist-info/entry_points.txt +0 -3
  130. /lmnr/{openllmetry_sdk → opentelemetry_lib}/.flake8 +0 -0
  131. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/__init__.py +0 -0
  132. /lmnr/{openllmetry_sdk → opentelemetry_lib}/utils/json_encoder.py +0 -0
  133. /lmnr/{openllmetry_sdk/decorators/__init__.py → py.typed} +0 -0
lmnr/sdk/client/asynchronous/resources/evals.py
@@ -0,0 +1,266 @@
+"""Evals resource for interacting with Laminar evaluations API."""
+
+import uuid
+import warnings
+
+from typing import Any
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.log import get_default_logger
+from lmnr.sdk.types import (
+    GetDatapointsResponse,
+    InitEvaluationResponse,
+    EvaluationResultDatapoint,
+    PartialEvaluationDatapoint,
+)
+from lmnr.sdk.utils import serialize
+
+INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH = 16_000_000  # 16MB
+logger = get_default_logger(__name__)
+
+
+class AsyncEvals(BaseAsyncResource):
+    """Resource for interacting with Laminar evaluations API."""
+
+    async def init(
+        self,
+        name: str | None = None,
+        group_name: str | None = None,
+        metadata: dict[str, Any] | None = None,
+    ) -> InitEvaluationResponse:
+        """Initialize a new evaluation.
+
+        Args:
+            name (str | None, optional): Name of the evaluation. Defaults to None.
+            group_name (str | None, optional): Group name for the evaluation. Defaults to None.
+            metadata (dict[str, Any] | None, optional): Metadata to associate with the evaluation. Defaults to None.
+
+        Returns:
+            InitEvaluationResponse: The response from the initialization request.
+        """
+        response = await self._client.post(
+            self._base_url + "/v1/evals",
+            json={
+                "name": name,
+                "groupName": group_name,
+                "metadata": metadata,
+            },
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            if response.status_code == 401:
+                raise ValueError("Unauthorized. Please check your project API key.")
+            raise ValueError(f"Error initializing evaluation: {response.text}")
+        resp_json = response.json()
+        return InitEvaluationResponse.model_validate(resp_json)
+
+    async def create_evaluation(
+        self,
+        name: str | None = None,
+        group_name: str | None = None,
+        metadata: dict[str, Any] | None = None,
+    ) -> uuid.UUID:
+        """
+        Create a new evaluation and return its ID.
+
+        Parameters:
+            name (str | None, optional): Optional name of the evaluation.
+            group_name (str | None, optional): An identifier to group evaluations.
+            metadata (dict[str, Any] | None, optional): Metadata to associate with the evaluation. Defaults to None.
+
+        Returns:
+            uuid.UUID: The evaluation ID.
+        """
+        evaluation = await self.init(
+            name=name, group_name=group_name, metadata=metadata
+        )
+        return evaluation.id
+
+    async def create_datapoint(
+        self,
+        eval_id: uuid.UUID,
+        data: Any,
+        target: Any = None,
+        metadata: dict[str, Any] | None = None,
+        index: int | None = None,
+        trace_id: uuid.UUID | None = None,
+    ) -> uuid.UUID:
+        """
+        Create a datapoint for an evaluation.
+
+        Parameters:
+            eval_id (uuid.UUID): The evaluation ID.
+            data: The input data for the executor.
+            target: The target/expected output for evaluators.
+            metadata (dict[str, Any] | None, optional): Optional metadata.
+            index (int | None, optional): Optional index of the datapoint.
+            trace_id (uuid.UUID | None, optional): Optional trace ID.
+
+        Returns:
+            uuid.UUID: The datapoint ID.
+        """
+
+        datapoint_id = uuid.uuid4()
+
+        # Create a minimal datapoint first
+        partial_datapoint = PartialEvaluationDatapoint(
+            id=datapoint_id,
+            data=data,
+            target=target,
+            index=index or 0,
+            trace_id=trace_id or uuid.uuid4(),
+            executor_span_id=uuid.uuid4(),  # Will be updated when executor runs
+            metadata=metadata,
+        )
+
+        await self.save_datapoints(eval_id, [partial_datapoint])
+        return datapoint_id
+
+    async def save_datapoints(
+        self,
+        eval_id: uuid.UUID,
+        datapoints: list[EvaluationResultDatapoint | PartialEvaluationDatapoint],
+        group_name: str | None = None,
+    ):
+        """Save evaluation datapoints.
+
+        Args:
+            eval_id (uuid.UUID): The evaluation ID.
+            datapoints (list[EvaluationResultDatapoint | PartialEvaluationDatapoint]): The datapoints to save.
+            group_name (str | None, optional): Group name for the datapoints. Defaults to None.
+
+        Raises:
+            ValueError: If there's an error saving the datapoints.
+        """
+        length = INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH
+        points = [datapoint.to_dict(max_data_length=length) for datapoint in datapoints]
+        response = await self._client.post(
+            self._base_url + f"/v1/evals/{eval_id}/datapoints",
+            json={
+                "points": points,
+                "groupName": group_name,
+            },
+            headers=self._headers(),
+        )
+        if response.status_code == 413:
+            await self._retry_save_datapoints(eval_id, datapoints, group_name)
+            return
+
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error saving evaluation datapoints: [{response.status_code}] {response.text}"
+            )
+
+    async def get_datapoints(
+        self,
+        dataset_name: str,
+        offset: int,
+        limit: int,
+    ) -> GetDatapointsResponse:
+        """Get datapoints from a dataset.
+
+        Args:
+            dataset_name (str): The name of the dataset.
+            offset (int): The offset to start from.
+            limit (int): The maximum number of datapoints to return.
+
+        Returns:
+            GetDatapointsResponse: The response containing the datapoints.
+
+        Raises:
+            ValueError: If there's an error fetching the datapoints.
+        """
+        warnings.warn(
+            "Use client.datasets.pull instead",
+            DeprecationWarning,
+        )
+
+        params = {"name": dataset_name, "offset": offset, "limit": limit}
+        response = await self._client.get(
+            self._base_url + "/v1/datasets/datapoints",
+            params=params,
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            try:
+                resp_json = response.json()
+                raise ValueError(
+                    f"Error fetching datapoints: [{response.status_code}] {resp_json}"
+                )
+            except Exception:
+                raise ValueError(
+                    f"Error fetching datapoints: [{response.status_code}] {response.text}"
+                )
+        return GetDatapointsResponse.model_validate(response.json())
+
+    async def update_datapoint(
+        self,
+        eval_id: uuid.UUID,
+        datapoint_id: uuid.UUID,
+        scores: dict[str, float | int],
+        executor_output: Any | None = None,
+    ) -> None:
+        """Update a datapoint with evaluation results.
+
+        Args:
+            eval_id (uuid.UUID): The evaluation ID.
+            datapoint_id (uuid.UUID): The datapoint ID.
+            scores (dict[str, float | int]): The evaluator scores to record.
+            executor_output (Any | None, optional): The executor output. Defaults to None.
+        """
+
+        response = await self._client.post(
+            self._base_url + f"/v1/evals/{eval_id}/datapoints/{datapoint_id}",
+            json={
+                "executorOutput": (
+                    str(serialize(executor_output))[
+                        :INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH
+                    ]
+                    if executor_output is not None
+                    else None
+                ),
+                "scores": scores,
+            },
+            headers=self._headers(),
+        )
+
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error updating evaluation datapoint: [{response.status_code}] {response.text}"
+            )
+
+    async def _retry_save_datapoints(
+        self,
+        eval_id: uuid.UUID,
+        datapoints: list[EvaluationResultDatapoint | PartialEvaluationDatapoint],
+        group_name: str | None = None,
+        initial_length: int = INITIAL_EVALUATION_DATAPOINT_MAX_DATA_LENGTH,
+        max_retries: int = 20,
+    ):
+        retry = 0
+        length = initial_length
+        while retry < max_retries:
+            retry += 1
+            length = length // 2
+            logger.debug(
+                f"Retrying save datapoints: {retry} of {max_retries}, length: {length}"
+            )
+            if length == 0:
+                raise ValueError("Error saving evaluation datapoints")
+            points = [
+                datapoint.to_dict(max_data_length=length) for datapoint in datapoints
+            ]
+            response = await self._client.post(
+                self._base_url + f"/v1/evals/{eval_id}/datapoints",
+                json={
+                    "points": points,
+                    "groupName": group_name,
+                },
+                headers=self._headers(),
+            )
+            if response.status_code != 413:
+                break
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error saving evaluation datapoints: [{response.status_code}] {response.text}"
+            )
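Taken together, the `AsyncEvals` methods above allow driving an evaluation run by hand: register a run, attach datapoints, then post executor output and scores. A minimal sketch under stated assumptions — it assumes `AsyncLaminarClient` (the import path appears in the tags docstring example further down) exposes this resource as `client.evals`, and that the project API key is read from the environment; neither detail is shown in this hunk:

```python
import asyncio

from lmnr import AsyncLaminarClient  # import path taken from the tags docstring example below


async def main():
    # Assumed: the project API key is picked up from the environment (e.g. LMNR_PROJECT_API_KEY).
    client = AsyncLaminarClient()

    # Register an evaluation run, then attach a datapoint to it.
    eval_id = await client.evals.create_evaluation(name="manual-run", group_name="sketches")
    datapoint_id = await client.evals.create_datapoint(
        eval_id,
        data={"question": "What does the lmnr SDK do?"},
        target={"answer": "It instruments and evaluates LLM applications."},
    )

    # Run your own executor, then record its output and the evaluator scores.
    executor_output = {"answer": "lmnr traces and evaluates LLM apps."}
    await client.evals.update_datapoint(
        eval_id,
        datapoint_id,
        scores={"exact_match": 0.0, "relevance": 1.0},
        executor_output=executor_output,
    )


asyncio.run(main())
```

Note that `save_datapoints` falls back to `_retry_save_datapoints` on a 413 response, halving the serialized payload limit on each attempt (16 MB, 8 MB, 4 MB, ...) for at most 20 retries before giving up.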
lmnr/sdk/client/asynchronous/resources/evaluators.py
@@ -0,0 +1,85 @@
+"""Evaluators resource for creating evaluator scores."""
+
+import uuid
+from typing import Any
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.utils import format_id
+
+
+class AsyncEvaluators(BaseAsyncResource):
+    """Resource for creating evaluator scores."""
+
+    async def score(
+        self,
+        *,
+        name: str,
+        trace_id: str | int | uuid.UUID | None = None,
+        span_id: str | int | uuid.UUID | None = None,
+        metadata: dict[str, Any] | None = None,
+        score: float,
+    ) -> None:
+        """Create a score for a span.
+
+        Args:
+            name (str): Name of the score.
+            trace_id (str | int | uuid.UUID | None, optional): The trace ID to score (the score is attached to the root span).
+            span_id (str | int | uuid.UUID | None, optional): The span ID to score.
+            metadata (dict[str, Any] | None, optional): Additional metadata. Defaults to None.
+            score (float): The score value.
+
+        Raises:
+            ValueError: If there's an error creating the score.
+
+        Example:
+            Score by trace ID (will attach to root span):
+
+            >>> await laminar_client.evaluators.score(
+            ...     name="quality",
+            ...     trace_id="trace-id-here",
+            ...     score=0.95,
+            ...     metadata={"model": "gpt-4"}
+            ... )
+
+            Score by span ID:
+
+            >>> await laminar_client.evaluators.score(
+            ...     name="relevance",
+            ...     span_id="span-id-here",
+            ...     score=0.87
+            ... )
+        """
+        if trace_id is not None and span_id is not None:
+            raise ValueError("Cannot provide both trace_id and span_id. Please provide only one.")
+        if trace_id is None and span_id is None:
+            raise ValueError("Either 'trace_id' or 'span_id' must be provided.")
+
+        if trace_id is not None:
+            formatted_trace_id = format_id(trace_id)
+            payload = {
+                "name": name,
+                "traceId": formatted_trace_id,
+                "metadata": metadata,
+                "score": score,
+                "source": "Code",
+            }
+        else:
+            formatted_span_id = format_id(span_id)
+            payload = {
+                "name": name,
+                "spanId": formatted_span_id,
+                "metadata": metadata,
+                "score": score,
+                "source": "Code",
+            }
+
+        response = await self._client.post(
+            self._base_url + "/v1/evaluators/score",
+            json=payload,
+            headers=self._headers(),
+        )
+
+        if response.status_code != 200:
+            if response.status_code == 401:
+                raise ValueError("Unauthorized. Please check your project API key.")
+            raise ValueError(f"Error creating evaluator score: {response.text}")
lmnr/sdk/client/asynchronous/resources/tags.py
@@ -0,0 +1,83 @@
+"""Resource for tagging traces."""
+
+import json
+import uuid
+
+from lmnr.sdk.client.asynchronous.resources.base import BaseAsyncResource
+from lmnr.sdk.log import get_default_logger
+from lmnr.sdk.utils import format_id
+
+logger = get_default_logger(__name__)
+
+
+class AsyncTags(BaseAsyncResource):
+    """Resource for tagging traces."""
+
+    async def tag(
+        self,
+        trace_id: str | int | uuid.UUID,
+        tags: list[str] | str,
+    ):
+        """Tag a trace with a list of tags. Note that the trace must be ended
+        before tagging it. You may want to call `Laminar.flush()` after the
+        trace that you want to tag.
+
+        Args:
+            trace_id (str | int | uuid.UUID): The trace id to tag.
+            tags (list[str] | str): The tag or list of tags to add to the trace.
+
+        Raises:
+            ValueError: If the trace id is not a valid UUID.
+
+        Returns:
+            list[dict]: The response from the server.
+
+        Example:
+        ```python
+        from lmnr import Laminar, AsyncLaminarClient, observe
+
+        Laminar.initialize()
+        client = AsyncLaminarClient()
+        trace_id = None
+
+        @observe()
+        def foo():
+            global trace_id
+            trace_id = Laminar.get_trace_id()
+
+        # make sure `foo` is called outside a trace context
+        foo()
+
+        # or make sure the trace is ended by this point
+        Laminar.flush()
+
+        await client.tags.tag(trace_id, "my_tag")
+        ```
+        """
+        trace_tags = tags if isinstance(tags, list) else [tags]
+        formatted_trace_id = format_id(trace_id)
+
+        url = self._base_url + "/v1/tag"
+        payload = {
+            "traceId": formatted_trace_id,
+            "names": trace_tags,
+        }
+        response = await self._client.post(
+            url,
+            content=json.dumps(payload),
+            headers={
+                **self._headers(),
+            },
+        )
+
+        if response.status_code == 404:
+            logger.warning(
+                f"Trace {formatted_trace_id} not found. The trace may not have been ended yet."
+            )
+            return []
+
+        if response.status_code != 200:
+            raise ValueError(
+                f"Failed to tag trace: [{response.status_code}] {response.text}"
+            )
+        return response.json()
lmnr/sdk/client/synchronous/resources/__init__.py
@@ -0,0 +1,6 @@
+from lmnr.sdk.client.synchronous.resources.browser_events import BrowserEvents
+from lmnr.sdk.client.synchronous.resources.evals import Evals
+from lmnr.sdk.client.synchronous.resources.tags import Tags
+from lmnr.sdk.client.synchronous.resources.evaluators import Evaluators
+
+__all__ = ["Evals", "Evaluators", "BrowserEvents", "Tags"]
lmnr/sdk/client/synchronous/resources/base.py
@@ -0,0 +1,32 @@
+"""Base class for resource objects."""
+
+import httpx
+
+
+class BaseResource:
+    """Base class for all API resources."""
+
+    def __init__(self, client: httpx.Client, base_url: str, project_api_key: str):
+        """Initialize the resource.
+
+        Args:
+            client (httpx.Client): HTTP client instance
+            base_url (str): Base URL for the API
+            project_api_key (str): Project API key
+        """
+        self._client = client
+        self._base_url = base_url
+        self._project_api_key = project_api_key
+
+    def _headers(self) -> dict[str, str]:
+        """Generate request headers with authentication.
+
+        Returns:
+            dict[str, str]: Headers dictionary
+        """
+        assert self._project_api_key is not None, "Project API key is not set"
+        return {
+            "Authorization": "Bearer " + self._project_api_key,
+            "Content-Type": "application/json",
+            "Accept": "application/json",
+        }
lmnr/sdk/client/synchronous/resources/browser_events.py
@@ -0,0 +1,40 @@
+"""Resource for sending browser events."""
+
+import gzip
+import json
+
+from lmnr.sdk.client.synchronous.resources.base import BaseResource
+
+from lmnr.version import PYTHON_VERSION, __version__
+
+
+class BrowserEvents(BaseResource):
+    """Resource for sending browser events."""
+
+    def send(
+        self,
+        session_id: str,
+        trace_id: str,
+        events: list[dict],
+    ):
+        url = self._base_url + "/v1/browser-sessions/events"
+        payload = {
+            "sessionId": session_id,
+            "traceId": trace_id,
+            "events": events,
+            "source": f"python@{PYTHON_VERSION}",
+            "sdkVersion": __version__,
+        }
+        compressed_payload = gzip.compress(json.dumps(payload).encode("utf-8"))
+        response = self._client.post(
+            url,
+            content=compressed_payload,
+            headers={
+                **self._headers(),
+                "Content-Encoding": "gzip",
+            },
+        )
+        if response.status_code != 200:
+            raise ValueError(
+                f"Failed to send events: [{response.status_code}] {response.text}"
+            )
lmnr/sdk/client/synchronous/resources/datasets.py
@@ -0,0 +1,131 @@
+"""Datasets resource for interacting with Laminar datasets API."""
+
+import math
+import uuid
+
+from lmnr.sdk.client.synchronous.resources.base import BaseResource
+from lmnr.sdk.log import get_default_logger
+from lmnr.sdk.types import (
+    Datapoint,
+    Dataset,
+    GetDatapointsResponse,
+    PushDatapointsResponse,
+)
+from lmnr.sdk.utils import serialize
+
+logger = get_default_logger(__name__)
+
+DEFAULT_DATASET_PULL_LIMIT = 100
+DEFAULT_DATASET_PUSH_BATCH_SIZE = 100
+
+
+class Datasets(BaseResource):
+    """Resource for interacting with Laminar datasets API."""
+
+    def list_datasets(self) -> list[Dataset]:
+        """List all datasets."""
+        response = self._client.get(
+            f"{self._base_url}/v1/datasets",
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error listing datasets: [{response.status_code}] {response.text}"
+            )
+        return [Dataset.model_validate(dataset) for dataset in response.json()]
+
+    def get_dataset_by_name(self, name: str) -> list[Dataset]:
+        """Get a dataset by name."""
+        response = self._client.get(
+            f"{self._base_url}/v1/datasets",
+            params={"name": name},
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error getting dataset: [{response.status_code}] {response.text}"
+            )
+        return [Dataset.model_validate(dataset) for dataset in response.json()]
+
+    def push(
+        self,
+        points: list[Datapoint],
+        name: str | None = None,
+        id: uuid.UUID | None = None,
+        batch_size: int = DEFAULT_DATASET_PUSH_BATCH_SIZE,
+        create_dataset: bool = False,
+    ) -> PushDatapointsResponse | None:
+        """Push data to a dataset."""
+
+        if name is None and id is None:
+            raise ValueError("Either name or id must be provided")
+
+        if name is not None and id is not None:
+            raise ValueError("Only one of name or id must be provided")
+
+        if create_dataset and name is None:
+            raise ValueError("Name must be provided when creating a new dataset")
+
+        identifier = {"name": name} if name is not None else {"datasetId": id}
+
+        batch_num = 0
+        total_batches = math.ceil(len(points) / batch_size)
+        response = None
+        for i in range(0, len(points), batch_size):
+            batch_num += 1
+            logger.debug(f"Pushing batch {batch_num} of {total_batches}")
+            batch = points[i : i + batch_size]
+            response = self._client.post(
+                f"{self._base_url}/v1/datasets/datapoints",
+                json={
+                    **identifier,
+                    "datapoints": [serialize(point) for point in batch],
+                    "createDataset": create_dataset,
+                },
+                headers=self._headers(),
+            )
+
+            # 201 when creating a new dataset
+            if response.status_code not in [200, 201]:
+                raise ValueError(
+                    f"Error pushing data to dataset: [{response.status_code}] {response.text}"
+                )
+
+            response = PushDatapointsResponse.model_validate(response.json())
+        # Currently, the response only contains the dataset ID,
+        # so it's safe to return the last response only.
+        return response
+
+    def pull(
+        self,
+        name: str | None = None,
+        id: uuid.UUID | None = None,
+        # TODO: move const to one file, import in CLI
+        limit: int = DEFAULT_DATASET_PULL_LIMIT,
+        offset: int = 0,
+    ) -> GetDatapointsResponse:
+        """Pull data from a dataset."""
+
+        if name is None and id is None:
+            raise ValueError("Either name or id must be provided")
+
+        if name is not None and id is not None:
+            raise ValueError("Only one of name or id must be provided")
+
+        identifier = {"name": name} if name is not None else {"datasetId": id}
+
+        params = {
+            **identifier,
+            "offset": offset,
+            "limit": limit,
+        }
+        response = self._client.get(
+            f"{self._base_url}/v1/datasets/datapoints",
+            params=params,
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            raise ValueError(
+                f"Error pulling data from dataset: [{response.status_code}] {response.text}"
+            )
+        return GetDatapointsResponse.model_validate(response.json())
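For completeness, a usage sketch of the synchronous `Datasets` resource. It assumes the synchronous client is exported as `LaminarClient` (only `AsyncLaminarClient` appears in the docstrings above) and that it exposes this resource as `client.datasets`, which matches the deprecation hint `"Use client.datasets.pull instead"` emitted by `AsyncEvals.get_datapoints`:

```python
from lmnr import LaminarClient  # assumed export name for the synchronous client

# Assumed: the project API key is picked up from the environment.
client = LaminarClient()

# Enumerate the project's datasets.
for dataset in client.datasets.list_datasets():
    print(dataset)

# Pull one page of datapoints by dataset name; page through with offset/limit.
page = client.datasets.pull(name="my-dataset", limit=100, offset=0)
print(page)  # GetDatapointsResponse; exact fields are defined in lmnr.sdk.types
```

`push()` is the write-side counterpart: it uploads `Datapoint` objects in batches of 100 (`DEFAULT_DATASET_PUSH_BATCH_SIZE`), accepts exactly one of `name` or `id`, and with `create_dataset=True` creates the named dataset if it does not already exist.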