lmnr 0.4.17b0__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. lmnr/__init__.py +5 -0
  2. lmnr/cli.py +39 -0
  3. lmnr/sdk/__init__.py +0 -0
  4. lmnr/sdk/decorators.py +66 -0
  5. lmnr/sdk/evaluations.py +354 -0
  6. lmnr/sdk/laminar.py +403 -0
  7. lmnr/sdk/log.py +39 -0
  8. lmnr/sdk/types.py +155 -0
  9. lmnr/sdk/utils.py +99 -0
  10. lmnr/traceloop_sdk/.flake8 +12 -0
  11. lmnr/traceloop_sdk/.python-version +1 -0
  12. lmnr/traceloop_sdk/__init__.py +89 -0
  13. lmnr/traceloop_sdk/config/__init__.py +9 -0
  14. lmnr/traceloop_sdk/decorators/__init__.py +0 -0
  15. lmnr/traceloop_sdk/decorators/base.py +178 -0
  16. lmnr/traceloop_sdk/instruments.py +34 -0
  17. lmnr/traceloop_sdk/tests/__init__.py +1 -0
  18. lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_and_external_association_properties.yaml +101 -0
  19. lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_association_properties.yaml +99 -0
  20. lmnr/traceloop_sdk/tests/cassettes/test_manual/test_manual_report.yaml +98 -0
  21. lmnr/traceloop_sdk/tests/cassettes/test_manual/test_resource_attributes.yaml +98 -0
  22. lmnr/traceloop_sdk/tests/cassettes/test_privacy_no_prompts/test_simple_workflow.yaml +199 -0
  23. lmnr/traceloop_sdk/tests/cassettes/test_prompt_management/test_prompt_management.yaml +202 -0
  24. lmnr/traceloop_sdk/tests/cassettes/test_sdk_initialization/test_resource_attributes.yaml +199 -0
  25. lmnr/traceloop_sdk/tests/cassettes/test_tasks/test_task_io_serialization_with_langchain.yaml +96 -0
  26. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_aworkflow.yaml +98 -0
  27. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_workflow.yaml +199 -0
  28. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_streaming_workflow.yaml +167 -0
  29. lmnr/traceloop_sdk/tests/conftest.py +111 -0
  30. lmnr/traceloop_sdk/tests/test_association_properties.py +229 -0
  31. lmnr/traceloop_sdk/tests/test_manual.py +48 -0
  32. lmnr/traceloop_sdk/tests/test_nested_tasks.py +47 -0
  33. lmnr/traceloop_sdk/tests/test_privacy_no_prompts.py +50 -0
  34. lmnr/traceloop_sdk/tests/test_sdk_initialization.py +57 -0
  35. lmnr/traceloop_sdk/tests/test_tasks.py +32 -0
  36. lmnr/traceloop_sdk/tests/test_workflows.py +262 -0
  37. lmnr/traceloop_sdk/tracing/__init__.py +1 -0
  38. lmnr/traceloop_sdk/tracing/attributes.py +9 -0
  39. lmnr/traceloop_sdk/tracing/content_allow_list.py +24 -0
  40. lmnr/traceloop_sdk/tracing/context_manager.py +13 -0
  41. lmnr/traceloop_sdk/tracing/tracing.py +913 -0
  42. lmnr/traceloop_sdk/utils/__init__.py +26 -0
  43. lmnr/traceloop_sdk/utils/in_memory_span_exporter.py +61 -0
  44. lmnr/traceloop_sdk/utils/json_encoder.py +20 -0
  45. lmnr/traceloop_sdk/utils/package_check.py +8 -0
  46. lmnr/traceloop_sdk/version.py +1 -0
  47. lmnr-0.4.17b0.dist-info/LICENSE +75 -0
  48. lmnr-0.4.17b0.dist-info/METADATA +250 -0
  49. lmnr-0.4.17b0.dist-info/RECORD +50 -0
  50. lmnr-0.4.17b0.dist-info/WHEEL +4 -0
lmnr/sdk/laminar.py ADDED
@@ -0,0 +1,403 @@
+ from lmnr.traceloop_sdk.instruments import Instruments
+ from opentelemetry import context
+ from opentelemetry.trace import (
+     INVALID_SPAN,
+     get_current_span,
+ )
+ from opentelemetry.util.types import AttributeValue
+ from opentelemetry.context import set_value, attach, detach
+ from lmnr.traceloop_sdk import Traceloop
+ from lmnr.traceloop_sdk.tracing import get_tracer
+ from contextlib import contextmanager
+ from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
+
+ from pydantic.alias_generators import to_snake
+ from typing import Any, Optional, Set, Union
+
+ import copy
+ import datetime
+ import dotenv
+ import json
+ import logging
+ import os
+ import requests
+ import uuid
+
+ from lmnr.traceloop_sdk.tracing.attributes import (
+     SESSION_ID,
+     SPAN_INPUT,
+     SPAN_OUTPUT,
+     SPAN_PATH,
+     TRACE_TYPE,
+     USER_ID,
+ )
+ from lmnr.traceloop_sdk.tracing.tracing import (
+     get_span_path,
+     set_association_properties,
+     update_association_properties,
+ )
+
+ from .log import VerboseColorfulFormatter
+
+ from .types import (
+     CreateEvaluationResponse,
+     EvaluationResultDatapoint,
+     PipelineRunError,
+     PipelineRunResponse,
+     NodeInput,
+     PipelineRunRequest,
+     TraceType,
+ )
+
+
+ class Laminar:
+     __base_http_url: str
+     __base_grpc_url: str
+     __project_api_key: Optional[str] = None
+     __env: dict[str, str] = {}
+     __initialized: bool = False
+
+     @classmethod
+     def initialize(
+         cls,
+         project_api_key: Optional[str] = None,
+         env: dict[str, str] = {},
+         base_url: Optional[str] = None,
+         http_port: Optional[int] = None,
+         grpc_port: Optional[int] = None,
+         instruments: Optional[Set[Instruments]] = None,
+     ):
+         """Initialize Laminar context across the application.
+         This method must be called before using any other Laminar methods or
+         decorators.
+
+         Args:
+             project_api_key (Optional[str], optional): Laminar project api key.
+                 You can generate one by going to the project
+                 settings page on the Laminar dashboard.
+                 If not specified, it will try to read from the
+                 LMNR_PROJECT_API_KEY environment variable
+                 in os.environ or in .env file.
+                 Defaults to None.
+             env (dict[str, str], optional): Default environment passed to
+                 `run` and `evaluate_event` requests, unless
+                 overridden at request time. Usually, model
+                 provider keys are stored here.
+                 Defaults to {}.
+             base_url (Optional[str], optional): Laminar API url.
+                 If not specified, defaults to https://api.lmnr.ai.
+             http_port (Optional[int], optional): Laminar API http port.
+                 If not specified, defaults to 443.
+             grpc_port (Optional[int], optional): Laminar API grpc port.
+                 If not specified, defaults to 8443.
+
+         Raises:
+             ValueError: If project API key is not set
+         """
+         cls.__project_api_key = project_api_key or os.environ.get(
+             "LMNR_PROJECT_API_KEY"
+         )
+         if not cls.__project_api_key:
+             dotenv_path = dotenv.find_dotenv(usecwd=True)
+             cls.__project_api_key = dotenv.get_key(
+                 dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
+             )
+         if not cls.__project_api_key:
+             raise ValueError(
+                 "Please initialize the Laminar object with"
+                 " your project API key or set the LMNR_PROJECT_API_KEY"
+                 " environment variable in your environment or .env file"
+             )
+
+         cls.__base_http_url = f"{base_url or 'https://api.lmnr.ai'}:{http_port or 443}"
+         cls.__base_grpc_url = f"{base_url or 'https://api.lmnr.ai'}:{grpc_port or 8443}"
+
+         cls.__env = env
+         cls.__initialized = True
+         cls._initialize_logger()
+         Traceloop.init(
+             exporter=OTLPSpanExporter(
+                 endpoint=cls.__base_grpc_url,
+                 headers={"authorization": f"Bearer {cls.__project_api_key}"},
+             ),
+             instruments=instruments,
+         )
+
+     @classmethod
+     def is_initialized(cls):
+         """Check if Laminar is initialized. A utility to make sure other
+         methods are called after initialization.
+
+         Returns:
+             bool: True if Laminar is initialized, False otherwise
+         """
+         return cls.__initialized
+
+     @classmethod
+     def _initialize_logger(cls):
+         cls.__logger = logging.getLogger(__name__)
+         console_log_handler = logging.StreamHandler()
+         console_log_handler.setFormatter(VerboseColorfulFormatter())
+         cls.__logger.addHandler(console_log_handler)
+
+     @classmethod
+     def run(
+         cls,
+         pipeline: str,
+         inputs: dict[str, NodeInput],
+         env: dict[str, str] = {},
+         metadata: dict[str, str] = {},
+         parent_span_id: Optional[uuid.UUID] = None,
+         trace_id: Optional[uuid.UUID] = None,
+     ) -> PipelineRunResponse:
+         """Runs the pipeline with the given inputs
+
+         Args:
+             pipeline (str): name of the Laminar pipeline.
+                 The pipeline must have a target version set.
+             inputs (dict[str, NodeInput]):
+                 inputs to the endpoint's target pipeline.
+                 Keys in the dictionary must match input node names.
+             env (dict[str, str], optional):
+                 Environment variables for the pipeline execution.
+                 Defaults to {}.
+             metadata (dict[str, str], optional):
+                 any custom metadata to be stored
+                 with the execution trace. Defaults to {}.
+             parent_span_id (Optional[uuid.UUID], optional):
+                 parent span id for the resulting span.
+                 Defaults to None.
+             trace_id (Optional[uuid.UUID], optional):
+                 trace id for the resulting trace.
+                 Defaults to None.
+
+         Returns:
+             PipelineRunResponse: response object containing the outputs
+
+         Raises:
+             ValueError: if project API key is not set
+             PipelineRunError: if the endpoint run fails
+         """
+         if cls.__project_api_key is None:
+             raise ValueError(
+                 "Please initialize the Laminar object with your project "
+                 "API key or set the LMNR_PROJECT_API_KEY environment variable"
+             )
+         try:
+             current_span = get_current_span()
+             if current_span != INVALID_SPAN:
+                 parent_span_id = parent_span_id or uuid.UUID(
+                     int=current_span.get_span_context().span_id
+                 )
+                 trace_id = trace_id or uuid.UUID(
+                     int=current_span.get_span_context().trace_id
+                 )
+             request = PipelineRunRequest(
+                 inputs=inputs,
+                 pipeline=pipeline,
+                 env=env,
+                 metadata=metadata,
+                 parent_span_id=parent_span_id,
+                 trace_id=trace_id,
+             )
+         except Exception as e:
+             raise ValueError(f"Invalid request: {e}")
+
+         response = requests.post(
+             cls.__base_http_url + "/v1/pipeline/run",
+             data=json.dumps(request.to_dict()),
+             headers=cls._headers(),
+         )
+         if response.status_code != 200:
+             raise PipelineRunError(response)
+         try:
+             resp_json = response.json()
+             keys = list(resp_json.keys())
+             for key in keys:
+                 value = resp_json[key]
+                 del resp_json[key]
+                 resp_json[to_snake(key)] = value
+             return PipelineRunResponse(**resp_json)
+         except Exception:
+             raise PipelineRunError(response)
+
+     @classmethod
+     def event(
+         cls,
+         name: str,
+         value: Optional[AttributeValue] = None,
+         timestamp: Optional[Union[datetime.datetime, int]] = None,
+     ):
+         """Associate an event with the current span. If an event with this name
+         has never existed, Laminar will create a new event and infer its type
+         from the value. If the event already exists, Laminar will append the
+         value to the event if and only if the value is of a matching type.
+         Otherwise, the event won't be recorded. Supported types are string,
+         numeric, and boolean. If the value is `None`, the event is considered
+         a boolean tag with the value of `True`.
+
+         Args:
+             name (str): event name
+             value (Optional[AttributeValue]): event value. Must be a primitive type.
+                 Boolean true is assumed in the backend if value is None.
+                 Defaults to None.
+             timestamp (Optional[Union[datetime.datetime, int]], optional):
+                 If int, must be epoch nanoseconds. If not
+                 specified, relies on the underlying OpenTelemetry
+                 implementation. Defaults to None.
+         """
+         if timestamp and isinstance(timestamp, datetime.datetime):
+             timestamp = int(timestamp.timestamp() * 1e9)
+
+         event = {
+             "lmnr.event.type": "default",
+         }
+         if value is not None:
+             event["lmnr.event.value"] = value
+
+         current_span = get_current_span()
+         if current_span == INVALID_SPAN:
+             cls.__logger.warning(
+                 "`Laminar().event()` called outside of span context. "
+                 f"Event '{name}' will not be recorded in the trace. "
+                 "Make sure to annotate the function with a decorator"
+             )
+             return
+
+         current_span.add_event(name, event, timestamp)
+
+     @classmethod
+     @contextmanager
+     def start_as_current_span(
+         cls,
+         name: str,
+         input: Any = None,
+     ):
+         """Start a new span as the current span. Useful for manual instrumentation.
+         This is the preferred and more stable way to use manual instrumentation.
+
+         Usage example:
+         ```python
+         with Laminar.start_as_current_span("my_span", input="my_input"):
+             await my_async_function()
+         ```
+
+         Args:
+             name (str): name of the span
+             input (Any, optional): input to the span. Will be sent as an
+                 attribute, so must be json serializable. Defaults to None.
+         """
+         with get_tracer() as tracer:
+             span_path = get_span_path(name)
+             ctx = set_value("span_path", span_path)
+             ctx_token = attach(set_value("span_path", span_path))
+             with tracer.start_as_current_span(
+                 name,
+                 context=ctx,
+                 attributes={SPAN_PATH: span_path},
+             ) as span:
+                 if input is not None:
+                     span.set_attribute(
+                         SPAN_INPUT,
+                         json.dumps(input),
+                     )
+                 yield span
+
+             # TODO: Figure out if this is necessary
+             try:
+                 detach(ctx_token)
+             except Exception:
+                 pass
+
+     @classmethod
+     def set_span_output(cls, output: Any = None):
+         """Set the output of the current span. Useful for manual instrumentation.
+
+         Args:
+             output (Any, optional): output of the span. Will be sent as an
+                 attribute, so must be json serializable. Defaults to None.
+         """
+         span = get_current_span()
+         if output is not None and span != INVALID_SPAN:
+             span.set_attribute(SPAN_OUTPUT, json.dumps(output))
+
+     @classmethod
+     def set_session(
+         cls,
+         session_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+     ):
+         """Set the session and user id for the current span and the context
+         (i.e. any children spans created from the current span in the current
+         thread).
+
+         Args:
+             session_id (Optional[str], optional): Custom session id.
+                 Useful to debug and group long-running
+                 sessions/conversations.
+                 Defaults to None.
+             user_id (Optional[str], optional): Custom user id.
+                 Useful for grouping spans or traces by user.
+                 Defaults to None.
+         """
+         association_properties = {}
+         if session_id is not None:
+             association_properties[SESSION_ID] = session_id
+         if user_id is not None:
+             association_properties[USER_ID] = user_id
+         update_association_properties(association_properties)
+
+     @classmethod
+     def _set_trace_type(
+         cls,
+         trace_type: TraceType,
+     ):
+         """Set the trace_type for the current span and the context
+
+         Args:
+             trace_type (TraceType): Type of the trace
+         """
+         association_properties = {
+             TRACE_TYPE: trace_type.value,
+         }
+         update_association_properties(association_properties)
+
+     @classmethod
+     def clear_session(cls):
+         """Clear the session and user id from the context"""
+         props: dict = copy.copy(context.get_value("association_properties"))
+         props.pop("session_id", None)
+         props.pop("user_id", None)
+         set_association_properties(props)
+
+     @classmethod
+     def create_evaluation(
+         cls,
+         data: list[EvaluationResultDatapoint],
+         group_id: Optional[str] = None,
+         name: Optional[str] = None,
+     ) -> CreateEvaluationResponse:
+         response = requests.post(
+             cls.__base_http_url + "/v1/evaluations",
+             data=json.dumps(
+                 {
+                     "groupId": group_id,
+                     "name": name,
+                     "points": [datapoint.to_dict() for datapoint in data],
+                 }
+             ),
+             headers=cls._headers(),
+         )
+         if response.status_code != 200:
+             try:
+                 resp_json = response.json()
+                 raise ValueError(f"Error creating evaluation {json.dumps(resp_json)}")
+             except Exception:
+                 raise ValueError(f"Error creating evaluation {response.text}")
+         return CreateEvaluationResponse.model_validate(response.json())
+
+     @classmethod
+     def _headers(cls):
+         assert cls.__project_api_key is not None, "Project API key is not set"
+         return {
+             "Authorization": "Bearer " + cls.__project_api_key,
+             "Content-Type": "application/json",
+         }
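
The class above is the whole public surface of the SDK's manual instrumentation: `initialize`, `start_as_current_span`, `event`, `set_span_output`, `set_session`, and `run`. The following is a minimal sketch of how they compose, based only on the signatures shown in this diff; it assumes `LMNR_PROJECT_API_KEY` is set and that a pipeline named "my_pipeline" exists (both the pipeline name and the inputs are placeholders).

```python
# Sketch only: exercises the Laminar class from lmnr/sdk/laminar.py.
# Assumes LMNR_PROJECT_API_KEY is set and a target pipeline is deployed.
from lmnr.sdk.laminar import Laminar

Laminar.initialize()  # reads LMNR_PROJECT_API_KEY from the environment or .env

# Group subsequent spans under a session/user for easier debugging
Laminar.set_session(session_id="session-123", user_id="user-456")

# Manual span around an arbitrary block of work
with Laminar.start_as_current_span("summarize", input={"text": "hello"}):
    result = {"summary": "hi"}          # placeholder for real work
    Laminar.event("summary_produced")   # boolean tag on the current span
    Laminar.set_span_output(result)     # stored as the span's output attribute

# Run a deployed pipeline; "my_pipeline" is a hypothetical pipeline name
response = Laminar.run(
    pipeline="my_pipeline",
    inputs={"question": "What is Laminar?"},
)
print(response.outputs)
```

Note that `run` reuses the current OpenTelemetry span, if any, to fill in `parent_span_id` and `trace_id`, so calling it inside a `start_as_current_span` block links the pipeline run to the surrounding trace.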
lmnr/sdk/log.py ADDED
@@ -0,0 +1,39 @@
+ import logging
+
+
+ class CustomFormatter(logging.Formatter):
+     grey = "\x1b[38;20m"
+     green = "\x1b[32;20m"
+     yellow = "\x1b[33;20m"
+     red = "\x1b[31;20m"
+     bold_red = "\x1b[31;1m"
+     reset = "\x1b[0m"
+     fmt = "%(asctime)s::%(name)s::%(levelname)s: %(message)s (%(filename)s:%(lineno)d)"
+
+     FORMATS = {
+         logging.DEBUG: grey + fmt + reset,
+         logging.INFO: green + fmt + reset,
+         logging.WARNING: yellow + fmt + reset,
+         logging.ERROR: red + fmt + reset,
+         logging.CRITICAL: bold_red + fmt + reset,
+     }
+
+     def format(self, record: logging.LogRecord):
+         log_fmt = self.FORMATS.get(record.levelno)
+         formatter = logging.Formatter(log_fmt)
+         return formatter.format(record)
+
+
+ # For StreamHandlers / console
+ class VerboseColorfulFormatter(CustomFormatter):
+     def format(self, record):
+         return super().format(record)
+
+
+ # For verbose FileHandlers / files
+ class VerboseFormatter(CustomFormatter):
+     fmt = "%(asctime)s::%(name)s::%(levelname)s| %(message)s (%(filename)s:%(lineno)d)"
+
+     def format(self, record):
+         formatter = logging.Formatter(self.fmt)
+         return formatter.format(record)
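
These formatters are attached in `Laminar._initialize_logger` above; a small standalone sketch of the same pattern (the logger name here is arbitrary):

```python
# Sketch: wire the SDK's colorful formatter onto a console handler,
# mirroring Laminar._initialize_logger. "my_app" is a hypothetical logger name.
import logging

from lmnr.sdk.log import VerboseColorfulFormatter

logger = logging.getLogger("my_app")
handler = logging.StreamHandler()
handler.setFormatter(VerboseColorfulFormatter())  # ANSI-colored, per-level format
logger.addHandler(handler)
logger.setLevel(logging.INFO)

logger.warning("rendered in yellow with file:line info appended")
```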
lmnr/sdk/types.py ADDED
@@ -0,0 +1,155 @@
+ import datetime
+ from enum import Enum
+ import pydantic
+ import requests
+ from typing import Any, Awaitable, Callable, Optional, Union
+ import uuid
+
+ from .utils import serialize
+
+
+ class ChatMessage(pydantic.BaseModel):
+     role: str
+     content: str
+
+
+ class ConditionedValue(pydantic.BaseModel):
+     condition: str
+     value: "NodeInput"
+
+
+ Numeric = Union[int, float]
+ NumericTypes = (int, float)  # for use with isinstance
+
+ NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
+ PipelineOutput = Union[NodeInput]
+
+
+ class PipelineRunRequest(pydantic.BaseModel):
+     inputs: dict[str, NodeInput]
+     pipeline: str
+     env: dict[str, str] = pydantic.Field(default_factory=dict)
+     metadata: dict[str, str] = pydantic.Field(default_factory=dict)
+     stream: bool = pydantic.Field(default=False)
+     parent_span_id: Optional[uuid.UUID] = pydantic.Field(default=None)
+     trace_id: Optional[uuid.UUID] = pydantic.Field(default=None)
+
+     # uuid is not serializable by default, so we need to convert it to a string
+     def to_dict(self):
+         return {
+             "inputs": {
+                 k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
+                 for k, v in self.inputs.items()
+             },
+             "pipeline": self.pipeline,
+             "env": self.env,
+             "metadata": self.metadata,
+             "stream": self.stream,
+             "parentSpanId": str(self.parent_span_id) if self.parent_span_id else None,
+             "traceId": str(self.trace_id) if self.trace_id else None,
+         }
+
+
+ class PipelineRunResponse(pydantic.BaseModel):
+     outputs: dict[str, dict[str, PipelineOutput]]
+     run_id: str
+
+
+ class PipelineRunError(Exception):
+     error_code: str
+     error_message: str
+
+     def __init__(self, response: requests.Response):
+         try:
+             resp_json = response.json()
+             self.error_code = resp_json["error_code"]
+             self.error_message = resp_json["error_message"]
+             super().__init__(self.error_message)
+         except Exception:
+             super().__init__(response.text)
+
+     def __str__(self) -> str:
+         try:
+             return str(
+                 {"error_code": self.error_code, "error_message": self.error_message}
+             )
+         except Exception:
+             return super().__str__()
+
+
+ EvaluationDatapointData = dict[str, Any]
+ EvaluationDatapointTarget = dict[str, Any]
+
+
+ # EvaluationDatapoint is a single data point in the evaluation
+ class Datapoint(pydantic.BaseModel):
+     # input to the executor function. Must be a dict with string keys
+     data: EvaluationDatapointData
+     # input to the evaluator function (alongside the executor output).
+     # Must be a dict with string keys
+     target: EvaluationDatapointTarget
+
+
+ ExecutorFunctionReturnType = Any
+ EvaluatorFunctionReturnType = Union[Numeric, dict[str, Numeric]]
+
+ ExecutorFunction = Callable[
+     [EvaluationDatapointData, Any, dict[str, Any]],
+     Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
+ ]
+
+ # EvaluatorFunction is a function that takes the output of the executor and the
+ # target data, and returns a score. The score can be a single number or a
+ # record of string keys and number values. The latter is useful for evaluating
+ # multiple criteria in one go instead of running multiple evaluators.
+ EvaluatorFunction = Callable[
+     [ExecutorFunctionReturnType, Any, dict[str, Any]],
+     Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
+ ]
+
+
+ class CreateEvaluationResponse(pydantic.BaseModel):
+     id: uuid.UUID
+     createdAt: datetime.datetime
+     groupId: str
+     name: str
+     projectId: uuid.UUID
+
+
+ class EvaluationResultDatapoint(pydantic.BaseModel):
+     data: EvaluationDatapointData
+     target: EvaluationDatapointTarget
+     executor_output: ExecutorFunctionReturnType
+     scores: dict[str, Numeric]
+     trace_id: uuid.UUID
+
+     # uuid is not serializable by default, so we need to convert it to a string
+     def to_dict(self):
+         return {
+             "data": {
+                 k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
+                 for k, v in self.data.items()
+             },
+             "target": {
+                 k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
+                 for k, v in self.target.items()
+             },
+             "executorOutput": serialize(self.executor_output),
+             "scores": self.scores,
+             "traceId": str(self.trace_id),
+         }
+
+
+ class SpanType(Enum):
+     DEFAULT = "DEFAULT"
+     LLM = "LLM"
+     PIPELINE = "PIPELINE"  # must not be set manually
+     EXECUTOR = "EXECUTOR"
+     EVALUATOR = "EVALUATOR"
+     EVALUATION = "EVALUATION"
+
+
+ class TraceType(Enum):
+     DEFAULT = "DEFAULT"
+     EVENT = "EVENT"  # must not be set manually
+     EVALUATION = "EVALUATION"
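
The `to_dict()` helpers above are what `Laminar.run` and `Laminar.create_evaluation` send over HTTP: snake_case model fields become camelCase keys and UUIDs become strings. A small sketch of that, using made-up field values:

```python
# Sketch: payload shape produced by EvaluationResultDatapoint.to_dict().
# All values below are placeholders for illustration.
import uuid

from lmnr.sdk.types import EvaluationResultDatapoint

point = EvaluationResultDatapoint(
    data={"question": "2 + 2?"},
    target={"answer": "4"},
    executor_output="4",
    scores={"accuracy": 1.0},
    trace_id=uuid.uuid4(),
)

# Yields camelCase keys and a stringified UUID, roughly:
# {"data": {...}, "target": {...}, "executorOutput": "4",
#  "scores": {"accuracy": 1.0}, "traceId": "..."}
print(point.to_dict())
```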
lmnr/sdk/utils.py ADDED
@@ -0,0 +1,99 @@
+ import asyncio
+ import datetime
+ import dataclasses
+ import enum
+ import inspect
+ import pydantic
+ import queue
+ import typing
+ import uuid
+
+
+ def is_method(func: typing.Callable) -> bool:
+     # inspect.ismethod is True for bound methods only, but in the decorator,
+     # the method is not bound yet, so we need to check if the first parameter
+     # is either 'self' or 'cls'. This only relies on naming conventions
+
+     # `signature._parameters` is an OrderedDict,
+     # so the order of insertion is preserved
+     params = list(inspect.signature(func).parameters)
+     return len(params) > 0 and params[0] in ["self", "cls"]
+
+
+ def is_async(func: typing.Callable) -> bool:
+     # `__wrapped__` is set automatically by `functools.wraps` and
+     # `functools.update_wrapper`,
+     # so we can use it to get the original function
+     while hasattr(func, "__wrapped__"):
+         func = func.__wrapped__
+
+     if not inspect.isfunction(func):
+         return False
+
+     # Check if the function is asynchronous
+     if asyncio.iscoroutinefunction(func):
+         return True
+
+     # Fallback: check if the function's code object contains 'async'.
+     # This is for cases when a decorator did not properly use
+     # `functools.wraps` or `functools.update_wrapper`
+     CO_COROUTINE = inspect.CO_COROUTINE
+     return (func.__code__.co_flags & CO_COROUTINE) != 0
+
+
+ def is_async_iterator(o: typing.Any) -> bool:
+     return hasattr(o, "__aiter__") and hasattr(o, "__anext__")
+
+
+ def is_iterator(o: typing.Any) -> bool:
+     return hasattr(o, "__iter__") and hasattr(o, "__next__")
+
+
+ def serialize(obj: typing.Any) -> dict[str, typing.Any]:
+     def to_dict_inner(o: typing.Any):
+         if isinstance(o, (datetime.datetime, datetime.date)):
+             return o.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
+         elif o is None:
+             return None
+         elif isinstance(o, (int, float, str, bool)):
+             return o
+         elif isinstance(o, uuid.UUID):
+             return str(o)  # same as in final return, but explicit
+         elif isinstance(o, enum.Enum):
+             return o.value
+         elif dataclasses.is_dataclass(o):
+             return dataclasses.asdict(o)
+         elif isinstance(o, bytes):
+             return o.decode("utf-8")
+         elif isinstance(o, pydantic.BaseModel):
+             return o.model_dump()
+         elif isinstance(o, (tuple, set, frozenset)):
+             return [to_dict_inner(item) for item in o]
+         elif isinstance(o, list):
+             return [to_dict_inner(item) for item in o]
+         elif isinstance(o, dict):
+             return {to_dict_inner(k): to_dict_inner(v) for k, v in o.items()}
+         elif isinstance(o, queue.Queue):
+             return type(o).__name__
+
+         return str(o)
+
+     return to_dict_inner(obj)
+
+
+ def get_input_from_func_args(
+     func: typing.Callable,
+     is_method: bool = False,
+     func_args: list[typing.Any] = [],
+     func_kwargs: dict[str, typing.Any] = {},
+ ) -> dict[str, typing.Any]:
+     # Remove implicitly passed "self" or "cls" argument for
+     # instance or class methods
+     res = func_kwargs.copy()
+     for i, k in enumerate(inspect.signature(func).parameters.keys()):
+         if is_method and k in ["self", "cls"]:
+             continue
+         # If param has default value, then it's not present in func args
+         if i < len(func_args):
+             res[k] = func_args[i]
+     return res
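
`serialize` above is the fallback encoder used by the `to_dict()` methods in `lmnr/sdk/types.py`. A quick sketch of its behavior on a mix of the types it special-cases (all values here are illustrative):

```python
# Sketch: serialize() from lmnr/sdk/utils.py on nested, mixed-type data.
import dataclasses
import datetime
import enum
import uuid

from lmnr.sdk.utils import serialize


class Status(enum.Enum):
    OK = "ok"


@dataclasses.dataclass
class Item:
    name: str
    count: int


payload = {
    "id": uuid.uuid4(),
    "when": datetime.datetime(2024, 1, 1, 12, 0, 0),
    "status": Status.OK,
    "items": [Item("widget", 2)],
    "tags": {"a", "b"},
}

# UUIDs become strings, enums their values, dataclasses dicts,
# sets/tuples become lists, and datetimes ISO-like strings.
print(serialize(payload))
```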