lmnr 0.3.7__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/sdk/laminar.py ADDED
@@ -0,0 +1,447 @@
+ from opentelemetry import context
+ from opentelemetry.trace import (
+     INVALID_SPAN,
+     get_current_span,
+     set_span_in_context,
+     Span,
+ )
+ from opentelemetry.semconv_ai import SpanAttributes
+ from opentelemetry.util.types import AttributeValue
+ from traceloop.sdk import Traceloop
+ from traceloop.sdk.tracing import get_tracer
+
+ from pydantic.alias_generators import to_snake
+ from typing import Any, Optional, Tuple, Union
+
+ import copy
+ import datetime
+ import dotenv
+ import json
+ import logging
+ import os
+ import requests
+ import uuid
+
+ from .log import VerboseColorfulFormatter
+
+ from .types import (
+     CreateEvaluationResponse,
+     EvaluationResultDatapoint,
+     PipelineRunError,
+     PipelineRunResponse,
+     NodeInput,
+     PipelineRunRequest,
+ )
+
+
+ class Laminar:
+     __base_url: str = "https://api.lmnr.ai"
+     __project_api_key: Optional[str] = None
+     __env: dict[str, str] = {}
+     __initialized: bool = False
+
+     @classmethod
+     def initialize(
+         cls,
+         project_api_key: Optional[str] = None,
+         env: dict[str, str] = {},
+         base_url: Optional[str] = None,
+     ):
+         """Initialize Laminar context across the application.
+         This method must be called before using any other Laminar methods or
+         decorators.
+
+         Args:
+             project_api_key (Optional[str], optional): Laminar project api key.
+                             You can generate one on the project settings page
+                             of the Laminar dashboard.
+                             If not specified, it will try to read from the
+                             LMNR_PROJECT_API_KEY environment variable
+                             in os.environ or in the .env file.
+                             Defaults to None.
+             env (dict[str, str], optional): Default environment passed to
+                             `run` and `evaluate_event` requests, unless
+                             overridden at request time. Usually, model
+                             provider keys are stored here.
+                             Defaults to {}.
+             base_url (Optional[str], optional): Url of the Laminar endpoint,
+                             or a custom OpenTelemetry ingester.
+                             If not specified, defaults to
+                             https://api.lmnr.ai.
+                             For locally hosted Laminar, this should
+                             typically be set to http://localhost:8000.
+                             Defaults to None.
+
+         Raises:
+             ValueError: If project API key is not set
+         """
+         cls.__project_api_key = project_api_key or os.environ.get(
+             "LMNR_PROJECT_API_KEY"
+         )
+         if not cls.__project_api_key:
+             dotenv_path = dotenv.find_dotenv(usecwd=True)
+             cls.__project_api_key = dotenv.get_key(
+                 dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
+             )
+         if not cls.__project_api_key:
+             raise ValueError(
+                 "Please initialize the Laminar object with"
+                 " your project API key or set the LMNR_PROJECT_API_KEY"
+                 " environment variable in your environment or .env file"
+             )
+         if base_url is not None:
+             cls.__base_url = base_url
+         cls.__env = env
+         cls.__initialized = True
+         cls._initialize_logger()
+         Traceloop.init(
+             api_endpoint=cls.__base_url,
+             api_key=cls.__project_api_key,
+             disable_batch=True,
+         )
+
+     @classmethod
+     def is_initialized(cls):
+         """Check if Laminar is initialized. A utility to make sure other
+         methods are called after initialization.
+
+         Returns:
+             bool: True if Laminar is initialized, False otherwise
+         """
+         return cls.__initialized
+
+     @classmethod
+     def _initialize_logger(cls):
+         cls.__logger = logging.getLogger(__name__)
+         console_log_handler = logging.StreamHandler()
+         console_log_handler.setFormatter(VerboseColorfulFormatter())
+         cls.__logger.addHandler(console_log_handler)
+
+     @classmethod
+     def run(
+         cls,
+         pipeline: str,
+         inputs: dict[str, NodeInput],
+         env: dict[str, str] = {},
+         metadata: dict[str, str] = {},
+         parent_span_id: Optional[uuid.UUID] = None,
+         trace_id: Optional[uuid.UUID] = None,
+     ) -> PipelineRunResponse:
+         """Runs the pipeline with the given inputs
+
+         Args:
+             pipeline (str): name of the Laminar pipeline.
+                 The pipeline must have a target version set.
+             inputs (dict[str, NodeInput]):
+                 inputs to the endpoint's target pipeline.
+                 Keys in the dictionary must match input node names
+             env (dict[str, str], optional):
+                 Environment variables for the pipeline execution.
+                 Defaults to {}.
+             metadata (dict[str, str], optional):
+                 any custom metadata to be stored
+                 with execution trace. Defaults to {}.
+             parent_span_id (Optional[uuid.UUID], optional):
+                 parent span id for the resulting span.
+                 Defaults to None.
+             trace_id (Optional[uuid.UUID], optional):
+                 trace id for the resulting trace.
+                 Defaults to None.
+
+         Returns:
+             PipelineRunResponse: response object containing the outputs
+
+         Raises:
+             ValueError: if project API key is not set
+             PipelineRunError: if the endpoint run fails
+         """
+         if cls.__project_api_key is None:
+             raise ValueError(
+                 "Please initialize the Laminar object with your project "
+                 "API key or set the LMNR_PROJECT_API_KEY environment variable"
+             )
+         try:
+             current_span = get_current_span()
+             if current_span != INVALID_SPAN:
+                 parent_span_id = parent_span_id or uuid.UUID(
+                     int=current_span.get_span_context().span_id
+                 )
+                 trace_id = trace_id or uuid.UUID(
+                     int=current_span.get_span_context().trace_id
+                 )
+             request = PipelineRunRequest(
+                 inputs=inputs,
+                 pipeline=pipeline,
+                 env=env,
+                 metadata=metadata,
+                 parent_span_id=parent_span_id,
+                 trace_id=trace_id,
+             )
+         except Exception as e:
+             raise ValueError(f"Invalid request: {e}")
+
+         response = requests.post(
+             cls.__base_url + "/v1/pipeline/run",
+             data=json.dumps(request.to_dict()),
+             headers=cls._headers(),
+         )
+         if response.status_code != 200:
+             raise PipelineRunError(response)
+         try:
+             resp_json = response.json()
+             keys = list(resp_json.keys())
+             for key in keys:
+                 value = resp_json[key]
+                 del resp_json[key]
+                 resp_json[to_snake(key)] = value
+             return PipelineRunResponse(**resp_json)
+         except Exception:
+             raise PipelineRunError(response)
+
+     @classmethod
+     def event(
+         cls,
+         name: str,
+         value: AttributeValue,
+         timestamp: Optional[Union[datetime.datetime, int]] = None,
+     ):
+         """Associate an event with the current span
+
+         Args:
+             name (str): event name
+             value (AttributeValue): event value. Must be a primitive type
+                 or a sequence of values of the same primitive type
+             timestamp (Optional[Union[datetime.datetime, int]], optional):
+                 If int, must be epoch nanoseconds. If not
+                 specified, relies on the underlying OpenTelemetry
+                 implementation. Defaults to None.
+         """
+         if timestamp and isinstance(timestamp, datetime.datetime):
+             timestamp = int(timestamp.timestamp() * 1e9)
+
+         event = {
+             "lmnr.event.type": "default",
+             "lmnr.event.value": value,
+         }
+
+         current_span = get_current_span()
+         if current_span == INVALID_SPAN:
+             cls.__logger.warning(
+                 "`Laminar().event()` called outside of span context. "
+                 f"Event '{name}' will not be recorded in the trace. "
+                 "Make sure to annotate the function with a decorator"
+             )
+             return
+
+         current_span.add_event(name, event, timestamp)
+
+     @classmethod
+     def evaluate_event(
+         cls,
+         name: str,
+         evaluator: str,
+         data: dict[str, AttributeValue],
+         env: Optional[dict[str, str]] = {},
+         timestamp: Optional[Union[datetime.datetime, int]] = None,
+     ):
+         """Send an event for evaluation to the Laminar backend
+
+         Args:
+             name (str): name of the event
+             evaluator (str): name of the pipeline that evaluates the event.
+                 The pipeline must have a target version set.
+             data (dict[str, AttributeValue]): map from input node name to
+                 its value in the evaluator pipeline
+             env (dict[str, str], optional): environment variables required
+                 to run the pipeline. Defaults to {}.
+             timestamp (Optional[Union[datetime.datetime, int]], optional):
+                 If int, must be epoch nanoseconds.
+                 If not specified, relies on the underlying
+                 OpenTelemetry implementation. Defaults to None.
+         """
+         if timestamp and isinstance(timestamp, datetime.datetime):
+             timestamp = int(timestamp.timestamp() * 1e9)
+         event = {
+             "lmnr.event.type": "evaluate",
+             "lmnr.event.evaluator": evaluator,
+             "lmnr.event.data": json.dumps(data),
+             "lmnr.event.env": json.dumps(env if env is not None else cls.__env),
+         }
+         current_span = get_current_span()
+         if current_span == INVALID_SPAN:
+             cls.__logger.warning(
+                 "`Laminar().evaluate_event()` called outside of span context. "
+                 f"Event '{name}' will not be recorded in the trace. "
+                 "Make sure to annotate the function with a decorator"
+             )
+             return
+
+         current_span.add_event(name, event)
+
+     @classmethod
+     def start_span(
+         cls,
+         name: str,
+         input: Any = None,
+     ) -> Tuple[Span, object]:
+         """Start a new span with the given name. Useful for manual
+         instrumentation.
+
+         Args:
+             name (str): name of the span
+             input (Any, optional): input to the span. Will be sent as an
+                 attribute, so must be json serializable. Defaults to None.
+
+         Returns:
+             Tuple[Span, object]: the started span and a context token that
+                 must be passed to `end_span` to end the span.
+         """
+         with get_tracer() as tracer:
+             span = tracer.start_span(name)
+             ctx = set_span_in_context(span)
+             token = context.attach(ctx)
+             span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_NAME, name)
+             if input is not None:
+                 span.set_attribute(
+                     SpanAttributes.TRACELOOP_ENTITY_INPUT, json.dumps({"input": input})
+                 )
+             return (span, token)
+
+     @classmethod
+     def end_span(cls, span: Span, token: object, output: Any = None):
+         """End the span started with `start_span`
+
+         Args:
+             span (Span): span returned by `start_span`
+             token (object): context token returned by `start_span`
+             output (Any, optional): output of the span. Will be sent as an
+                 attribute, so must be json serializable. Defaults to None.
+         """
+         if output is not None:
+             span.set_attribute(
+                 SpanAttributes.TRACELOOP_ENTITY_OUTPUT, json.dumps({"output": output})
+             )
+         span.end()
+         context.detach(token)
+
+     @classmethod
+     def set_session(
+         cls,
+         session_id: Optional[str] = None,
+         user_id: Optional[str] = None,
+     ):
+         """Set the session and user id for the current span and the context
+         (i.e. any children spans created from the current span in the current
+         thread).
+
+         Args:
+             session_id (Optional[str], optional): Custom session id.
+                 Useful to debug and group long-running
+                 sessions/conversations.
+                 Defaults to None.
+             user_id (Optional[str], optional): Custom user id.
+                 Useful for grouping spans or traces by user.
+                 Defaults to None.
+         """
+         current_span = get_current_span()
+         if current_span != INVALID_SPAN:
+             cls.__logger.debug(
+                 "Laminar().set_session() called inside a span context. Setting"
+                 " it manually in the current span."
+             )
+             if session_id is not None:
+                 current_span.set_attribute(
+                     "traceloop.association.properties.session_id", session_id
+                 )
+             if user_id is not None:
+                 current_span.set_attribute(
+                     "traceloop.association.properties.user_id", user_id
+                 )
+         association_properties = {}
+         if session_id is not None:
+             association_properties["session_id"] = session_id
+         if user_id is not None:
+             association_properties["user_id"] = user_id
+         Traceloop.set_association_properties(association_properties)
+
+     @classmethod
+     def clear_session(cls):
+         """Clear the session and user id from the context"""
+         props: dict = copy.copy(context.get_value("association_properties") or {})
+         props.pop("session_id", None)
+         props.pop("user_id", None)
+         Traceloop.set_association_properties(props)
+
+     @classmethod
+     def create_evaluation(cls, name: str) -> CreateEvaluationResponse:
+         response = requests.post(
+             cls.__base_url + "/v1/evaluations",
+             data=json.dumps({"name": name}),
+             headers=cls._headers(),
+         )
+         if response.status_code != 200:
+             try:
+                 resp_json = response.json()
+             except Exception:
+                 raise ValueError(f"Error creating evaluation {response.text}")
+             raise ValueError(f"Error creating evaluation {json.dumps(resp_json)}")
+         return CreateEvaluationResponse.model_validate(response.json())
+
+     @classmethod
+     def post_evaluation_results(
+         cls, evaluation_name: str, data: list[EvaluationResultDatapoint]
+     ) -> requests.Response:
+         body = {
+             "name": evaluation_name,
+             "points": [datapoint.model_dump() for datapoint in data],
+         }
+         response = requests.post(
+             cls.__base_url + "/v1/evaluation-datapoints",
+             data=json.dumps(body),
+             headers=cls._headers(),
+         )
+         if response.status_code != 200:
+             try:
+                 resp_json = response.json()
+             except Exception:
+                 raise ValueError(
+                     f"Failed to send evaluation results. Error: {response.text}"
+                 )
+             raise ValueError(
+                 f"Failed to send evaluation results. Response: {json.dumps(resp_json)}"
+             )
+         return response
+
+     @classmethod
+     def update_evaluation_status(
+         cls, evaluation_name: str, status: str
+     ) -> requests.Response:
+         body = {
+             "name": evaluation_name,
+             "status": status,
+         }
+         response = requests.put(
+             cls.__base_url + "/v1/evaluations/",
+             data=json.dumps(body),
+             headers=cls._headers(),
+         )
+         if response.status_code != 200:
+             try:
+                 resp_json = response.json()
+             except Exception:
+                 raise ValueError(
+                     f"Failed to send evaluation status. Error: {response.text}"
+                 )
+             raise ValueError(
+                 f"Failed to send evaluation status. Response: {json.dumps(resp_json)}"
+             )
+         return response
+
+     @classmethod
+     def _headers(cls):
+         return {
+             "Authorization": "Bearer " + cls.__project_api_key,
+             "Content-Type": "application/json",
+         }
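
The class above is the new public entry point of the SDK. Below is a minimal usage sketch assembled from its docstrings; it is not part of the package, and the pipeline name, inputs, environment key, and API key are placeholders.

import os
from lmnr.sdk.laminar import Laminar

Laminar.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])

# Manual instrumentation around a unit of work.
span, token = Laminar.start_span("answer-question", input={"query": "What is Laminar?"})
Laminar.event("question_received", "default-channel")  # attached to the span started above
result = Laminar.run(
    pipeline="my_pipeline",  # placeholder; the pipeline must have a target version set
    inputs={"query": "What is Laminar?"},
    env={"OPENAI_API_KEY": os.environ.get("OPENAI_API_KEY", "")},
)
Laminar.end_span(span, token, output=result.outputs)
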
lmnr/sdk/log.py ADDED
@@ -0,0 +1,39 @@
+ import logging
+
+
+ class CustomFormatter(logging.Formatter):
+     grey = "\x1b[38;20m"
+     green = "\x1b[32;20m"
+     yellow = "\x1b[33;20m"
+     red = "\x1b[31;20m"
+     bold_red = "\x1b[31;1m"
+     reset = "\x1b[0m"
+     fmt = "%(asctime)s::%(name)s::%(levelname)s: %(message)s (%(filename)s:%(lineno)d)"
+
+     FORMATS = {
+         logging.DEBUG: grey + fmt + reset,
+         logging.INFO: green + fmt + reset,
+         logging.WARNING: yellow + fmt + reset,
+         logging.ERROR: red + fmt + reset,
+         logging.CRITICAL: bold_red + fmt + reset,
+     }
+
+     def format(self, record: logging.LogRecord):
+         log_fmt = self.FORMATS.get(record.levelno)
+         formatter = logging.Formatter(log_fmt)
+         return formatter.format(record)
+
+
+ # For StreamHandlers / console
+ class VerboseColorfulFormatter(CustomFormatter):
+     def format(self, record):
+         return super().format(record)
+
+
+ # For Verbose FileHandlers / files
+ class VerboseFormatter(CustomFormatter):
+     fmt = "%(asctime)s::%(name)s::%(levelname)s| %(message)s (%(filename)s:%(lineno)d)"
+
+     def format(self, record):
+         formatter = logging.Formatter(self.fmt)
+         return formatter.format(record)
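
These formatters are consumed by Laminar._initialize_logger above. A short sketch of the same wiring for an application logger (the logger name is a placeholder, not from the package):

import logging
from lmnr.sdk.log import VerboseColorfulFormatter

logger = logging.getLogger("my_app")
handler = logging.StreamHandler()
handler.setFormatter(VerboseColorfulFormatter())  # per-level ANSI colors, console format
logger.addHandler(handler)
logger.warning("formatted, colorized console output")
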
lmnr/sdk/types.py CHANGED
@@ -1,7 +1,8 @@
+ import datetime
  import requests
  import pydantic
  import uuid
- from typing import Optional, Union
+ from typing import Any, Awaitable, Callable, Literal, Optional, TypeAlias, Union

  from .utils import to_dict

@@ -16,7 +17,9 @@ class ConditionedValue(pydantic.BaseModel):
      value: "NodeInput"


- NodeInput = Union[str, list[ChatMessage], ConditionedValue] # TypeAlias
+ Numeric: TypeAlias = Union[int, float]
+ NodeInput: TypeAlias = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
+ PipelineOutput: TypeAlias = Union[NodeInput]


  class PipelineRunRequest(pydantic.BaseModel):
@@ -45,7 +48,7 @@ class PipelineRunRequest(pydantic.BaseModel):


  class PipelineRunResponse(pydantic.BaseModel):
-     outputs: dict[str, dict[str, NodeInput]]
+     outputs: dict[str, dict[str, PipelineOutput]]
      run_id: str


@@ -69,3 +72,52 @@ class PipelineRunError(Exception):
          )
      except Exception:
          return super().__str__()
+
+
+ EvaluationDatapointData: TypeAlias = dict[str, Any]
+ EvaluationDatapointTarget: TypeAlias = dict[str, Any]
+
+
+ # EvaluationDatapoint is a single data point in the evaluation
+ class EvaluationDatapoint(pydantic.BaseModel):
+     # input to the executor function. Must be a dict with string keys
+     data: EvaluationDatapointData
+     # input to the evaluator function (alongside the executor output).
+     # Must be a dict with string keys
+     target: EvaluationDatapointTarget
+
+
+ ExecutorFunctionReturnType: TypeAlias = Any
+ EvaluatorFunctionReturnType: TypeAlias = Union[Numeric, dict[str, Numeric]]
+
+ ExecutorFunction: TypeAlias = Callable[
+     [EvaluationDatapointData, *tuple[Any, ...], dict[str, Any]],
+     Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
+ ]
+
+ # EvaluatorFunction is a function that takes the output of the executor and the
+ # target data, and returns a score. The score can be a single number or a
+ # record of string keys and number values. The latter is useful for evaluating
+ # multiple criteria in one go instead of running multiple evaluators.
+ EvaluatorFunction: TypeAlias = Callable[
+     [ExecutorFunctionReturnType, *tuple[Any, ...], dict[str, Any]],
+     Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
+ ]
+
+ EvaluationStatus: TypeAlias = Literal["Started", "Finished", "Error"]
+
+
+ class CreateEvaluationResponse(pydantic.BaseModel):
+     id: uuid.UUID
+     createdAt: datetime.datetime
+     name: str
+     status: EvaluationStatus
+     projectId: uuid.UUID
+     metadata: Optional[dict[str, Any]] = None
+
+
+ class EvaluationResultDatapoint(pydantic.BaseModel):
+     data: EvaluationDatapointData
+     target: EvaluationDatapointTarget
+     executor_output: ExecutorFunctionReturnType
+     scores: dict[str, Numeric]
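
The new evaluation types define the contract between executors, evaluators, and the results endpoint. A sketch of how they fit together (the evaluator function and the values are made up for illustration):

from lmnr.sdk.types import EvaluationDatapoint, EvaluationResultDatapoint

def exact_match(output, target: dict) -> dict[str, int]:
    # Matches EvaluatorFunctionReturnType: a dict of numeric scores.
    return {"exact_match": 1 if output == target.get("expected") else 0}

datapoint = EvaluationDatapoint(data={"question": "2 + 2"}, target={"expected": "4"})
executor_output = "4"  # whatever the executor function returned for this datapoint
result = EvaluationResultDatapoint(
    data=datapoint.data,
    target=datapoint.target,
    executor_output=executor_output,
    scores=exact_match(executor_output, datapoint.target),
)
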
lmnr/sdk/utils.py CHANGED
@@ -9,8 +9,6 @@ import queue
  import typing
  import uuid

- from .providers import Provider, OpenAI
-

  def is_method(func: typing.Callable) -> bool:
      # inspect.ismethod is True for bound methods only, but in the decorator,
@@ -24,17 +22,22 @@ def is_method(func: typing.Callable) -> bool:


  def is_async(func: typing.Callable) -> bool:
-     # `__wrapped__` is set automatically by `functools.wraps` and `functools.update_wrapper`
+     # `__wrapped__` is set automatically by `functools.wraps` and
+     # `functools.update_wrapper`
      # so we can use it to get the original function
      while hasattr(func, "__wrapped__"):
          func = func.__wrapped__

+     if not inspect.isfunction(func):
+         return False
+
      # Check if the function is asynchronous
      if asyncio.iscoroutinefunction(func):
          return True

-     # Fallback: check if the function's code object contains 'async'. This is for
-     # cases when the decorator did not properly use `functools.wraps` or `functools.update_wrapper`
+     # Fallback: check if the function's code object contains 'async'.
+     # This is for cases when a decorator did not properly use
+     # `functools.wraps` or `functools.update_wrapper`
      CO_COROUTINE = inspect.CO_COROUTINE
      return (func.__code__.co_flags & CO_COROUTINE) != 0

@@ -85,7 +88,8 @@ def get_input_from_func_args(
      func_args: list[typing.Any] = [],
      func_kwargs: dict[str, typing.Any] = {},
  ) -> dict[str, typing.Any]:
-     # Remove implicitly passed "self" or "cls" argument for instance or class methods
+     # Remove implicitly passed "self" or "cls" argument for
+     # instance or class methods
      res = copy.deepcopy(func_kwargs)
      for i, k in enumerate(inspect.signature(func).parameters.keys()):
          if is_method and k in ["self", "cls"]:
@@ -94,8 +98,3 @@ def get_input_from_func_args(
          if len(func_args) > i:
              res[k] = func_args[i]
      return res
-
-
- PROVIDER_NAME_TO_OBJECT: dict[str, Provider] = {
-     "openai": OpenAI(),
- }
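
The `inspect.isfunction` guard added to is_async makes the helper safe to call on callables that are not plain functions. An illustrative sketch (CallableTool is a made-up example, not from the package):

import asyncio
from lmnr.sdk.utils import is_async

async def fetch_data():
    await asyncio.sleep(0)

class CallableTool:
    def __call__(self):
        return 42

print(is_async(fetch_data))      # True: a coroutine function
print(is_async(CallableTool()))  # False: the guard returns early instead of
                                 # failing on the missing __code__ attribute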