lmnr 0.4.11__py3-none-any.whl → 0.4.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/sdk/laminar.py CHANGED
@@ -5,7 +5,6 @@ from opentelemetry.trace import (
     get_current_span,
     SpanKind,
 )
-from opentelemetry.semconv_ai import SpanAttributes
 from opentelemetry.util.types import AttributeValue
 from opentelemetry.context.context import Context
 from opentelemetry.util import types
@@ -26,7 +25,17 @@ import os
 import requests
 import uuid

-from lmnr.traceloop_sdk.tracing.tracing import set_association_properties, update_association_properties
+from lmnr.traceloop_sdk.tracing.attributes import (
+    SESSION_ID,
+    SPAN_INPUT,
+    SPAN_OUTPUT,
+    TRACE_TYPE,
+    USER_ID,
+)
+from lmnr.traceloop_sdk.tracing.tracing import (
+    set_association_properties,
+    update_association_properties,
+)

 from .log import VerboseColorfulFormatter

@@ -37,11 +46,14 @@ from .types import (
     PipelineRunResponse,
     NodeInput,
     PipelineRunRequest,
+    TraceType,
+    UpdateEvaluationResponse,
 )


 class Laminar:
-    __base_url: str = "https://api.lmnr.ai:8443"
+    __base_http_url: str
+    __base_grpc_url: str
     __project_api_key: Optional[str] = None
     __env: dict[str, str] = {}
     __initialized: bool = False
@@ -52,6 +64,8 @@ class Laminar:
         project_api_key: Optional[str] = None,
         env: dict[str, str] = {},
         base_url: Optional[str] = None,
+        http_port: Optional[int] = None,
+        grpc_port: Optional[int] = None,
         instruments: Optional[Set[Instruments]] = None,
     ):
         """Initialize Laminar context across the application.
@@ -71,13 +85,12 @@ class Laminar:
                 overriden at request time. Usually, model
                 provider keys are stored here.
                 Defaults to {}.
-            base_url (Optional[str], optional): Url of Laminar endpoint,
-                or the customopen telemetry ingester.
-                If not specified, defaults to
-                https://api.lmnr.ai:8443.
-                For locally hosted Laminar, default setting
-                must be http://localhost:8001
-                Defaults to None.
+            base_url (Optional[str], optional): Laminar API url.
+                If not specified, defaults to https://api.lmnr.ai.
+            http_port (Optional[int], optional): Laminar API http port.
+                If not specified, defaults to 443.
+            grpc_port (Optional[int], optional): Laminar API grpc port.
+                If not specified, defaults to 8443.

         Raises:
             ValueError: If project API key is not set
@@ -85,7 +98,7 @@ class Laminar:
         cls.__project_api_key = project_api_key or os.environ.get(
             "LMNR_PROJECT_API_KEY"
         )
-        if not project_api_key:
+        if not cls.__project_api_key:
             dotenv_path = dotenv.find_dotenv(usecwd=True)
             cls.__project_api_key = dotenv.get_key(
                 dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
@@ -96,14 +109,16 @@ class Laminar:
                 " your project API key or set the LMNR_PROJECT_API_KEY"
                 " environment variable in your environment or .env file"
            )
-        if base_url is not None:
-            cls.__base_url = base_url
+
+        cls.__base_http_url = f"{base_url or 'https://api.lmnr.ai'}:{http_port or 443}"
+        cls.__base_grpc_url = f"{base_url or 'https://api.lmnr.ai'}:{grpc_port or 8443}"
+
         cls.__env = env
         cls.__initialized = True
         cls._initialize_logger()
         Traceloop.init(
             exporter=OTLPSpanExporter(
-                endpoint=cls.__base_url,
+                endpoint=cls.__base_grpc_url,
                 headers={"authorization": f"Bearer {cls.__project_api_key}"},
             ),
             instruments=instruments,
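The endpoint is now split into a base URL plus separate HTTP and gRPC ports. A minimal usage sketch based on the new initialize() signature; the local URL and port values are illustrative, not documented defaults:

    from lmnr.sdk.laminar import Laminar

    # With no base_url/ports, the SDK targets https://api.lmnr.ai:443 (HTTP)
    # and https://api.lmnr.ai:8443 (gRPC).
    Laminar.initialize(
        project_api_key="<LMNR_PROJECT_API_KEY>",  # or set LMNR_PROJECT_API_KEY in the env / .env
        base_url="http://localhost",               # illustrative self-hosted URL; omit for the cloud default
        http_port=8000,                            # hypothetical local HTTP port
        grpc_port=8001,                            # hypothetical local gRPC port
    )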
@@ -190,7 +205,7 @@ class Laminar:
             raise ValueError(f"Invalid request: {e}")

         response = requests.post(
-            cls.__base_url + "/v1/pipeline/run",
+            cls.__base_http_url + "/v1/pipeline/run",
             data=json.dumps(request.to_dict()),
             headers=cls._headers(),
         )
@@ -292,7 +307,7 @@ class Laminar:
             )
             return

-        current_span.add_event(name, event)
+        current_span.add_event(name, event, timestamp)

     @classmethod
     @contextmanager
@@ -351,8 +366,8 @@ class Laminar:
         ) as span:
             if input is not None:
                 span.set_attribute(
-                    SpanAttributes.TRACELOOP_ENTITY_INPUT,
-                    json.dumps({"input": input}),
+                    SPAN_INPUT,
+                    json.dumps(input),
                 )
             yield span

@@ -366,9 +381,7 @@ class Laminar:
         """
         span = get_current_span()
         if output is not None and span != INVALID_SPAN:
-            span.set_attribute(
-                SpanAttributes.TRACELOOP_ENTITY_OUTPUT, json.dumps(output)
-            )
+            span.set_attribute(SPAN_OUTPUT, json.dumps(output))

     @classmethod
     def set_session(
@@ -391,9 +404,23 @@ class Laminar:
         """
         association_properties = {}
         if session_id is not None:
-            association_properties["session_id"] = session_id
+            association_properties[SESSION_ID] = session_id
         if user_id is not None:
-            association_properties["user_id"] = user_id
+            association_properties[USER_ID] = user_id
+        update_association_properties(association_properties)
+
+    @classmethod
+    def _set_trace_type(
+        cls,
+        trace_type: TraceType,
+    ):
+        """Set the trace_type for the current span and the context
+        Args:
+            trace_type (TraceType): Type of the trace
+        """
+        association_properties = {
+            TRACE_TYPE: trace_type.value,
+        }
         update_association_properties(association_properties)

     @classmethod
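set_session() now routes through the shared SESSION_ID and USER_ID attribute keys. A small sketch, with the signature inferred from the method body above and placeholder IDs:

    from lmnr.sdk.laminar import Laminar

    # Tag everything recorded on the current trace with a session and a user
    Laminar.set_session(session_id="session-123", user_id="user-456")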
@@ -405,9 +432,9 @@ class Laminar:
         set_association_properties(props)

     @classmethod
-    def create_evaluation(cls, name: str) -> CreateEvaluationResponse:
+    def create_evaluation(cls, name: Optional[str]) -> CreateEvaluationResponse:
         response = requests.post(
-            cls.__base_url + "/v1/evaluations",
+            cls.__base_http_url + "/v1/evaluations",
             data=json.dumps({"name": name}),
             headers=cls._headers(),
         )
@@ -421,14 +448,14 @@ class Laminar:

     @classmethod
     def post_evaluation_results(
-        cls, evaluation_name: str, data: list[EvaluationResultDatapoint]
+        cls, evaluation_id: uuid.UUID, data: list[EvaluationResultDatapoint]
     ) -> requests.Response:
         body = {
-            "name": evaluation_name,
-            "points": data,
+            "evaluationId": str(evaluation_id),
+            "points": [datapoint.to_dict() for datapoint in data],
         }
         response = requests.post(
-            cls.__base_url + "/v1/evaluation-datapoints",
+            cls.__base_http_url + "/v1/evaluation-datapoints",
             data=json.dumps(body),
             headers=cls._headers(),
         )
@@ -446,28 +473,38 @@ class Laminar:

     @classmethod
     def update_evaluation_status(
-        cls, evaluation_name: str, status: str
-    ) -> requests.Response:
+        cls, evaluation_id: str, status: str
+    ) -> UpdateEvaluationResponse:
+        """
+        Updates the status of an evaluation. Returns the updated evaluation object.
+
+        Args:
+            evaluation_id (str): The ID of the evaluation to update.
+            status (str): The status to set for the evaluation.
+
+        Returns:
+            UpdateEvaluationResponse: The updated evaluation response.
+
+        Raises:
+            ValueError: If the request fails.
+        """
         body = {
-            "name": evaluation_name,
             "status": status,
         }
-        response = requests.put(
-            cls.__base_url + "/v1/evaluations/",
+        url = f"{cls.__base_http_url}/v1/evaluations/{evaluation_id}"
+
+        response = requests.post(
+            url,
             data=json.dumps(body),
             headers=cls._headers(),
         )
         if response.status_code != 200:
-            try:
-                resp_json = response.json()
-                raise ValueError(
-                    f"Failed to send evaluation status. Response: {json.dumps(resp_json)}"
-                )
-            except Exception:
-                raise ValueError(
-                    f"Failed to send evaluation status. Error: {response.text}"
-                )
-        return response
+            raise ValueError(
+                f"Failed to update evaluation status {evaluation_id}. "
+                f"Response: {response.text}"
+            )
+
+        return UpdateEvaluationResponse.model_validate(response.json())

     @classmethod
     def _headers(cls):
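Taken together, the evaluation endpoints now operate on evaluation IDs rather than names, and datapoints are serialized through EvaluationResultDatapoint.to_dict() (camelCase keys: data, target, executorOutput, scores, traceId). A hedged end-to-end sketch, assuming CreateEvaluationResponse exposes an id field (only part of that model is visible in this diff) and using an illustrative status string:

    import uuid

    from lmnr.sdk.laminar import Laminar
    from lmnr.sdk.types import EvaluationResultDatapoint

    evaluation = Laminar.create_evaluation(name="my-eval")  # name is now Optional

    point = EvaluationResultDatapoint(
        data={"question": "2 + 2?"},
        target={"answer": "4"},
        executor_output="4",
        scores={"accuracy": 1.0},
        trace_id=uuid.uuid4(),
    )

    # Results are keyed by the evaluation id; each datapoint is sent as point.to_dict()
    Laminar.post_evaluation_results(evaluation.id, [point])           # .id field assumed
    Laminar.update_evaluation_status(str(evaluation.id), "Finished")  # status value illustrative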
lmnr/sdk/types.py CHANGED
@@ -1,10 +1,11 @@
 import datetime
-import requests
+from enum import Enum
 import pydantic
-import uuid
+import requests
 from typing import Any, Awaitable, Callable, Literal, Optional, Union
+import uuid

-from .utils import to_dict
+from .utils import serialize


 class ChatMessage(pydantic.BaseModel):
@@ -18,6 +19,8 @@ class ConditionedValue(pydantic.BaseModel):


 Numeric = Union[int, float]
+NumericTypes = (int, float)  # for use with isinstance
+
 NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
 PipelineOutput = Union[NodeInput]

@@ -35,7 +38,7 @@ class PipelineRunRequest(pydantic.BaseModel):
     def to_dict(self):
         return {
             "inputs": {
-                k: v.model_dump() if isinstance(v, pydantic.BaseModel) else to_dict(v)
+                k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
                 for k, v in self.inputs.items()
             },
             "pipeline": self.pipeline,
@@ -79,7 +82,7 @@ EvaluationDatapointTarget = dict[str, Any]


 # EvaluationDatapoint is a single data point in the evaluation
-class EvaluationDatapoint(pydantic.BaseModel):
+class Datapoint(pydantic.BaseModel):
     # input to the executor function. Must be a dict with string keys
     data: EvaluationDatapointData
     # input to the evaluator function (alongside the executor output).
@@ -114,6 +117,10 @@ class CreateEvaluationResponse(pydantic.BaseModel):
     status: EvaluationStatus
     projectId: uuid.UUID
     metadata: Optional[dict[str, Any]] = None
+    averageScores: Optional[dict[str, Numeric]] = None
+
+
+UpdateEvaluationResponse = CreateEvaluationResponse


 class EvaluationResultDatapoint(pydantic.BaseModel):
@@ -121,3 +128,35 @@ class EvaluationResultDatapoint(pydantic.BaseModel):
     target: EvaluationDatapointTarget
     executor_output: ExecutorFunctionReturnType
     scores: dict[str, Numeric]
+    trace_id: uuid.UUID
+
+    # uuid is not serializable by default, so we need to convert it to a string
+    def to_dict(self):
+        return {
+            "data": {
+                k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
+                for k, v in self.data.items()
+            },
+            "target": {
+                k: v.model_dump() if isinstance(v, pydantic.BaseModel) else serialize(v)
+                for k, v in self.target.items()
+            },
+            "executorOutput": serialize(self.executor_output),
+            "scores": self.scores,
+            "traceId": str(self.trace_id),
+        }
+
+
+class SpanType(Enum):
+    DEFAULT = "DEFAULT"
+    LLM = "LLM"
+    PIPELINE = "PIPELINE"  # must not be set manually
+    EXECUTOR = "EXECUTOR"
+    EVALUATOR = "EVALUATOR"
+    EVALUATION = "EVALUATION"
+
+
+class TraceType(Enum):
+    DEFAULT = "DEFAULT"
+    EVENT = "EVENT"  # must not be set manually
+    EVALUATION = "EVALUATION"
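The new SpanType/TraceType enums and the NumericTypes tuple are plain helpers; a short sketch of how they can be used, with the checked values taken directly from the definitions above:

    from lmnr.sdk.types import NumericTypes, SpanType, TraceType

    assert isinstance(0.95, NumericTypes)          # tuple form of Numeric, for isinstance checks
    assert SpanType.EVALUATOR.value == "EVALUATOR"
    assert TraceType.EVALUATION.value == "EVALUATION"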
lmnr/sdk/utils.py CHANGED
@@ -1,5 +1,4 @@
 import asyncio
-import copy
 import datetime
 import dataclasses
 import enum
@@ -50,7 +49,7 @@ def is_iterator(o: typing.Any) -> bool:
     return hasattr(o, "__iter__") and hasattr(o, "__next__")


-def to_dict(obj: typing.Any) -> dict[str, typing.Any]:
+def serialize(obj: typing.Any) -> dict[str, typing.Any]:
     def to_dict_inner(o: typing.Any):
         if isinstance(o, (datetime.datetime, datetime.date)):
             return o.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
@@ -59,7 +58,7 @@ def to_dict(obj: typing.Any) -> dict[str, typing.Any]:
         elif isinstance(o, (int, float, str, bool)):
             return o
         elif isinstance(o, uuid.UUID):
-            return str(o)  # same as in return, but explicit
+            return str(o)  # same as in final return, but explicit
         elif isinstance(o, enum.Enum):
             return o.value
         elif dataclasses.is_dataclass(o):
@@ -90,11 +89,11 @@ def get_input_from_func_args(
 ) -> dict[str, typing.Any]:
     # Remove implicitly passed "self" or "cls" argument for
     # instance or class methods
-    res = copy.deepcopy(func_kwargs)
+    res = func_kwargs.copy()
     for i, k in enumerate(inspect.signature(func).parameters.keys()):
         if is_method and k in ["self", "cls"]:
             continue
         # If param has default value, then it's not present in func args
-        if len(func_args) > i:
+        if i < len(func_args):
             res[k] = func_args[i]
     return res
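serialize() (formerly to_dict()) and get_input_from_func_args() are reused by the decorators further down in this diff. A minimal sketch using only the branches visible here; the function and argument values are illustrative:

    import datetime
    import uuid

    from lmnr.sdk.utils import get_input_from_func_args, serialize

    print(serialize(uuid.uuid4()))                    # UUIDs are stringified
    print(serialize(datetime.datetime(2024, 9, 1)))   # datetimes use "%Y-%m-%dT%H:%M:%S.%f%z"

    def answer(question: str, temperature: float = 0.0) -> str:
        return question

    # Positional args are merged with kwargs by parameter name; parameters left at
    # their defaults (here: temperature) do not appear in the result.
    print(get_input_from_func_args(answer, False, ("2 + 2?",), {}))
    # {'question': '2 + 2?'}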
@@ -3,20 +3,16 @@ import sys
 from pathlib import Path

 from typing import Optional, Set
-from colorama import Fore
 from opentelemetry.sdk.trace import SpanProcessor
 from opentelemetry.sdk.trace.export import SpanExporter
-from opentelemetry.sdk.metrics.export import MetricExporter
 from opentelemetry.sdk.resources import SERVICE_NAME
 from opentelemetry.propagators.textmap import TextMapPropagator
 from opentelemetry.util.re import parse_env_headers

-from lmnr.traceloop_sdk.metrics.metrics import MetricsWrapper
 from lmnr.traceloop_sdk.instruments import Instruments
 from lmnr.traceloop_sdk.config import (
     is_content_tracing_enabled,
     is_tracing_enabled,
-    is_metrics_enabled,
 )
 from lmnr.traceloop_sdk.tracing.tracing import TracerWrapper
 from typing import Dict
@@ -38,8 +34,6 @@ class Traceloop:
         headers: Dict[str, str] = {},
         disable_batch=False,
         exporter: Optional[SpanExporter] = None,
-        metrics_exporter: Optional[MetricExporter] = None,
-        metrics_headers: Optional[Dict[str, str]] = None,
         processor: Optional[SpanProcessor] = None,
         propagator: Optional[TextMapPropagator] = None,
         should_enrich_metrics: bool = True,
@@ -50,14 +44,11 @@ class Traceloop:
         api_key = os.getenv("TRACELOOP_API_KEY") or api_key

         if not is_tracing_enabled():
-            print(Fore.YELLOW + "Tracing is disabled" + Fore.RESET)
+            # print(Fore.YELLOW + "Tracing is disabled" + Fore.RESET)
             return

         enable_content_tracing = is_content_tracing_enabled()

-        if exporter or processor:
-            print(Fore.GREEN + "Laminar exporting traces to a custom exporter")
-
         headers = os.getenv("TRACELOOP_HEADERS") or headers

         if isinstance(headers, str):
@@ -70,30 +61,18 @@ class Traceloop:
             and not api_key
         ):
             print(
-                Fore.RED
-                + "Error: Missing API key,"
+                "Error: Missing API key,"
                 + " go to project settings to create one"
             )
             print("Set the LMNR_PROJECT_API_KEY environment variable to the key")
-            print(Fore.RESET)
             return

-        if not exporter and not processor and headers:
-            print(
-                Fore.GREEN
-                + f"Laminar exporting traces to {api_endpoint}, authenticating with custom headers"
-            )
-
         if api_key and not exporter and not processor and not headers:
-            print(
-                Fore.GREEN
-                + f"Laminar exporting traces to {api_endpoint} authenticating with bearer token"
-            )
             headers = {
                 "Authorization": f"Bearer {api_key}",
             }

-        print(Fore.RESET)
+        # print(Fore.RESET)

         # Tracer init
         resource_attributes.update({SERVICE_NAME: app_name})
@@ -108,21 +87,3 @@ class Traceloop:
             should_enrich_metrics=should_enrich_metrics,
             instruments=instruments,
         )
-
-        if not metrics_exporter and exporter:
-            return
-
-        metrics_endpoint = os.getenv("TRACELOOP_METRICS_ENDPOINT") or api_endpoint
-        metrics_headers = (
-            os.getenv("TRACELOOP_METRICS_HEADERS") or metrics_headers or headers
-        )
-
-        if not is_metrics_enabled() or not metrics_exporter and exporter:
-            print(Fore.YELLOW + "Metrics are disabled" + Fore.RESET)
-            return
-
-        MetricsWrapper.set_static_params(
-            resource_attributes, metrics_endpoint, metrics_headers
-        )
-
-        Traceloop.__metrics_wrapper = MetricsWrapper(exporter=metrics_exporter)
@@ -7,7 +7,3 @@ def is_tracing_enabled() -> bool:

 def is_content_tracing_enabled() -> bool:
     return (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true"
-
-
-def is_metrics_enabled() -> bool:
-    return (os.getenv("TRACELOOP_METRICS_ENABLED") or "true").lower() == "true"
@@ -7,9 +7,10 @@ import warnings

 from opentelemetry import trace
 from opentelemetry import context as context_api
-from opentelemetry.semconv_ai import SpanAttributes

+from lmnr.sdk.utils import get_input_from_func_args, is_method
 from lmnr.traceloop_sdk.tracing import get_tracer
+from lmnr.traceloop_sdk.tracing.attributes import SPAN_INPUT, SPAN_OUTPUT
 from lmnr.traceloop_sdk.tracing.tracing import TracerWrapper
 from lmnr.traceloop_sdk.utils.json_encoder import JSONEncoder

@@ -52,8 +53,12 @@ def entity_method(
             try:
                 if _should_send_prompts():
                     span.set_attribute(
-                        SpanAttributes.TRACELOOP_ENTITY_INPUT,
-                        _json_dumps({"args": args, "kwargs": kwargs}),
+                        SPAN_INPUT,
+                        _json_dumps(
+                            get_input_from_func_args(
+                                fn, is_method(fn), args, kwargs
+                            )
+                        ),
                     )
             except TypeError:
                 pass
@@ -67,7 +72,7 @@ def entity_method(
             try:
                 if _should_send_prompts():
                     span.set_attribute(
-                        SpanAttributes.TRACELOOP_ENTITY_OUTPUT,
+                        SPAN_OUTPUT,
                         _json_dumps(res),
                     )
             except TypeError:
@@ -105,8 +110,12 @@ def aentity_method(
             try:
                 if _should_send_prompts():
                     span.set_attribute(
-                        SpanAttributes.TRACELOOP_ENTITY_INPUT,
-                        _json_dumps({"args": args, "kwargs": kwargs}),
+                        SPAN_INPUT,
+                        _json_dumps(
+                            get_input_from_func_args(
+                                fn, is_method(fn), args, kwargs
+                            )
+                        ),
                     )
             except TypeError:
                 pass
@@ -119,9 +128,7 @@ def aentity_method(

             try:
                 if _should_send_prompts():
-                    span.set_attribute(
-                        SpanAttributes.TRACELOOP_ENTITY_OUTPUT, json.dumps(res)
-                    )
+                    span.set_attribute(SPAN_OUTPUT, json.dumps(res))
             except TypeError:
                 pass

@@ -0,0 +1,8 @@
+SPAN_INPUT = "lmnr.span.input"
+SPAN_OUTPUT = "lmnr.span.output"
+SPAN_TYPE = "lmnr.span.type"
+
+ASSOCIATION_PROPERTIES = "lmnr.association.properties"
+SESSION_ID = "session_id"
+USER_ID = "user_id"
+TRACE_TYPE = "trace_type"
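These constants replace the SpanAttributes.TRACELOOP_ENTITY_* keys used elsewhere in this diff. A sketch of setting them manually on an OpenTelemetry span; the span name and payload are placeholders:

    import json

    from opentelemetry import trace

    from lmnr.traceloop_sdk.tracing.attributes import SPAN_INPUT, SPAN_OUTPUT

    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("my_step") as span:
        span.set_attribute(SPAN_INPUT, json.dumps({"query": "2 + 2?"}))
        span.set_attribute(SPAN_OUTPUT, json.dumps("4"))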