lmnr 0.4.10-py3-none-any.whl → 0.4.12b1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/__init__.py CHANGED
@@ -1,4 +1,4 @@
- from .sdk.evaluations import Evaluation
+ from .sdk.evaluations import evaluate
  from .sdk.laminar import Laminar
  from .sdk.types import ChatMessage, PipelineRunError, PipelineRunResponse, NodeInput
  from .sdk.decorators import observe
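With this change the package's top-level exports shift: the `Evaluation` class is no longer re-exported from `lmnr`, and the new `evaluate` entrypoint is exported in its place. A minimal import under the new layout might look like this (illustrative, based only on the imports shown above):

```python
# Top-level imports available after this release; `Evaluation` is no longer re-exported.
from lmnr import evaluate, Laminar, observe
```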
lmnr/cli.py ADDED
@@ -0,0 +1,39 @@
+ from argparse import ArgumentParser
+ import asyncio
+ import importlib
+ import os
+ import sys
+
+ from lmnr.sdk.evaluations import set_global_evaluation
+
+
+ # TODO: Refactor this code
+ async def run_evaluation(args):
+     sys.path.insert(0, os.getcwd())
+
+     with set_global_evaluation(True):
+         file = os.path.abspath(args.file)
+
+         spec = importlib.util.spec_from_file_location("run_eval", file)
+         mod = importlib.util.module_from_spec(spec)
+         spec.loader.exec_module(mod)
+
+         from lmnr.sdk.evaluations import _evaluation
+         evaluation = _evaluation
+         await evaluation.run()
+
+
+ def cli():
+     parser = ArgumentParser(
+         prog="lmnr",
+         description="CLI for Laminar",
+     )
+
+     subparsers = parser.add_subparsers(title="subcommands", dest="subcommand")
+
+     parser_eval = subparsers.add_parser("eval", description="Run an evaluation")
+     parser_eval.add_argument("file", help="A file containing the evaluation to run")
+     parser_eval.set_defaults(func=run_evaluation)
+
+     parsed = parser.parse_args()
+     asyncio.run(parsed.func(parsed))
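Together with the new `console_scripts` entry point (`lmnr=lmnr.cli:cli`, see the entry_points.txt change at the end of this diff), this module adds an `eval` subcommand. A sketch of how it would be invoked, where `my_eval.py` is a hypothetical script that calls `evaluate(...)` at module level:

```sh
# Run an evaluation file through the new CLI.
# The CLI imports the file with set_global_evaluation(True) and then awaits
# the evaluation object captured in lmnr.sdk.evaluations._evaluation.
lmnr eval my_eval.py
```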
lmnr/sdk/decorators.py CHANGED
@@ -4,22 +4,19 @@ from lmnr.traceloop_sdk.decorators.base import (
  )
  from opentelemetry.trace import INVALID_SPAN, get_current_span

- from typing import Callable, Optional, ParamSpec, TypeVar, cast
+ from typing import Callable, Optional, cast

  from lmnr.traceloop_sdk.tracing.tracing import update_association_properties

  from .utils import is_async

- P = ParamSpec("P")
- R = TypeVar("R")
-

  def observe(
      *,
      name: Optional[str] = None,
      user_id: Optional[str] = None,
      session_id: Optional[str] = None,
- ) -> Callable[[Callable[P, R]], Callable[P, R]]:
+ ) -> Callable[[Callable], Callable]:
      """The main decorator entrypoint for Laminar. This is used to wrap
      functions and methods to create spans.

@@ -41,7 +38,7 @@ def observe(
          R: Returns the result of the wrapped function
      """

-     def decorator(func: Callable[P, R]) -> Callable[P, R]:
+     def decorator(func: Callable) -> Callable:
          current_span = get_current_span()
          if current_span != INVALID_SPAN:
              if session_id is not None:
@@ -64,4 +61,4 @@ def observe(
              else entity_method(name=name)(func)
          )

-     return cast(Callable[P, R], decorator)
+     return cast(Callable, decorator)
lmnr/sdk/evaluations.py CHANGED
@@ -1,14 +1,60 @@
- from typing import Any, Union
-
- from .types import EvaluationDatapoint
- from .utils import is_async
- from .laminar import Laminar as L
  import asyncio
-
+ import sys
  from abc import ABC, abstractmethod
+ from contextlib import contextmanager
+ from typing import Any, Awaitable, Optional, Union
+
+ from tqdm import tqdm
+
+ from .laminar import Laminar as L
+ from .types import CreateEvaluationResponse, Datapoint, EvaluationResultDatapoint, Numeric
+ from .utils import is_async

  DEFAULT_BATCH_SIZE = 5

+ _evaluation = None
+ _set_global_evaluation = False
+
+
+ @contextmanager
+ def set_global_evaluation(set_global_evaluation: bool):
+     global _set_global_evaluation
+     original = _set_global_evaluation
+     try:
+         _set_global_evaluation = set_global_evaluation
+         yield
+     finally:
+         _set_global_evaluation = original
+         pass
+
+
+ def get_evaluation_url(project_id: str, evaluation_id: str):
+     return f"https://www.lmnr.ai/project/{project_id}/evaluations/{evaluation_id}"
+
+
+ class EvaluationReporter:
+     def __init__(self):
+         pass
+
+     def start(self, name: str, project_id: str, id: str, length: int):
+         print(f"Running evaluation {name}...\n")
+         print(f"Check progress and results at {get_evaluation_url(project_id, id)}\n")
+         self.cli_progress = tqdm(total=length, bar_format="{bar} {percentage:3.0f}% | ETA: {remaining}s | {n_fmt}/{total_fmt}", ncols=60)
+
+     def update(self, batch_length: int):
+         self.cli_progress.update(batch_length)
+
+     def stopWithError(self, error: Exception):
+         self.cli_progress.close()
+         sys.stderr.write(f"\nError: {error}\n")
+
+     def stop(self, average_scores: dict[str, Numeric]):
+         self.cli_progress.close()
+         print("\nAverage scores:")
+         for (name, score) in average_scores.items():
+             print(f"{name}: {score}")
+         print("\n")
+

  class EvaluationDataset(ABC):
      @abstractmethod
@@ -20,7 +66,7 @@ class EvaluationDataset(ABC):
          pass

      @abstractmethod
-     def __getitem__(self, idx) -> EvaluationDatapoint:
+     def __getitem__(self, idx) -> Datapoint:
          pass

      def slice(self, start: int, end: int):
@@ -30,16 +76,18 @@ class EvaluationDataset(ABC):
  class Evaluation:
      def __init__(
          self,
-         name,
-         data: Union[EvaluationDataset, list[Union[EvaluationDatapoint, dict]]],
+         name: str,
+         data: Union[EvaluationDataset, list[Union[Datapoint, dict]]],
          executor: Any,
          evaluators: list[Any],
          batch_size: int = DEFAULT_BATCH_SIZE,
-         project_api_key: str = "",
-         base_url: str = "https://api.lmnr.ai",
+         project_api_key: Optional[str] = None,
+         base_url: Optional[str] = None,
+         http_port: Optional[int] = None,
      ):
          """
          Initializes an instance of the Evaluations class.
+
          Parameters:
              name (str): The name of the evaluation.
              data (Union[List[Union[EvaluationDatapoint, dict]], EvaluationDataset]): List of data points to evaluate or an evaluation dataset.
@@ -58,14 +106,18 @@ class Evaluation:
                  evaluator function in the list starting from 1.
              batch_size (int, optional): The batch size for evaluation.
                  Defaults to DEFAULT_BATCH_SIZE.
-             project_api_key (str, optional): The project API key.
+             project_api_key (Optional[str], optional): The project API key.
                  Defaults to an empty string.
-             base_url (str, optional): The base URL for the LMNR API.
+             base_url (Optional[str], optional): The base URL for the Laminar API.
                  Useful if self-hosted elsewhere.
                  Defaults to "https://api.lmnr.ai".
+             http_port (Optional[int], optional): The port for the Laminar API HTTP service.
+                 Defaults to 443.
          """

+         self.is_finished = False
          self.name = name
+         self.reporter = EvaluationReporter()
          self.executor = executor
          self.evaluators = dict(
              zip(
@@ -84,7 +136,7 @@
          if isinstance(data, list):
              self.data = [
                  (
-                     EvaluationDatapoint.model_validate(point)
+                     Datapoint.model_validate(point)
                      if isinstance(point, dict)
                      else point
                  )
@@ -93,9 +145,14 @@
          else:
              self.data = data
          self.batch_size = batch_size
-         L.initialize(project_api_key=project_api_key, base_url=base_url)
+         L.initialize(
+             project_api_key=project_api_key,
+             base_url=base_url,
+             http_port=http_port,
+             instruments=set(),
+         )

-     def run(self):
+     def run(self) -> Union[None, Awaitable[None]]:
          """Runs the evaluation.

          Creates a new evaluation if no evaluation with such name exists, or
@@ -113,40 +170,58 @@
          ```

          """
+         if self.is_finished:
+             raise Exception("Evaluation is already finished")
+
          loop = asyncio.get_event_loop()
          if loop.is_running():
              return loop.create_task(self._run())
          else:
              return loop.run_until_complete(self._run())

-     async def _run(self):
-         response = L.create_evaluation(self.name)
+     async def _run(self) -> None:
+         evaluation = L.create_evaluation(self.name)
+         self.reporter.start(
+             evaluation.name,
+             evaluation.projectId,
+             evaluation.id,
+             len(self.data),
+         )

-         # Process batches sequentially
+         try:
+             await self.evaluate_in_batches(evaluation)
+         except Exception as e:
+             L.update_evaluation_status(evaluation.id, "Error")
+             self.reporter.stopWithError(e)
+             self.is_finished = True
+             return
+
+         # If we update with status "Finished", we expect averageScores to be not empty
+         updated_evaluation = L.update_evaluation_status(evaluation.id, "Finished")
+         self.reporter.stop(updated_evaluation.averageScores)
+         self.is_finished = True
+
+     async def evaluate_in_batches(self, evaluation: CreateEvaluationResponse):
          for i in range(0, len(self.data), self.batch_size):
              batch = (
-                 self.data[i : i + self.batch_size]
+                 self.data[i: i + self.batch_size]
                  if isinstance(self.data, list)
                  else self.data.slice(i, i + self.batch_size)
              )
              try:
-                 await self._evaluate_batch(batch)
+                 results = await self._evaluate_batch(batch)
+                 L.post_evaluation_results(evaluation.id, results)
              except Exception as e:
                  print(f"Error evaluating batch: {e}")
+             finally:
+                 self.reporter.update(len(batch))

-         try:
-             L.update_evaluation_status(response.name, "Finished")
-             print(f"Evaluation {response.id} complete")
-         except Exception as e:
-             print(f"Error updating evaluation status: {e}")
-
-     async def _evaluate_batch(self, batch: list[EvaluationDatapoint]):
+     async def _evaluate_batch(self, batch: list[Datapoint]) -> list[EvaluationResultDatapoint]:
          batch_promises = [self._evaluate_datapoint(datapoint) for datapoint in batch]
          results = await asyncio.gather(*batch_promises)
+         return results

-         return L.post_evaluation_results(self.name, results)
-
-     async def _evaluate_datapoint(self, datapoint):
+     async def _evaluate_datapoint(self, datapoint) -> EvaluationResultDatapoint:
          output = (
              await self.executor(datapoint.data)
              if is_async(self.executor)
@@ -155,7 +230,7 @@
          target = datapoint.target

          # Iterate over evaluators
-         scores = {}
+         scores: dict[str, Numeric] = {}
          for evaluator_name in self.evaluator_names:
              evaluator = self.evaluators[evaluator_name]
              value = (
@@ -165,14 +240,51 @@
              )

              # If evaluator returns a single number, use evaluator name as key
-             if isinstance(value, (int, float)):
+             if isinstance(value, Numeric):
                  scores[evaluator_name] = value
              else:
                  scores.update(value)

-         return {
-             "executorOutput": output,
-             "data": datapoint.data,
-             "target": target,
-             "scores": scores,
-         }
+         return EvaluationResultDatapoint(
+             data=datapoint.data,
+             target=target,
+             executorOutput=output,
+             scores=scores,
+         )
+
+
+ def evaluate(
+     name: str,
+     data: Union[EvaluationDataset, list[Union[Datapoint, dict]]],
+     executor: Any,
+     evaluators: list[Any],
+     batch_size: int = DEFAULT_BATCH_SIZE,
+     project_api_key: Optional[str] = None,
+     base_url: Optional[str] = None,
+     http_port: Optional[int] = None,
+ ) -> Optional[Awaitable[None]]:
+     """
+     Run evaluation.
+
+     If `_set_global_evaluation` is `True`, sets the global evaluation to be run in another part of the program.
+
+     Otherwise, if there is no event loop, runs the evaluation in the current thread until completion.
+     If there is an event loop, schedules the evaluation as a task in the event loop and returns an awaitable handle.
+     """
+
+     evaluation = Evaluation(
+         name,
+         data,
+         executor,
+         evaluators,
+         batch_size,
+         project_api_key,
+         base_url,
+         http_port,
+     )
+
+     global _evaluation
+     if _set_global_evaluation:
+         _evaluation = evaluation
+     else:
+         return evaluation.run()
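For context, here is a minimal sketch of how the new `evaluate()` entrypoint could be called, based on the signature above. The executor, evaluator, and datapoint contents are illustrative assumptions, not part of the diff:

```python
from lmnr import evaluate

def write_joke(data: dict) -> str:
    # Executor: receives the datapoint's `data` dict and returns the output to score.
    return f"A joke about {data['topic']}"

def contains_topic(output: str, target: dict) -> int:
    # Evaluator: receives the executor output and the datapoint's `target`,
    # and returns a number (or a dict of named scores).
    return int(target["topic"] in output)

evaluate(
    name="joke-quality",  # illustrative evaluation name
    data=[{"data": {"topic": "flow"}, "target": {"topic": "flow"}}],
    executor=write_joke,
    evaluators=[contains_topic],
    project_api_key="<LMNR_PROJECT_API_KEY>",  # or rely on the environment variable
)
```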
lmnr/sdk/laminar.py CHANGED
@@ -37,11 +37,13 @@ from .types import (
      PipelineRunResponse,
      NodeInput,
      PipelineRunRequest,
+     UpdateEvaluationResponse,
  )


  class Laminar:
-     __base_url: str = "https://api.lmnr.ai:8443"
+     __base_http_url: str
+     __base_grpc_url: str
      __project_api_key: Optional[str] = None
      __env: dict[str, str] = {}
      __initialized: bool = False
@@ -52,6 +54,8 @@ class Laminar:
          project_api_key: Optional[str] = None,
          env: dict[str, str] = {},
          base_url: Optional[str] = None,
+         http_port: Optional[int] = None,
+         grpc_port: Optional[int] = None,
          instruments: Optional[Set[Instruments]] = None,
      ):
          """Initialize Laminar context across the application.
@@ -71,13 +75,12 @@ class Laminar:
                  overriden at request time. Usually, model
                  provider keys are stored here.
                  Defaults to {}.
-             base_url (Optional[str], optional): Url of Laminar endpoint,
-                 or the customopen telemetry ingester.
-                 If not specified, defaults to
-                 https://api.lmnr.ai:8443.
-                 For locally hosted Laminar, default setting
-                 must be http://localhost:8001
-                 Defaults to None.
+             base_url (Optional[str], optional): Laminar API url.
+                 If not specified, defaults to https://api.lmnr.ai.
+             http_port (Optional[int], optional): Laminar API http port.
+                 If not specified, defaults to 443.
+             grpc_port (Optional[int], optional): Laminar API grpc port.
+                 If not specified, defaults to 8443.

          Raises:
              ValueError: If project API key is not set
@@ -85,7 +88,7 @@ class Laminar:
          cls.__project_api_key = project_api_key or os.environ.get(
              "LMNR_PROJECT_API_KEY"
          )
-         if not project_api_key:
+         if not cls.__project_api_key:
              dotenv_path = dotenv.find_dotenv(usecwd=True)
              cls.__project_api_key = dotenv.get_key(
                  dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
@@ -96,14 +99,16 @@ class Laminar:
                  " your project API key or set the LMNR_PROJECT_API_KEY"
                  " environment variable in your environment or .env file"
              )
-         if base_url is not None:
-             cls.__base_url = base_url
+
+         cls.__base_http_url = f"{base_url or 'https://api.lmnr.ai'}:{http_port or 443}"
+         cls.__base_grpc_url = f"{base_url or 'https://api.lmnr.ai'}:{grpc_port or 8443}"
+
          cls.__env = env
          cls.__initialized = True
          cls._initialize_logger()
          Traceloop.init(
              exporter=OTLPSpanExporter(
-                 endpoint=cls.__base_url,
+                 endpoint=cls.__base_grpc_url,
                  headers={"authorization": f"Bearer {cls.__project_api_key}"},
              ),
              instruments=instruments,
@@ -190,7 +195,7 @@ class Laminar:
              raise ValueError(f"Invalid request: {e}")

          response = requests.post(
-             cls.__base_url + "/v1/pipeline/run",
+             cls.__base_http_url + "/v1/pipeline/run",
              data=json.dumps(request.to_dict()),
              headers=cls._headers(),
          )
@@ -292,7 +297,7 @@ class Laminar:
              )
              return

-         current_span.add_event(name, event)
+         current_span.add_event(name, event, timestamp)

      @classmethod
      @contextmanager
@@ -407,7 +412,7 @@ class Laminar:
      @classmethod
      def create_evaluation(cls, name: str) -> CreateEvaluationResponse:
          response = requests.post(
-             cls.__base_url + "/v1/evaluations",
+             cls.__base_http_url + "/v1/evaluations",
              data=json.dumps({"name": name}),
              headers=cls._headers(),
          )
@@ -421,14 +426,14 @@

      @classmethod
      def post_evaluation_results(
-         cls, evaluation_name: str, data: list[EvaluationResultDatapoint]
+         cls, evaluation_id: uuid.UUID, data: list[EvaluationResultDatapoint]
      ) -> requests.Response:
          body = {
-             "name": evaluation_name,
-             "points": data,
+             "evaluationId": str(evaluation_id),
+             "points": [datapoint.model_dump() for datapoint in data],
          }
          response = requests.post(
-             cls.__base_url + "/v1/evaluation-datapoints",
+             cls.__base_http_url + "/v1/evaluation-datapoints",
              data=json.dumps(body),
              headers=cls._headers(),
          )
@@ -446,28 +451,38 @@

      @classmethod
      def update_evaluation_status(
-         cls, evaluation_name: str, status: str
-     ) -> requests.Response:
+         cls, evaluation_id: str, status: str
+     ) -> UpdateEvaluationResponse:
+         """
+         Updates the status of an evaluation. Returns the updated evaluation object.
+
+         Args:
+             evaluation_id (str): The ID of the evaluation to update.
+             status (str): The status to set for the evaluation.
+
+         Returns:
+             UpdateEvaluationResponse: The updated evaluation response.
+
+         Raises:
+             ValueError: If the request fails.
+         """
          body = {
-             "name": evaluation_name,
              "status": status,
          }
-         response = requests.put(
-             cls.__base_url + "/v1/evaluations/",
+         url = f"{cls.__base_http_url}/v1/evaluations/{evaluation_id}"
+
+         response = requests.post(
+             url,
              data=json.dumps(body),
              headers=cls._headers(),
          )
          if response.status_code != 200:
-             try:
-                 resp_json = response.json()
-                 raise ValueError(
-                     f"Failed to send evaluation status. Response: {json.dumps(resp_json)}"
-                 )
-             except Exception:
-                 raise ValueError(
-                     f"Failed to send evaluation status. Error: {response.text}"
-                 )
-         return response
+             raise ValueError(
+                 f"Failed to update evaluation status {evaluation_id}. "
+                 f"Response: {response.text}"
+             )
+
+         return UpdateEvaluationResponse.model_validate(response.json())

      @classmethod
      def _headers(cls):
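Since `__base_url` is now split into separate HTTP and gRPC endpoints, a self-hosted deployment is configured through `base_url` plus optional ports rather than a single URL with the port baked in. A hedged sketch of initialization against a local instance (host and ports below are placeholders, not values prescribed by this diff):

```python
from lmnr import Laminar as L

# Without base_url/ports the SDK targets https://api.lmnr.ai:443 for HTTP calls
# and https://api.lmnr.ai:8443 for gRPC trace export. The endpoints are derived
# as f"{base_url}:{http_port}" and f"{base_url}:{grpc_port}".
L.initialize(
    project_api_key="<LMNR_PROJECT_API_KEY>",
    base_url="http://localhost",  # placeholder self-hosted host
    http_port=8000,               # placeholder HTTP port
    grpc_port=8001,               # placeholder gRPC port
)
```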
lmnr/sdk/types.py CHANGED
@@ -2,7 +2,7 @@ import datetime
  import requests
  import pydantic
  import uuid
- from typing import Any, Literal, Optional, TypeAlias, Union
+ from typing import Any, Awaitable, Callable, Literal, Optional, Union

  from .utils import to_dict

@@ -17,9 +17,9 @@ class ConditionedValue(pydantic.BaseModel):
      value: "NodeInput"


- Numeric: TypeAlias = Union[int, float]
- NodeInput: TypeAlias = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
- PipelineOutput: TypeAlias = Union[NodeInput]
+ Numeric = Union[int, float]
+ NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
+ PipelineOutput = Union[NodeInput]


  class PipelineRunRequest(pydantic.BaseModel):
@@ -74,12 +74,12 @@ class PipelineRunError(Exception):
          return super().__str__()


- EvaluationDatapointData: TypeAlias = dict[str, Any]
- EvaluationDatapointTarget: TypeAlias = dict[str, Any]
+ EvaluationDatapointData = dict[str, Any]
+ EvaluationDatapointTarget = dict[str, Any]


  # EvaluationDatapoint is a single data point in the evaluation
- class EvaluationDatapoint(pydantic.BaseModel):
+ class Datapoint(pydantic.BaseModel):
      # input to the executor function. Must be a dict with string keys
      data: EvaluationDatapointData
      # input to the evaluator function (alongside the executor output).
@@ -87,24 +87,24 @@ class EvaluationDatapoint(pydantic.BaseModel):
      target: EvaluationDatapointTarget


- ExecutorFunctionReturnType: TypeAlias = Any
- EvaluatorFunctionReturnType: TypeAlias = Union[Numeric, dict[str, Numeric]]
+ ExecutorFunctionReturnType = Any
+ EvaluatorFunctionReturnType = Union[Numeric, dict[str, Numeric]]

- # ExecutorFunction: TypeAlias = Callable[
- #     [EvaluationDatapointData, *tuple[Any, ...], dict[str, Any]],
- #     Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
- # ]
+ ExecutorFunction = Callable[
+     [EvaluationDatapointData, Any, dict[str, Any]],
+     Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
+ ]

  # EvaluatorFunction is a function that takes the output of the executor and the
  # target data, and returns a score. The score can be a single number or a
  # record of string keys and number values. The latter is useful for evaluating
  # multiple criteria in one go instead of running multiple evaluators.
- # EvaluatorFunction: TypeAlias = Callable[
- #     [ExecutorFunctionReturnType, *tuple[Any, ...], dict[str, Any]],
- #     Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
- # ]
+ EvaluatorFunction = Callable[
+     [ExecutorFunctionReturnType, Any, dict[str, Any]],
+     Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
+ ]

- EvaluationStatus: TypeAlias = Literal["Started", "Finished", "Error"]
+ EvaluationStatus = Literal["Started", "Finished", "Error"]


  class CreateEvaluationResponse(pydantic.BaseModel):
@@ -114,10 +114,14 @@ class CreateEvaluationResponse(pydantic.BaseModel):
      status: EvaluationStatus
      projectId: uuid.UUID
      metadata: Optional[dict[str, Any]] = None
+     averageScores: Optional[dict[str, Numeric]] = None
+
+
+ UpdateEvaluationResponse = CreateEvaluationResponse


  class EvaluationResultDatapoint(pydantic.BaseModel):
      data: EvaluationDatapointData
      target: EvaluationDatapointTarget
-     executor_output: ExecutorFunctionReturnType
+     executorOutput: ExecutorFunctionReturnType
      scores: dict[str, Numeric]
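To make the renamed models concrete, a small sketch of the shapes involved: a `Datapoint` validated from a plain dict, and the `EvaluationResultDatapoint` (note `executorOutput`, renamed from `executor_output`) that the SDK serializes with `model_dump()` before posting. Field values are illustrative:

```python
from lmnr.sdk.types import Datapoint, EvaluationResultDatapoint

# A datapoint: `data` feeds the executor, `target` feeds the evaluators.
point = Datapoint.model_validate({"data": {"topic": "flow"}, "target": {"topic": "flow"}})

# A result datapoint as built by _evaluate_datapoint(); it is serialized with
# model_dump() before being sent to /v1/evaluation-datapoints.
result = EvaluationResultDatapoint(
    data=point.data,
    target=point.target,
    executorOutput="A poem about flow",
    scores={"contains_topic": 1},
)
```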
@@ -55,9 +55,6 @@ class Traceloop:

          enable_content_tracing = is_content_tracing_enabled()

-         if exporter or processor:
-             print(Fore.GREEN + "Laminar exporting traces to a custom exporter")
-
          headers = os.getenv("TRACELOOP_HEADERS") or headers

          if isinstance(headers, str):
@@ -78,17 +75,7 @@ class Traceloop:
              print(Fore.RESET)
              return

-         if not exporter and not processor and headers:
-             print(
-                 Fore.GREEN
-                 + f"Laminar exporting traces to {api_endpoint}, authenticating with custom headers"
-             )
-
          if api_key and not exporter and not processor and not headers:
-             print(
-                 Fore.GREEN
-                 + f"Laminar exporting traces to {api_endpoint} authenticating with bearer token"
-             )
              headers = {
                  "Authorization": f"Bearer {api_key}",
              }
@@ -2,6 +2,8 @@ from enum import Enum


  class Instruments(Enum):
+     # The list of libraries which will be autoinstrumented
+     # if no specific instruments are provided to initialize()
      OPENAI = "openai"
      ANTHROPIC = "anthropic"
      COHERE = "cohere"
@@ -15,10 +17,6 @@
      MILVUS = "milvus"
      TRANSFORMERS = "transformers"
      TOGETHER = "together"
-     REDIS = "redis"
-     REQUESTS = "requests"
-     URLLIB3 = "urllib3"
-     PYMYSQL = "pymysql"
      BEDROCK = "bedrock"
      REPLICATE = "replicate"
      VERTEXAI = "vertexai"
@@ -27,3 +25,10 @@
      ALEPHALPHA = "alephalpha"
      MARQO = "marqo"
      LANCEDB = "lancedb"
+
+     # The following libraries will not be autoinstrumented unless
+     # specified explicitly in the initialize() call.
+     REDIS = "redis"
+     REQUESTS = "requests"
+     URLLIB3 = "urllib3"
+     PYMYSQL = "pymysql"
@@ -124,46 +124,34 @@ class TracerWrapper(object):
          # this makes sure otel context is propagated so we always want it
          ThreadingInstrumentor().instrument()

-         instrument_set = False
          if instruments is None:
              init_instrumentations(should_enrich_metrics)
-             instrument_set = True
          else:
              for instrument in instruments:
                  if instrument == Instruments.OPENAI:
                      if not init_openai_instrumentor(should_enrich_metrics):
                          print(Fore.RED + "Warning: OpenAI library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.ANTHROPIC:
                      if not init_anthropic_instrumentor(should_enrich_metrics):
                          print(
                              Fore.RED + "Warning: Anthropic library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.COHERE:
                      if not init_cohere_instrumentor():
                          print(Fore.RED + "Warning: Cohere library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.PINECONE:
                      if not init_pinecone_instrumentor():
                          print(
                              Fore.RED + "Warning: Pinecone library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.CHROMA:
                      if not init_chroma_instrumentor():
                          print(Fore.RED + "Warning: Chroma library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.GOOGLE_GENERATIVEAI:
                      if not init_google_generativeai_instrumentor():
                          print(
@@ -171,44 +159,32 @@ class TracerWrapper(object):
                              + "Warning: Google Generative AI library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.LANGCHAIN:
                      if not init_langchain_instrumentor():
                          print(
                              Fore.RED + "Warning: LangChain library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.MISTRAL:
                      if not init_mistralai_instrumentor():
                          print(
                              Fore.RED + "Warning: MistralAI library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.OLLAMA:
                      if not init_ollama_instrumentor():
                          print(Fore.RED + "Warning: Ollama library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.LLAMA_INDEX:
                      if not init_llama_index_instrumentor():
                          print(
                              Fore.RED + "Warning: LlamaIndex library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.MILVUS:
                      if not init_milvus_instrumentor():
                          print(Fore.RED + "Warning: Milvus library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.TRANSFORMERS:
                      if not init_transformers_instrumentor():
                          print(
@@ -216,72 +192,52 @@ class TracerWrapper(object):
                              + "Warning: Transformers library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.TOGETHER:
                      if not init_together_instrumentor():
                          print(
                              Fore.RED + "Warning: TogetherAI library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.REQUESTS:
                      if not init_requests_instrumentor():
                          print(
                              Fore.RED + "Warning: Requests library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.URLLIB3:
                      if not init_urllib3_instrumentor():
                          print(Fore.RED + "Warning: urllib3 library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.PYMYSQL:
                      if not init_pymysql_instrumentor():
                          print(Fore.RED + "Warning: PyMySQL library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.BEDROCK:
                      if not init_bedrock_instrumentor(should_enrich_metrics):
                          print(Fore.RED + "Warning: Bedrock library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.REPLICATE:
                      if not init_replicate_instrumentor():
                          print(
                              Fore.RED + "Warning: Replicate library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.VERTEXAI:
                      if not init_vertexai_instrumentor():
                          print(
                              Fore.RED + "Warning: Vertex AI library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.WATSONX:
                      if not init_watsonx_instrumentor():
                          print(Fore.RED + "Warning: Watsonx library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.WEAVIATE:
                      if not init_weaviate_instrumentor():
                          print(
                              Fore.RED + "Warning: Weaviate library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.ALEPHALPHA:
                      if not init_alephalpha_instrumentor():
                          print(
@@ -289,26 +245,18 @@ class TracerWrapper(object):
                              + "Warning: Aleph Alpha library does not exist."
                          )
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.MARQO:
                      if not init_marqo_instrumentor():
                          print(Fore.RED + "Warning: marqo library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.LANCEDB:
                      if not init_lancedb_instrumentor():
                          print(Fore.RED + "Warning: LanceDB library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True
                  elif instrument == Instruments.REDIS:
                      if not init_redis_instrumentor():
                          print(Fore.RED + "Warning: redis library does not exist.")
                          print(Fore.RESET)
-                     else:
-                         instrument_set = True

                  else:
                      print(
@@ -324,13 +272,6 @@ class TracerWrapper(object):
                      )
                      print(Fore.RESET)

-         if not instrument_set:
-             print(
-                 Fore.RED + "Warning: No valid instruments set. Remove 'instrument' "
-                 "argument to use all instruments, or set a valid instrument."
-             )
-             print(Fore.RESET)
-
          obj.__content_allow_list = ContentAllowList()

          # Force flushes for debug environments (e.g. local development)
@@ -531,10 +472,6 @@ def init_instrumentations(should_enrich_metrics: bool):
      init_milvus_instrumentor()
      init_transformers_instrumentor()
      init_together_instrumentor()
-     init_redis_instrumentor()
-     init_requests_instrumentor()
-     init_urllib3_instrumentor()
-     init_pymysql_instrumentor()
      init_bedrock_instrumentor(should_enrich_metrics)
      init_replicate_instrumentor()
      init_vertexai_instrumentor()
@@ -545,6 +482,12 @@
      init_lancedb_instrumentor()
      init_groq_instrumentor()

+     # These libraries are not instrumented by default, but if the user wants, he can manually specify them
+     # init_redis_instrumentor()
+     # init_requests_instrumentor()
+     # init_urllib3_instrumentor()
+     # init_pymysql_instrumentor()
+

  def init_openai_instrumentor(should_enrich_metrics: bool):
      try:
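The practical effect of these tracing changes is that redis, requests, urllib3, and pymysql are no longer auto-instrumented by default and must be requested explicitly. A hedged sketch of opting back in (the chosen set of instruments is only an example):

```python
from lmnr import Laminar as L, Instruments

# With no `instruments` argument, only the default set is auto-instrumented;
# HTTP/DB client libraries such as requests or redis now have to be named explicitly.
L.initialize(
    project_api_key="<LMNR_PROJECT_API_KEY>",
    instruments={Instruments.OPENAI, Instruments.REQUESTS, Instruments.REDIS},
)
```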
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lmnr
- Version: 0.4.10
+ Version: 0.4.12b1
  Summary: Python SDK for Laminar AI
  License: Apache-2.0
  Author: lmnr.ai
@@ -11,6 +11,7 @@ Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: argparse (>=1.0,<2.0)
  Requires-Dist: asyncio (>=3.0,<4.0)
  Requires-Dist: backoff (>=2.0,<3.0)
  Requires-Dist: colorama (>=0.4,<0.5)
@@ -54,6 +55,7 @@ Requires-Dist: pydantic (>=2.7,<3.0)
  Requires-Dist: python-dotenv (>=1.0,<2.0)
  Requires-Dist: requests (>=2.0,<3.0)
  Requires-Dist: tenacity (>=8.0,<9.0)
+ Requires-Dist: tqdm (>=4.0,<5.0)
  Description-Content-Type: text/markdown

  # Laminar Python
@@ -67,6 +69,9 @@ OpenTelemetry log sender for [Laminar](https://github.com/lmnr-ai/lmnr) for Pyth


  ## Quickstart
+
+ First, install the package:
+
  ```sh
  python3 -m venv .myenv
  source .myenv/bin/activate # or use your favorite env management tool
@@ -74,22 +79,39 @@ source .myenv/bin/activate # or use your favorite env management tool
  pip install lmnr
  ```

- And the in your main Python file
+ Then, you can initialize Laminar in your main file and instrument your code.

  ```python
- from lmnr import Laminar as L, Instruments
+ import os
+ from openai import OpenAI
+ from lmnr import Laminar as L

- L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
- ```
+ L.initialize(
+     project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
+ )

- If you want to automatically instrument particular LLM, Vector DB, and related
- calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+ def poem_writer(topic: str):
+     prompt = f"write a poem about {topic}"
+
+     # OpenAI calls are automatically instrumented
+     response = client.chat.completions.create(
+         model="gpt-4o",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": prompt},
+         ],
+     )
+     poem = response.choices[0].message.content
+     return poem

- You can pass an empty set as `instruments=set()` to disable any kind of automatic instrumentation.
- Also if you want to automatically instrument all supported libraries, then pass `instruments=None` or don't pass `instruments` at all.
+ if __name__ == "__main__":
+     print(poem_writer("laminar flow"))
+
+ ```

- Our code is based on the [OpenLLMetry](https://github.com/traceloop/openllmetry), open-source package
- by TraceLoop. Also, we are grateful to Traceloop for implementing autoinstrumentations for many libraries.
+ Note that you need to only initialize Laminar once in your application.

  ### Project API key

@@ -98,67 +120,84 @@ You can either pass it to `.initialize()` or set it to `.env` at the root of you

  ## Instrumentation

- In addition to automatic instrumentation, we provide a simple `@observe()` decorator, if you want more fine-grained tracing
- or to trace other functions.
+ ### Manual instrumentation

- ### Example
+ To instrument any function in your code, we provide a simple `@observe()` decorator.
+ This can be useful if you want to trace a request handler or a function which combines multiple LLM calls.

  ```python
  import os
  from openai import OpenAI
+ from lmnr import Laminar as L, Instruments

-
- from lmnr import observe, Laminar as L, Instruments
- L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments={Instruments.OPENAI})
+ L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])

  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

- @observe() # annotate all functions you want to trace
- def poem_writer(topic="turbulence"):
+ def poem_writer(topic: str):
      prompt = f"write a poem about {topic}"
+     messages = [
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": prompt},
+     ]
+
+     # OpenAI calls are still automatically instrumented
      response = client.chat.completions.create(
          model="gpt-4o",
-         messages=[
-             {"role": "system", "content": "You are a helpful assistant."},
-             {"role": "user", "content": prompt},
-         ],
+         messages=messages,
      )
      poem = response.choices[0].message.content
+
      return poem

- print(poem_writer(topic="laminar flow"))
+ @observe()
+ def generate_poems():
+     poem1 = poem_writer(topic="laminar flow")
+     L.event("is_poem_generated", True)
+     poem2 = poem_writer(topic="turbulence")
+     L.event("is_poem_generated", True)
+     poems = f"{poem1}\n\n---\n\n{poem2}"
+     return poems
  ```

- ### Manual instrumentation
-
- Also, you can `Laminar.start_as_current_span` if you want to record a chunk of your code.
+ Also, you can use `Laminar.start_as_current_span` if you want to record a chunk of your code using `with` statement.

  ```python
- from lmnr import observe, Laminar as L, Instruments
- L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments={Instruments.OPENAI})
+ def handle_user_request(topic: str):
+     with L.start_as_current_span(name="poem_writer", input=topic):
+         ...
+
+         poem = poem_writer(topic=topic)
+
+         ...
+
+         # while within the span, you can attach laminar events to it
+         L.event("is_poem_generated", True)

- def poem_writer(topic="turbulence"):
-     prompt = f"write a poem about {topic}"
-     messages = [
-         {"role": "system", "content": "You are a helpful assistant."},
-         {"role": "user", "content": prompt},
-     ]
+         # Use set_span_output to record the output of the span
+         L.set_span_output(poem)
+ ```

-     with L.start_as_current_span(name="poem_writer", input=messages):
-         # OpenAI calls are still automatically instrumented with OpenLLMetry
-         response = client.chat.completions.create(
-             model="gpt-4o",
-             messages=messages,
-         )
-         poem = response.choices[0].message.content
-         # while within the span, you can attach laminar events to it
-         L.event("event_name", "event_value")
+ ### Automatic instrumentation

-         L.set_span_output(poem) # set an output
+ Laminar allows you to automatically instrument majority of the most popular LLM, Vector DB, database, requests, and other libraries.

-     return poem
+ If you want to automatically instrument a default set of libraries, then simply do NOT pass `instruments` argument to `.initialize()`.
+ See the full list of available instrumentations in the [enum](/src/lmnr/traceloop_sdk/instruments.py).
+
+ If you want to automatically instrument only specific LLM, Vector DB, or other
+ calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+ For example, if you want to only instrument OpenAI and Anthropic, then do the following:
+
+ ```python
+ from lmnr import Laminar as L, Instruments
+
+ L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
  ```

+ If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
+
+ Majority of the autoinstrumentations are provided by Traceloop's [OpenLLMetry](https://github.com/traceloop/openllmetry).

  ## Sending events

@@ -1,18 +1,19 @@
- lmnr/__init__.py,sha256=bA1f7JsEdSdU93HTz3SQLSanq-UgZGvb5I2OE0CWGR8,233
+ lmnr/__init__.py,sha256=5Ks8UIicCzCBgwSz0MOX3I7jVruPMUO3SmxIwUoODzQ,231
+ lmnr/cli.py,sha256=Ptvm5dsNLKUY5lwnN8XkT5GtCYjzpRNi2WvefknB3OQ,1079
  lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lmnr/sdk/decorators.py,sha256=W46diLcINe0HAhxktrjbfQnaIfklSb0AydBHHxiko9U,2314
- lmnr/sdk/evaluations.py,sha256=EaRcwbdXxj4w2yzak1xFv-YhDuxRVentQcJ-CypBoH0,6307
- lmnr/sdk/laminar.py,sha256=M8HdP6ZYJHdngUVrGj4GMZxz_EZyx3woHm-UpfWmIvs,18439
+ lmnr/sdk/decorators.py,sha256=O8S4PI6LUfdWPkbroigl5khtnkyhp24J8qzSdlvCs44,2227
+ lmnr/sdk/evaluations.py,sha256=kIME5Ahc_N3hVr7CXuG72oLDl7PjMg2JJlaBSkCG_S0,9999
+ lmnr/sdk/laminar.py,sha256=r9jTmIg4ljPlMiy6RVkmldd2i8l4p16ixmvVwyI8jOE,19001
  lmnr/sdk/log.py,sha256=EgAMY77Zn1bv1imCqrmflD3imoAJ2yveOkIcrIP3e98,1170
- lmnr/sdk/types.py,sha256=w7BJsoEPHiNps62cQt3Hd6tEZ7ZFCKRTPzcwdD6rNak,4050
+ lmnr/sdk/types.py,sha256=NKXohmXewppXUt_AwkTyZpohH6n4IStsVeEGILTB8e0,4006
  lmnr/sdk/utils.py,sha256=ZsGJ86tq8lIbvOhSb1gJWH5K3GylO_lgX68FN6rG2nM,3358
  lmnr/traceloop_sdk/.flake8,sha256=bCxuDlGx3YQ55QHKPiGJkncHanh9qGjQJUujcFa3lAU,150
  lmnr/traceloop_sdk/.python-version,sha256=9OLQBQVbD4zE4cJsPePhnAfV_snrPSoqEQw-PXgPMOs,6
- lmnr/traceloop_sdk/__init__.py,sha256=J-zVw6j0DmceVvJVZXAFcCzN_scz9hB3X17NQgPMgOg,4420
+ lmnr/traceloop_sdk/__init__.py,sha256=-wa25NtU7BeTgqjT6rpVIEdaVP4aOjLgbYrc3B3-JaM,3932
  lmnr/traceloop_sdk/config/__init__.py,sha256=EGN3ixOt_ORbMxqaQdLaC14kmO-gyG4mnGJ2GfN-R-E,364
  lmnr/traceloop_sdk/decorators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lmnr/traceloop_sdk/decorators/base.py,sha256=wcqXF0iVQgRXMyWTcJ5QvL_6q2y_gttwsX8dllmAtWM,4891
- lmnr/traceloop_sdk/instruments.py,sha256=G5EFAbpc20WD3M6xK6rlbj-Yy_r_f1m3gidY6UXzSRQ,701
+ lmnr/traceloop_sdk/instruments.py,sha256=oMvIASueW3GeChpjIdH-DD9aFBVB8OtHZ0HawppTrlI,942
  lmnr/traceloop_sdk/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lmnr/traceloop_sdk/metrics/metrics.py,sha256=AlQ2a2os1WcZbfBd155u_UzBbPrbuPia6O_HbojV9Wc,5055
  lmnr/traceloop_sdk/tests/__init__.py,sha256=RYnG0-8zbXL0-2Ste1mEBf5sN4d_rQjGTCgPBuaZC74,20
@@ -39,14 +40,14 @@ lmnr/traceloop_sdk/tracing/__init__.py,sha256=Ckq7zCM26VdJVB5tIZv0GTPyMZKyfso_KW
  lmnr/traceloop_sdk/tracing/content_allow_list.py,sha256=3feztm6PBWNelc8pAZUcQyEGyeSpNiVKjOaDk65l2ps,846
  lmnr/traceloop_sdk/tracing/context_manager.py,sha256=csVlB6kDmbgSPsROHwnddvGGblx55v6lJMRj0wsSMQM,304
  lmnr/traceloop_sdk/tracing/manual.py,sha256=RPwEreHHdzmw7g15u4G21GqhHOvRp7d72ylQNLG1jRM,1841
- lmnr/traceloop_sdk/tracing/tracing.py,sha256=VFrf5D6CC3DquLy_19_5I_L_w1kO2X61KvPW0XD26-k,42347
+ lmnr/traceloop_sdk/tracing/tracing.py,sha256=5e8AsiFKaIO6zqAbMfhw242glVsQUkxbNhTWP7QDqSg,40108
  lmnr/traceloop_sdk/utils/__init__.py,sha256=pNhf0G3vTd5ccoc03i1MXDbricSaiqCbi1DLWhSekK8,604
  lmnr/traceloop_sdk/utils/in_memory_span_exporter.py,sha256=H_4TRaThMO1H6vUQ0OpQvzJk_fZH0OOsRAM1iZQXsR8,2112
  lmnr/traceloop_sdk/utils/json_encoder.py,sha256=dK6b_axr70IYL7Vv-bu4wntvDDuyntoqsHaddqX7P58,463
  lmnr/traceloop_sdk/utils/package_check.py,sha256=TZSngzJOpFhfUZLXIs38cpMxQiZSmp0D-sCrIyhz7BA,251
  lmnr/traceloop_sdk/version.py,sha256=OlatFEFA4ttqSSIiV8jdE-sq3KG5zu2hnC4B4mzWF3s,23
- lmnr-0.4.10.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
- lmnr-0.4.10.dist-info/METADATA,sha256=VPXsfYwAy1uTv_qucCqgAmyGyZMngr0HuysSSdpX8Jw,10999
- lmnr-0.4.10.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- lmnr-0.4.10.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
- lmnr-0.4.10.dist-info/RECORD,,
+ lmnr-0.4.12b1.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+ lmnr-0.4.12b1.dist-info/METADATA,sha256=6ne6QRKpBg09-X6ypcHSgqfbwjLvmxPAdknTPfA00F0,11992
+ lmnr-0.4.12b1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ lmnr-0.4.12b1.dist-info/entry_points.txt,sha256=K1jE20ww4jzHNZLnsfWBvU3YKDGBgbOiYG5Y7ivQcq4,37
+ lmnr-0.4.12b1.dist-info/RECORD,,
@@ -0,0 +1,3 @@
+ [console_scripts]
+ lmnr=lmnr.cli:cli
+
@@ -1,3 +0,0 @@
- [console_scripts]
- lmnr=lmnr.cli.cli:cli
-
File without changes