lmnr 0.3.7__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
+ Metadata-Version: 2.1
+ Name: lmnr
+ Version: 0.4.1
+ Summary: Python SDK for Laminar AI
+ License: Apache-2.0
+ Author: lmnr.ai
+ Requires-Python: >=3.9,<4.0
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: asyncio (>=3.4.3,<4.0.0)
+ Requires-Dist: backoff (>=2.2.1,<3.0.0)
+ Requires-Dist: pydantic (>=2.7.4,<3.0.0)
+ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
+ Requires-Dist: requests (>=2.32.3,<3.0.0)
+ Requires-Dist: traceloop-sdk (>=0.29.2,<0.30.0)
+ Description-Content-Type: text/markdown
+
+ # Laminar Python
+
+ OpenTelemetry log sender for [Laminar](https://github.com/lmnr-ai/lmnr) for Python code.
+
+ <a href="https://pypi.org/project/lmnr/"> ![PyPI - Version](https://img.shields.io/pypi/v/lmnr?label=lmnr&logo=pypi&logoColor=3775A9) </a>
+ ![PyPI - Downloads](https://img.shields.io/pypi/dm/lmnr)
+ ![PyPI - Python Version](https://img.shields.io/pypi/pyversions/lmnr)
+
+ ## Quickstart
+ ```sh
+ python3 -m venv .myenv
+ source .myenv/bin/activate # or use your favorite env management tool
+
+ pip install lmnr
+ ```
+
+ Then, in your main Python file:
+
+ ```python
+ from lmnr import Laminar as L
+
+ L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>")
+ ```
+
+ This will automatically instrument most LLM, Vector DB, and related
+ calls with OpenTelemetry-compatible instrumentation.
+
+ We rely on the amazing [OpenLLMetry](https://github.com/traceloop/openllmetry) open-source
+ package by Traceloop to achieve that.
+
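+ For example, once `initialize()` has run, a plain OpenAI call is traced with no extra code. A minimal sketch (the model name and prompt are purely illustrative):
+
+ ```python
+ import os
+ from openai import OpenAI
+ from lmnr import Laminar as L
+
+ L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>")
+
+ # no decorator needed: this call is picked up by the automatic
+ # OpenLLMetry instrumentation and exported to Laminar
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+ client.chat.completions.create(
+     model="gpt-4o-mini",
+     messages=[{"role": "user", "content": "Say hello"}],
+ )
+ ```
+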
+ ### Project API key
+
+ Get the key from the settings page of your Laminar project ([Learn more](https://docs.lmnr.ai/api-reference/introduction#authentication)).
+ You can either pass it to `.initialize()` or set it in a `.env` file at the root of your package under the key `LMNR_PROJECT_API_KEY`.
+
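+ A minimal sketch of the `.env` approach, assuming `initialize()` falls back to the environment as described above:
+
+ ```python
+ # .env at the root of your package contains:
+ # LMNR_PROJECT_API_KEY=<your-key>
+ from lmnr import Laminar as L
+
+ # no explicit key argument: it is read from .env / the environment
+ L.initialize()
+ ```
+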
+ ## Instrumentation
+
+ In addition to automatic instrumentation, we provide a simple `@observe()` decorator
+ for more fine-grained tracing or for tracing other functions.
+
+ ### Example
+
+ ```python
+ import os
+ from openai import OpenAI
+
+ from lmnr import observe, Laminar as L
+
+ L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>")
+
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+ @observe()  # annotate all functions you want to trace
+ def poem_writer(topic="turbulence"):
+     prompt = f"write a poem about {topic}"
+     response = client.chat.completions.create(
+         model="gpt-4o",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": prompt},
+         ],
+     )
+     poem = response.choices[0].message.content
+     return poem
+
+ print(poem_writer(topic="laminar flow"))
+ ```
+
+ ## Sending events
+
+ You can send events in two ways:
+ - `.event(name, value)` – for a pre-defined event with one of the possible values.
+ - `.evaluate_event(name, evaluator, data)` – for an event that is evaluated by an evaluator pipeline based on the data.
+
+ Note that to run an evaluate event, you need to create an evaluator pipeline and a target version for it.
+
+ Read our [docs](https://docs.lmnr.ai) to learn more about event types and how they are created and evaluated.
+
+ ### Example
+
+ ```python
+ from lmnr import Laminar as L
+ # ...
+ poem = response.choices[0].message.content
+
+ # this will register a True or False value with Laminar
+ L.event("topic alignment", topic in poem)
+
+ # this will run the pipeline `check_wordy` with `poem` set as the value
+ # of the `text_input` node, and write the result as an event with the name
+ # "excessive_wordiness"
+ L.evaluate_event("excessive_wordiness", "check_wordy", {"text_input": poem})
+ ```
+
+ ## Laminar pipelines as prompt chain managers
+
+ You can create Laminar pipelines in the UI and manage chains of LLM calls there.
+
+ When you are ready to use your pipeline in your code, deploy it in Laminar by selecting a target version for it.
+
+ Once your pipeline target is set, you can call it from Python in just a few lines.
+
+ Example use:
+
+ ```python
+ from lmnr import Laminar as L
+
+ L.initialize('<YOUR_PROJECT_API_KEY>')
+
+ result = L.run(
+     pipeline='my_pipeline_name',
+     inputs={'input_node_name': 'some_value'},
+     # all environment variables the pipeline needs
+     env={'OPENAI_API_KEY': 'sk-some-key'},
+ )
+ ```
+
+ Resulting in:
+
+ ```python
+ >>> result
+ PipelineRunResponse(
+     outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
+     # useful to locate your trace
+     run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
+ )
+ ```
+
+ ## Running offline evaluations on your data
+
+ You can evaluate your code with your own data and send the results to Laminar using the `Evaluation` class.
+
+ `Evaluation` takes in the following parameters:
+ - `name` – the name of your evaluation. If no such evaluation exists in the project, it will be created; otherwise, data will be pushed to the existing evaluation.
+ - `data` – an array of `EvaluationDatapoint` objects, where each `EvaluationDatapoint` has two keys: `target` and `data`, each containing a key-value object. Alternatively, you can pass in dictionaries, and we will instantiate `EvaluationDatapoint`s with pydantic if possible.
+ - `executor` – the logic you want to evaluate. This function must take `data` as its first argument and may produce any output.*
+ - `evaluators` – evaluation logic. A list of functions that take the output of the executor as the first argument and `target` as the second, and produce numeric scores. Each function can return either a single number or a `dict[str, int|float]` of scores; a multi-score sketch follows the example below.
+
+ \* If you already have the outputs you want to evaluate, you can specify the executor as an identity function that takes in `data` and returns only the needed value(s) from it, as in the sketch below.
+
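+ A minimal sketch of such an identity executor (the `precomputed_output` key is hypothetical):
+
+ ```python
+ # `data` already carries the output we want to score, so the "executor"
+ # simply extracts it instead of running any model
+ def identity_executor(data):
+     return data["precomputed_output"]
+ ```
+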
+ ### Example
+
+ ```python
+ from openai import AsyncOpenAI
+ import asyncio
+ import os
+
+ from lmnr import Evaluation
+
+ openai_client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+ async def get_capital(data):
+     country = data["country"]
+     response = await openai_client.chat.completions.create(
+         model="gpt-4o-mini",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {
+                 "role": "user",
+                 "content": f"What is the capital of {country}? Just name the "
+                 "city and nothing else",
+             },
+         ],
+     )
+     return response.choices[0].message.content.strip()
+
+
+ # Evaluation data
+ data = [
+     {"data": {"country": "Canada"}, "target": {"capital": "Ottawa"}},
+     {"data": {"country": "Germany"}, "target": {"capital": "Berlin"}},
+     {"data": {"country": "Tanzania"}, "target": {"capital": "Dodoma"}},
+ ]
+
+
+ def evaluator_A(output, target):
+     return 1 if output == target["capital"] else 0
+
+
+ # Create an Evaluation instance
+ e = Evaluation(
+     name="py-evaluation-async",
+     data=data,
+     executor=get_capital,
+     evaluators=[evaluator_A],
+     project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
+ )
+
+ # Run the evaluation
+ asyncio.run(e.run())
+ ```
+
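+ Since evaluators may also return several named scores, an illustrative variant of `evaluator_A` (the score names are arbitrary) could look like this:
+
+ ```python
+ # returns a dict[str, int|float] of scores instead of a single number
+ def evaluator_multi(output, target):
+     return {
+         "exact_match": 1 if output == target["capital"] else 0,
+         "answer_length": len(output),
+     }
+ ```
+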
@@ -0,0 +1,13 @@
+ lmnr/__init__.py,sha256=wQwnHl662Xcz7GdSofFsEjmAK0nxioYA2Yq6Q78m4ps,194
+ lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lmnr/sdk/decorators.py,sha256=Xs6n0TGX9LZ9i1hE_UZz4LEyd_ZAfpVGfNQh_rKwOuA,2493
+ lmnr/sdk/evaluations.py,sha256=LkQApHAhR7y_rC2ovnJi8yHpdcl0-7yesdBqvOJ0BKg,6107
+ lmnr/sdk/laminar.py,sha256=970fvaw969pBdBqrDRD8lQ82uPEn8V5n-4rIIe_5pqM,16552
+ lmnr/sdk/log.py,sha256=EgAMY77Zn1bv1imCqrmflD3imoAJ2yveOkIcrIP3e98,1170
+ lmnr/sdk/types.py,sha256=gDwRSWR9A1__FGtQhVaFc6PUYQuIhubo5tpfYAajTQQ,4055
+ lmnr/sdk/utils.py,sha256=ZsGJ86tq8lIbvOhSb1gJWH5K3GylO_lgX68FN6rG2nM,3358
+ lmnr-0.4.1.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+ lmnr-0.4.1.dist-info/METADATA,sha256=_g6TaAlJuPxl_sbWYPjlg4380V0Ja34P8OXHAXofakI,7025
+ lmnr-0.4.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ lmnr-0.4.1.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
+ lmnr-0.4.1.dist-info/RECORD,,
lmnr/sdk/client.py DELETED
@@ -1,161 +0,0 @@
- from .tracing_types import Span, Trace
-
- from pydantic.alias_generators import to_snake
- from typing import Any, Optional, Union
- import dotenv
- import json
- import logging
- import os
- import requests
- import uuid
-
- from .types import (
-     PipelineRunError,
-     PipelineRunResponse,
-     NodeInput,
-     PipelineRunRequest,
- )
-
-
- class APIError(Exception):
-     def __init__(self, status: Union[int, str], message: str, details: Any = None):
-         self.message = message
-         self.status = status
-         self.details = details
-
-     def __str__(self):
-         msg = "{0} ({1}): {2}"
-         return msg.format(self.message, self.status, self.details)
-
-
- class Laminar:
-     _base_url = "https://api.lmnr.ai"
-
-     def __init__(self, project_api_key: Optional[str] = None):
-         self.project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
-         if not self.project_api_key:
-             dotenv_path = dotenv.find_dotenv(usecwd=True)
-             self.project_api_key = dotenv.get_key(
-                 dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
-             )
-         if not self.project_api_key:
-             raise ValueError(
-                 "Please initialize the Laminar object with your project API key or set "
-                 "the LMNR_PROJECT_API_KEY environment variable in your environment or .env file"
-             )
-
-     def run(
-         self,
-         pipeline: str,
-         inputs: dict[str, NodeInput],
-         env: dict[str, str] = {},
-         metadata: dict[str, str] = {},
-         parent_span_id: Optional[uuid.UUID] = None,
-         trace_id: Optional[uuid.UUID] = None,
-     ) -> PipelineRunResponse:
-         """Runs the pipeline with the given inputs
-
-         Args:
-             pipeline (str): name of the Laminar pipeline
-             inputs (dict[str, NodeInput]):
-                 inputs to the endpoint's target pipeline.
-                 Keys in the dictionary must match input node names
-             env (dict[str, str], optional):
-                 Environment variables for the pipeline execution.
-                 Defaults to {}.
-             metadata (dict[str, str], optional):
-                 any custom metadata to be stored
-                 with execution trace. Defaults to {}.
-             parent_span_id (Optional[uuid.UUID], optional):
-                 parent span id for the resulting span.
-                 Must usually be SpanContext.id()
-                 Defaults to None.
-             trace_id (Optional[uuid.UUID], optional):
-                 trace id for the resulting trace.
-                 Must usually be TraceContext.id()
-                 Defaults to None.
-
-         Returns:
-             PipelineRunResponse: response object containing the outputs
-
-         Raises:
-             ValueError: if project API key is not set
-             PipelineRunError: if the endpoint run fails
-         """
-         if self.project_api_key is None:
-             raise ValueError(
-                 "Please initialize the Laminar object with your project API key or set "
-                 "the LMNR_PROJECT_API_KEY environment variable"
-             )
-         try:
-             request = PipelineRunRequest(
-                 inputs=inputs,
-                 pipeline=pipeline,
-                 env=env,
-                 metadata=metadata,
-                 parent_span_id=parent_span_id,
-                 trace_id=trace_id,
-             )
-         except Exception as e:
-             raise ValueError(f"Invalid request: {e}")
-
-         response = requests.post(
-             self._base_url + "/v1/pipeline/run",
-             data=json.dumps(request.to_dict()),
-             headers=self._headers(),
-         )
-         if response.status_code != 200:
-             raise PipelineRunError(response)
-         try:
-             resp_json = response.json()
-             keys = list(resp_json.keys())
-             for key in keys:
-                 value = resp_json[key]
-                 del resp_json[key]
-                 resp_json[to_snake(key)] = value
-             return PipelineRunResponse(**resp_json)
-         except Exception:
-             raise PipelineRunError(response)
-
-     def batch_post_traces(self, batch: list[Union[Span, Trace]]):
-         log = logging.getLogger("laminar.client")
-         url = self._base_url + "/v1/observations"
-         data = json.dumps({"observations": [item.to_dict() for item in batch]})
-         log.debug(f"making request to {url}")
-         headers = self._headers()
-         res = requests.post(url, data=data, headers=headers)
-
-         if res.status_code == 200:
-             log.debug("data uploaded successfully")
-
-         return self._process_response(
-             res, success_message="data uploaded successfully", return_json=False
-         )
-
-     def _process_response(
-         self, res: requests.Response, success_message: str, return_json: bool = True
-     ) -> Union[requests.Response, Any]:
-         log = logging.getLogger("laminar.client")
-         log.debug("received response: %s", res.text)
-         if res.status_code in (200, 201):
-             log.debug(success_message)
-             if return_json:
-                 try:
-                     return res.json()
-                 except json.JSONDecodeError:
-                     log.error("Response is not valid JSON.")
-                     raise APIError(res.status_code, "Invalid JSON response received")
-             else:
-                 return res
-         try:
-             payload = res.json()
-             log.error("received error response: %s", payload)
-             raise APIError(res.status_code, payload)
-         except (KeyError, ValueError):
-             raise APIError(res.status_code, res.text)
-
-     def _headers(self):
-         return {
-             "Authorization": "Bearer " + self.project_api_key,
-             "Content-Type": "application/json",
-         }
lmnr/sdk/collector.py DELETED
@@ -1,177 +0,0 @@
- from .client import APIError, Laminar
- from .tracing_types import Span, Trace
-
- from queue import Queue, Empty, Full
- from typing import Union
-
- import atexit
- import backoff
- import logging
- import time
- import threading
-
-
- class Collector(threading.Thread):
-     _log = logging.getLogger("laminar.collector")
-     _queue: Queue[Union[Span, Trace]]
-     _client: Laminar
-     _flush_interval: float
-
-     def __init__(
-         self,
-         queue: Queue[Union[Span, Trace]],
-         client: Laminar,
-         flush_interval: float = 5.0,
-     ):
-         super().__init__()
-         self.daemon = True
-         self._queue = queue
-         self.running = True
-         self._flush_interval = flush_interval
-         self._client = client
-
-     def run(self):
-         """Runs the collector."""
-         self._log.debug("collector is running...")
-         while self.running:
-             self.upload()
-
-     def upload(self):
-         """Upload the next batch of items, return whether successful."""
-         batch = self._next()
-         if len(batch) == 0:
-             return
-
-         try:
-             self._upload_batch(batch)
-         except Exception as e:
-             self._log.exception("error uploading: %s", e)
-         finally:
-             # mark items as acknowledged from queue
-             for _ in batch:
-                 self._queue.task_done()
-
-     def pause(self):
-         self.running = False
-
-     def _next(self):
-         items = []
-         start_time = time.monotonic()
-
-         while True:
-             elapsed = time.monotonic() - start_time
-             if elapsed >= self._flush_interval:
-                 break
-             try:
-                 item = self._queue.get(
-                     block=True, timeout=self._flush_interval - elapsed
-                 )
-                 items.append(item)
-             except Empty:
-                 break
-         return items
-
-     def _upload_batch(self, batch: list[Union[Trace, Span]]):
-         self._log.debug("uploading batch of %d items", len(batch))
-
-         @backoff.on_exception(backoff.expo, Exception, max_tries=5)
-         def execute_task_with_backoff(batch: list[Union[Trace, Span]]):
-             try:
-                 self._client.batch_post_traces(batch=batch)
-             except Exception as e:
-                 if (
-                     isinstance(e, APIError)
-                     and 400 <= int(e.status) < 500
-                     and int(e.status) != 429
-                 ):
-                     self._log.warn(
-                         f"Received {e.status} error by Laminar server, not retrying: {e.message}"
-                     )
-                     return
-
-                 raise e
-
-         execute_task_with_backoff(batch)
-         self._log.debug("successfully uploaded batch of %d items", len(batch))
-
-
- class ThreadManager:
-     _log = logging.getLogger("laminar.task_manager")
-     _queue: Queue[Union[Span, Trace]]
-     _client: Laminar
-     _max_task_queue_size: int
-     _flush_interval: float
-     _collectors: list[Collector] = []
-     _threads: int
-
-     def __init__(
-         self,
-         client: Laminar,
-         flush_interval: float = 2.0,
-         max_task_queue_size: int = 1000,
-         threads: int = 1,
-     ):
-         self._max_task_queue_size = max_task_queue_size
-         self._threads = threads
-         self._queue = Queue(maxsize=self._max_task_queue_size)
-         self._flush_interval = flush_interval
-         self._client = client
-         for _ in range(self._threads):
-             collector = Collector(
-                 queue=self._queue,
-                 client=self._client,
-                 flush_interval=flush_interval,
-             )
-             self._collectors.append(collector)
-             collector.start()
-         atexit.register(self.join)
-
-     def add_task(self, event: Union[Span, Trace]) -> bool:
-         try:
-             self._queue.put(event, block=False)
-             return True
-         except Full:
-             self._log.warning("queue is full")
-             return False
-         except Exception as e:
-             self._log.exception(f"Exception in adding task {e}")
-
-             return False
-
-     def flush(self):
-         """Forces a flush from the internal queue to the server"""
-         self._log.debug("flushing queue")
-         queue = self._queue
-         size = queue.qsize()
-         queue.join()
-         # Note that this message may not be precise, because of threading.
-         self._log.debug("successfully flushed about %s items.", size)
-
-     def join(self):
-         """Ends the collector threads once the queue is empty.
-         Blocks execution until finished
-         """
-         self._log.debug(f"joining {len(self._collectors)} collector threads")
-
-         # pause all collectors before joining them so we don't have to wait for multiple
-         # flush intervals to join them all.
-         for collector in self._collectors:
-             collector.pause()
-
-         for i, collector in enumerate(self._collectors):
-             try:
-                 collector.join()
-             except RuntimeError:
-                 # collector thread has not started
-                 pass
-
-             self._log.debug(f"collector thread {i} joined")
-
-     def shutdown(self):
-         """Flush all messages and cleanly shutdown the client"""
-         self._log.debug("shutdown initiated")
-
-         self.flush()
-         self.join()
-
-         self._log.debug("shutdown completed")
lmnr/sdk/constants.py DELETED
@@ -1 +0,0 @@
- CURRENT_TRACING_VERSION = "0.1.0"