lmnr 0.2.15__py3-none-any.whl → 0.3.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/__init__.py +4 -4
- lmnr/sdk/client.py +156 -0
- lmnr/sdk/collector.py +177 -0
- lmnr/sdk/constants.py +1 -0
- lmnr/sdk/context.py +456 -0
- lmnr/sdk/decorators.py +277 -0
- lmnr/sdk/interface.py +339 -0
- lmnr/sdk/providers/__init__.py +2 -0
- lmnr/sdk/providers/base.py +28 -0
- lmnr/sdk/providers/fallback.py +131 -0
- lmnr/sdk/providers/openai.py +140 -0
- lmnr/sdk/providers/utils.py +33 -0
- lmnr/sdk/tracing_types.py +197 -0
- lmnr/sdk/types.py +69 -0
- lmnr/sdk/utils.py +102 -0
- lmnr-0.3.0b1.dist-info/METADATA +186 -0
- lmnr-0.3.0b1.dist-info/RECORD +21 -0
- lmnr/cli/__init__.py +0 -0
- lmnr/cli/__main__.py +0 -4
- lmnr/cli/cli.py +0 -230
- lmnr/cli/parser/__init__.py +0 -0
- lmnr/cli/parser/nodes/__init__.py +0 -45
- lmnr/cli/parser/nodes/code.py +0 -36
- lmnr/cli/parser/nodes/condition.py +0 -30
- lmnr/cli/parser/nodes/input.py +0 -25
- lmnr/cli/parser/nodes/json_extractor.py +0 -29
- lmnr/cli/parser/nodes/llm.py +0 -56
- lmnr/cli/parser/nodes/output.py +0 -27
- lmnr/cli/parser/nodes/router.py +0 -37
- lmnr/cli/parser/nodes/semantic_search.py +0 -53
- lmnr/cli/parser/nodes/types.py +0 -153
- lmnr/cli/parser/parser.py +0 -62
- lmnr/cli/parser/utils.py +0 -49
- lmnr/cli/zip.py +0 -16
- lmnr/sdk/endpoint.py +0 -186
- lmnr/sdk/registry.py +0 -29
- lmnr/sdk/remote_debugger.py +0 -148
- lmnr/types.py +0 -101
- lmnr-0.2.15.dist-info/METADATA +0 -187
- lmnr-0.2.15.dist-info/RECORD +0 -28
- {lmnr-0.2.15.dist-info → lmnr-0.3.0b1.dist-info}/LICENSE +0 -0
- {lmnr-0.2.15.dist-info → lmnr-0.3.0b1.dist-info}/WHEEL +0 -0
- {lmnr-0.2.15.dist-info → lmnr-0.3.0b1.dist-info}/entry_points.txt +0 -0
lmnr/__init__.py
CHANGED
@@ -1,4 +1,4 @@
-from .sdk.
-from .
-from .sdk.
-from .sdk.
+from .sdk.client import Laminar
+from .sdk.decorators import observe, lmnr_context, wrap_llm_call
+from .sdk.interface import trace, TraceContext, SpanContext
+from .sdk.types import ChatMessage, PipelineRunError, PipelineRunResponse, NodeInput
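As a quick orientation, the new top-level surface can be exercised like this; only Laminar's constructor behavior is confirmed by lmnr/sdk/client.py below, so treat the snippet as a sketch rather than documented usage:

    from lmnr import Laminar, ChatMessage, NodeInput

    # With no argument, Laminar() falls back to the LMNR_PROJECT_API_KEY
    # environment variable or a .env file (see Laminar.__init__ below).
    client = Laminar()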
lmnr/sdk/client.py
ADDED
@@ -0,0 +1,156 @@
+from .tracing_types import Span, Trace
+
+from pydantic.alias_generators import to_snake
+from typing import Any, Optional, Union
+import dotenv
+import json
+import logging
+import os
+import requests
+import uuid
+
+from .types import (
+    PipelineRunError,
+    PipelineRunResponse,
+    NodeInput,
+    PipelineRunRequest,
+)
+
+
+class APIError(Exception):
+    def __init__(self, status: Union[int, str], message: str, details: Any = None):
+        self.message = message
+        self.status = status
+        self.details = details
+
+    def __str__(self):
+        msg = "{0} ({1}): {2}"
+        return msg.format(self.message, self.status, self.details)
+
+
+class Laminar:
+    _base_url = "https://api.lmnr.ai"
+
+    def __init__(self, project_api_key: Optional[str] = None):
+        self.project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
+        if not self.project_api_key:
+            dotenv_path = dotenv.find_dotenv(usecwd=True)
+            self.project_api_key = dotenv.get_key(
+                dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
+            )
+
+    def run(
+        self,
+        pipeline: str,
+        inputs: dict[str, NodeInput],
+        env: dict[str, str] = {},
+        metadata: dict[str, str] = {},
+        parent_span_id: Optional[uuid.UUID] = None,
+        trace_id: Optional[uuid.UUID] = None,
+    ) -> PipelineRunResponse:
+        """Runs the pipeline with the given inputs
+
+        Args:
+            pipeline (str): name of the Laminar pipeline
+            inputs (dict[str, NodeInput]):
+                inputs to the endpoint's target pipeline.
+                Keys in the dictionary must match input node names
+            env (dict[str, str], optional):
+                Environment variables for the pipeline execution.
+                Defaults to {}.
+            metadata (dict[str, str], optional):
+                any custom metadata to be stored
+                with execution trace. Defaults to {}.
+            parent_span_id (Optional[uuid.UUID], optional):
+                parent span id for the resulting span.
+                Must usually be SpanContext.id()
+                Defaults to None.
+            trace_id (Optional[uuid.UUID], optional):
+                trace id for the resulting trace.
+                Must usually be TraceContext.id()
+                Defaults to None.
+
+        Returns:
+            PipelineRunResponse: response object containing the outputs
+
+        Raises:
+            ValueError: if project API key is not set
+            PipelineRunError: if the endpoint run fails
+        """
+        if self.project_api_key is None:
+            raise ValueError(
+                "Please initialize the Laminar object with your project API key or set "
+                "the LMNR_PROJECT_API_KEY environment variable"
+            )
+        try:
+            request = PipelineRunRequest(
+                inputs=inputs,
+                pipeline=pipeline,
+                env=env,
+                metadata=metadata,
+                parent_span_id=parent_span_id,
+                trace_id=trace_id,
+            )
+        except Exception as e:
+            raise ValueError(f"Invalid request: {e}")
+
+        response = requests.post(
+            self._base_url + "/v1/pipeline/run",
+            data=json.dumps(request.to_dict()),
+            headers=self._headers(),
+        )
+        if response.status_code != 200:
+            raise PipelineRunError(response)
+        try:
+            resp_json = response.json()
+            keys = list(resp_json.keys())
+            for key in keys:
+                value = resp_json[key]
+                del resp_json[key]
+                resp_json[to_snake(key)] = value
+            return PipelineRunResponse(**resp_json)
+        except Exception:
+            raise PipelineRunError(response)
+
+    def batch_post_traces(self, batch: list[Union[Span, Trace]]):
+        log = logging.getLogger("laminar.client")
+        url = self._base_url + "/v1/traces"
+        data = json.dumps({"traces": [item.to_dict() for item in batch]})
+        log.debug(f"making request to {url}")
+        headers = self._headers()
+        res = requests.post(url, data=data, headers=headers)
+
+        if res.status_code == 200:
+            log.debug("data uploaded successfully")
+
+        return self._process_response(
+            res, success_message="data uploaded successfully", return_json=False
+        )
+
+    def _process_response(
+        self, res: requests.Response, success_message: str, return_json: bool = True
+    ) -> Union[requests.Response, Any]:
+        log = logging.getLogger("laminar.client")
+        log.debug("received response: %s", res.text)
+        if res.status_code in (200, 201):
+            log.debug(success_message)
+            if return_json:
+                try:
+                    return res.json()
+                except json.JSONDecodeError:
+                    log.error("Response is not valid JSON.")
+                    raise APIError(res.status_code, "Invalid JSON response received")
+            else:
+                return res
+        try:
+            payload = res.json()
+            log.error("received error response: %s", payload)
+            raise APIError(res.status_code, payload)
+        except (KeyError, ValueError):
+            raise APIError(res.status_code, res.text)
+
+    def _headers(self):
+        return {
+            "Authorization": "Bearer " + self.project_api_key,
+            "Content-Type": "application/json",
+        }
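In short, run() validates the request client-side, POSTs it to /v1/pipeline/run, and snake-cases the response keys before constructing a PipelineRunResponse. A hedged usage sketch follows; the pipeline and input-node names are hypothetical, and per the docstring the inputs keys must match the pipeline's input node names:

    from lmnr import Laminar, PipelineRunError

    client = Laminar(project_api_key="...")  # or rely on LMNR_PROJECT_API_KEY
    try:
        response = client.run(
            pipeline="my_pipeline",               # hypothetical pipeline name
            inputs={"instruction": "Say hello"},  # hypothetical input node
            metadata={"session": "demo"},
        )
        print(response)
    except PipelineRunError as e:
        print("pipeline run failed:", e)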
lmnr/sdk/collector.py
ADDED
@@ -0,0 +1,177 @@
+from .client import APIError, Laminar
+from .tracing_types import Span, Trace
+
+from queue import Queue, Empty, Full
+from typing import Union
+
+import atexit
+import backoff
+import logging
+import time
+import threading
+
+
+class Collector(threading.Thread):
+    _log = logging.getLogger("laminar.collector")
+    _queue: Queue[Union[Span, Trace]]
+    _client: Laminar
+    _flush_interval: float
+
+    def __init__(
+        self,
+        queue: Queue[Union[Span, Trace]],
+        client: Laminar,
+        flush_interval: float = 5.0,
+    ):
+        super().__init__()
+        self.daemon = True
+        self._queue = queue
+        self.running = True
+        self._flush_interval = flush_interval
+        self._client = client
+
+    def run(self):
+        """Runs the collector."""
+        self._log.debug("collector is running...")
+        while self.running:
+            self.upload()
+
+    def upload(self):
+        """Upload the next batch of items, return whether successful."""
+        batch = self._next()
+        if len(batch) == 0:
+            return
+
+        try:
+            self._upload_batch(batch)
+        except Exception as e:
+            self._log.exception("error uploading: %s", e)
+        finally:
+            # mark items as acknowledged from queue
+            for _ in batch:
+                self._queue.task_done()
+
+    def pause(self):
+        self.running = False
+
+    def _next(self):
+        items = []
+        start_time = time.monotonic()
+
+        while True:
+            elapsed = time.monotonic() - start_time
+            if elapsed >= self._flush_interval:
+                break
+            try:
+                item = self._queue.get(
+                    block=True, timeout=self._flush_interval - elapsed
+                )
+                items.append(item)
+            except Empty:
+                break
+        return items
+
+    def _upload_batch(self, batch: list[Union[Trace, Span]]):
+        self._log.debug("uploading batch of %d items", len(batch))
+
+        @backoff.on_exception(backoff.expo, Exception, max_tries=5)
+        def execute_task_with_backoff(batch: list[Union[Trace, Span]]):
+            try:
+                self._client.batch_post_traces(batch=batch)
+            except Exception as e:
+                if (
+                    isinstance(e, APIError)
+                    and 400 <= int(e.status) < 500
+                    and int(e.status) != 429
+                ):
+                    self._log.warn(
+                        f"Received {e.status} error by Laminar server, not retrying: {e.message}"
+                    )
+                    return
+
+                raise e
+
+        execute_task_with_backoff(batch)
+        self._log.debug("successfully uploaded batch of %d items", len(batch))
+
+
+class ThreadManager:
+    _log = logging.getLogger("laminar.task_manager")
+    _queue: Queue[Union[Span, Trace]]
+    _client: Laminar
+    _max_task_queue_size: int
+    _flush_interval: float
+    _collectors: list[Collector] = []
+    _threads: int
+
+    def __init__(
+        self,
+        client: Laminar,
+        flush_interval: float = 2.0,
+        max_task_queue_size: int = 1000,
+        threads: int = 1,
+    ):
+        self._max_task_queue_size = max_task_queue_size
+        self._threads = threads
+        self._queue = Queue(maxsize=self._max_task_queue_size)
+        self._flush_interval = flush_interval
+        self._client = client
+        for _ in range(self._threads):
+            collector = Collector(
+                queue=self._queue,
+                client=self._client,
+                flush_interval=flush_interval,
+            )
+            self._collectors.append(collector)
+            collector.start()
+        atexit.register(self.join)
+
+    def add_task(self, event: Union[Span, Trace]) -> bool:
+        try:
+            self._queue.put(event, block=False)
+            return True
+        except Full:
+            self._log.warning("queue is full")
+            return False
+        except Exception as e:
+            self._log.exception(f"Exception in adding task {e}")
+
+            return False
+
+    def flush(self):
+        """Forces a flush from the internal queue to the server"""
+        self._log.debug("flushing queue")
+        queue = self._queue
+        size = queue.qsize()
+        queue.join()
+        # Note that this message may not be precise, because of threading.
+        self._log.debug("successfully flushed about %s items.", size)
+
+    def join(self):
+        """Ends the collector threads once the queue is empty.
+        Blocks execution until finished
+        """
+        self._log.debug(f"joining {len(self._collectors)} collector threads")
+
+        # pause all collectors before joining them so we don't have to wait for multiple
+        # flush intervals to join them all.
+        for collector in self._collectors:
+            collector.pause()
+
+        for i, collector in enumerate(self._collectors):
+            try:
+                collector.join()
+            except RuntimeError:
+                # collector thread has not started
+                pass
+
+            self._log.debug(f"collector thread {i} joined")
+
+    def shutdown(self):
+        """Flush all messages and cleanly shutdown the client"""
+        self._log.debug("shutdown initiated")
+
+        self.flush()
+        self.join()
+
+        self._log.debug("shutdown completed")
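ThreadManager owns a bounded queue and fans work out to daemon Collector threads, each of which gathers items for up to flush_interval seconds and uploads the batch with exponential backoff, giving up immediately on non-429 4xx responses. Two details worth noting: Logger.warn in _upload_batch is a deprecated alias of Logger.warning, and _collectors is a class-level list shared across ThreadManager instances. A minimal lifecycle sketch, assuming a Span or Trace has been constructed elsewhere (their constructors live in lmnr/sdk/tracing_types.py, not shown here):

    from lmnr.sdk.client import Laminar
    from lmnr.sdk.collector import ThreadManager

    manager = ThreadManager(client=Laminar(), flush_interval=2.0, threads=1)

    # manager.add_task(event)  # event: a Span or Trace; returns False when the queue is full

    manager.flush()     # block until everything queued so far is uploaded
    manager.shutdown()  # flush, then pause and join the collector threads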
lmnr/sdk/constants.py
ADDED
@@ -0,0 +1 @@
+CURRENT_TRACING_VERSION = "0.1.0"