lmnr 0.2.15__tar.gz → 0.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr-0.3.0/PKG-INFO +185 -0
- lmnr-0.3.0/README.md +164 -0
- {lmnr-0.2.15 → lmnr-0.3.0}/pyproject.toml +8 -9
- lmnr-0.3.0/src/lmnr/__init__.py +4 -0
- lmnr-0.3.0/src/lmnr/sdk/client.py +161 -0
- lmnr-0.3.0/src/lmnr/sdk/collector.py +177 -0
- lmnr-0.3.0/src/lmnr/sdk/constants.py +1 -0
- lmnr-0.3.0/src/lmnr/sdk/context.py +456 -0
- lmnr-0.3.0/src/lmnr/sdk/decorators.py +277 -0
- lmnr-0.3.0/src/lmnr/sdk/interface.py +339 -0
- lmnr-0.3.0/src/lmnr/sdk/providers/__init__.py +2 -0
- lmnr-0.3.0/src/lmnr/sdk/providers/base.py +28 -0
- lmnr-0.3.0/src/lmnr/sdk/providers/fallback.py +131 -0
- lmnr-0.3.0/src/lmnr/sdk/providers/openai.py +140 -0
- lmnr-0.3.0/src/lmnr/sdk/providers/utils.py +33 -0
- lmnr-0.3.0/src/lmnr/sdk/tracing_types.py +197 -0
- lmnr-0.3.0/src/lmnr/sdk/types.py +69 -0
- lmnr-0.3.0/src/lmnr/sdk/utils.py +102 -0
- lmnr-0.2.15/PKG-INFO +0 -187
- lmnr-0.2.15/README.md +0 -161
- lmnr-0.2.15/src/lmnr/__init__.py +0 -4
- lmnr-0.2.15/src/lmnr/cli/__main__.py +0 -4
- lmnr-0.2.15/src/lmnr/cli/cli.py +0 -230
- lmnr-0.2.15/src/lmnr/cli/parser/__init__.py +0 -0
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/__init__.py +0 -45
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/code.py +0 -36
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/condition.py +0 -30
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/input.py +0 -25
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/json_extractor.py +0 -29
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/llm.py +0 -56
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/output.py +0 -27
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/router.py +0 -37
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/semantic_search.py +0 -53
- lmnr-0.2.15/src/lmnr/cli/parser/nodes/types.py +0 -153
- lmnr-0.2.15/src/lmnr/cli/parser/parser.py +0 -62
- lmnr-0.2.15/src/lmnr/cli/parser/utils.py +0 -49
- lmnr-0.2.15/src/lmnr/cli/zip.py +0 -16
- lmnr-0.2.15/src/lmnr/sdk/__init__.py +0 -0
- lmnr-0.2.15/src/lmnr/sdk/endpoint.py +0 -186
- lmnr-0.2.15/src/lmnr/sdk/registry.py +0 -29
- lmnr-0.2.15/src/lmnr/sdk/remote_debugger.py +0 -148
- lmnr-0.2.15/src/lmnr/types.py +0 -101
- {lmnr-0.2.15 → lmnr-0.3.0}/LICENSE +0 -0
- {lmnr-0.2.15/src/lmnr/cli → lmnr-0.3.0/src/lmnr/sdk}/__init__.py +0 -0
lmnr-0.3.0/PKG-INFO
ADDED
@@ -0,0 +1,185 @@
Metadata-Version: 2.1
Name: lmnr
Version: 0.3.0
Summary: Python SDK for Laminar AI
License: Apache-2.0
Author: lmnr.ai
Requires-Python: >=3.9,<4.0
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Dist: backoff (>=2.2.1,<3.0.0)
Requires-Dist: openai (>=1.41.1,<2.0.0)
Requires-Dist: pydantic (>=2.7.4,<3.0.0)
Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
Requires-Dist: requests (>=2.32.3,<3.0.0)
Description-Content-Type: text/markdown

# Laminar AI

This repo provides the core for code generation, the Laminar CLI, and the Laminar SDK.

## Quickstart
```sh
python3 -m venv .myenv
source .myenv/bin/activate  # or use your favorite env management tool

pip install lmnr
```


## Decorator instrumentation example

For easy automatic instrumentation, we provide two simple primitives:

- `observe` - a multi-purpose automatic decorator that starts traces and spans when functions are entered, and finishes them when functions return
- `wrap_llm_call` - a function that takes in your LLM call and returns a "decorated" version of it. It does everything `observe` does, plus a few LLM-specific utilities, such as counting tokens and recording model params.

You can also import `lmnr_context` to interact with and get more control over the context of the current span.

```python
import os
from openai import OpenAI

from lmnr import observe, wrap_llm_call, lmnr_context
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

@observe()  # annotate all functions you want to trace
def poem_writer(topic="turbulence"):
    prompt = f"write a poem about {topic}"

    # wrap the actual final call to the LLM with `wrap_llm_call`
    response = wrap_llm_call(client.chat.completions.create)(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
    )

    poem = response.choices[0].message.content

    if topic in poem:
        lmnr_context.event("topic_alignment")  # send an event with a pre-defined name

    # to trigger an automatic check for a possible event, do:
    lmnr_context.check_span_event("excessive_wordiness")

    return poem

if __name__ == "__main__":
    print(poem_writer(topic="laminar flow"))
```

This gives the advantage of quick instrumentation, but it is somewhat limited in flexibility and doesn't work as expected with threading.
This is because we rely on `contextvars.ContextVar` under the hood, and Python does not carry context variables over to newly started threads.

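To see the underlying issue, here is a stdlib-only sketch (no Laminar APIs involved) of how `contextvars` behave across threads: a value set in the main thread is invisible in a freshly started thread unless you explicitly carry the context over with `contextvars.copy_context()`.

```python
import contextvars
import threading

request_id = contextvars.ContextVar("request_id", default=None)

def show(label: str) -> None:
    print(label, request_id.get())

request_id.set("abc-123")

# a freshly started thread begins with an empty context,
# so it sees the default value, not "abc-123"
t1 = threading.Thread(target=show, args=("plain thread:",))
t1.start(); t1.join()  # prints: plain thread: None

# copying the current context and running inside it carries the value over
ctx = contextvars.copy_context()
t2 = threading.Thread(target=lambda: ctx.run(show, "copied context:"))
t2.start(); t2.join()  # prints: copied context: abc-123
```
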
If you want to instrument your code manually, read on to the next section.

## Manual instrumentation example

For manual instrumentation you will need to import the following:
- `trace` - a function to start a trace. It returns a `TraceContext`
- `TraceContext` - a pointer to the current trace that you can pass around functions as you want
- `SpanContext` - a pointer to the current span that you can pass around functions as you want

Both `TraceContext` and `SpanContext` expose the following interfaces:
- `span(name: str, **kwargs)` - create a child span within the current context. Returns `SpanContext`
- `update(**kwargs)` - update the current trace or span and return it. Returns `TraceContext` or `SpanContext`. Useful when some metadata becomes known later during the program execution
- `end(**kwargs)` - update the current span, and terminate it

In addition, `SpanContext` allows you to:
- `event(name: str, value: str | int = None)` - emit a custom event at any point
- `evaluate_event(name: str, data: str)` - register a possible event for automatic checking by Laminar

Example:

```python
import os
from openai import OpenAI

from lmnr import trace, TraceContext, SpanContext
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def poem_writer(t: TraceContext, topic="turbulence"):
    span: SpanContext = t.span(name="poem_writer", input=None)

    prompt = f"write a poem about {topic}"
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    # create a child span within the current `poem_writer` span.
    llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
    )
    poem = response.choices[0].message.content
    if topic in poem:
        llm_span.event("topic_alignment")  # send an event with a pre-defined name

    # note that you can also register possible events here, not only via `llm_span.check_span_event()`
    llm_span.end(output=poem, check_event_names=["excessive_wordiness"])
    span.end(output=poem)
    return poem


t: TraceContext = trace(user_id="user", session_id="session", release="release")
poem_writer(t, topic="laminar flow")
t.end(success=True)
```

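Since `TraceContext` and `SpanContext` are ordinary values, you can pass them through your own helper functions. A small illustrative sketch using only the `trace`/`span`/`end` interface described above (the helper functions here are hypothetical, not part of the SDK):

```python
from lmnr import trace, TraceContext, SpanContext

def fetch_docs(parent: SpanContext) -> list[str]:
    # the callee opens its own child span off the span it was handed
    s = parent.span(name="fetch_docs", input=None)
    docs = ["doc one", "doc two"]  # stand-in for real retrieval
    s.end(output=docs)
    return docs

def run_pipeline(t: TraceContext) -> list[str]:
    root = t.span(name="run_pipeline", input=None)
    docs = fetch_docs(root)  # child span created inside the callee
    root.end(output=docs)
    return docs

t: TraceContext = trace(user_id="user", session_id="session", release="release")
run_pipeline(t)
t.end(success=True)
```
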
## Features

- Make Laminar endpoint calls from your Python code
- Make Laminar endpoint calls that can run your own functions as tools
- CLI to generate code from pipelines you build on Laminar, or to execute your own functions while you test your flows in the workshop

## Making Laminar pipeline calls

When you are ready to use your pipeline in your code, deploy it in Laminar by selecting the target version for the pipeline.

Once your pipeline target is set, you can call it from Python in just a few lines.

Example use:

```python
from lmnr import Laminar

# for decorator instrumentation, do: `from lmnr import lmnr_context`

l = Laminar('<YOUR_PROJECT_API_KEY>')
result = l.run(  # lmnr_context.run( for decorator instrumentation
    pipeline='my_pipeline_name',
    inputs={'input_node_name': 'some_value'},
    # all environment variables
    env={'OPENAI_API_KEY': 'sk-some-key'},
    # any metadata to attach to this run's trace
    metadata={'session_id': 'your_custom_session_id'}
)
```

Resulting in:

```python
>>> result
PipelineRunResponse(
    outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
    # useful to locate your trace
    run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
)
```

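If the call fails, `run` raises a `PipelineRunError` (see `src/lmnr/sdk/client.py` added in this version). A minimal sketch of guarding the call, assuming `PipelineRunError` is imported from `lmnr.sdk.types`, where this release defines it:

```python
from lmnr import Laminar
from lmnr.sdk.types import PipelineRunError  # defined in this release's sdk/types.py

l = Laminar('<YOUR_PROJECT_API_KEY>')
try:
    result = l.run(
        pipeline='my_pipeline_name',
        inputs={'input_node_name': 'some_value'},
        env={'OPENAI_API_KEY': 'sk-some-key'},
    )
    print(result.run_id)  # useful to locate your trace
except PipelineRunError as e:
    # raised on any non-200 response from /v1/pipeline/run
    print(f"pipeline run failed: {e}")
```
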
## PROJECT_API_KEY

Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `PROJECT_API_KEY`.
lmnr-0.3.0/README.md
ADDED
@@ -0,0 +1,164 @@
(file contents identical to the markdown description embedded in PKG-INFO above)
{lmnr-0.2.15 → lmnr-0.3.0}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "lmnr"
-version = "0.2.15"
+version = "0.3.0"
 description = "Python SDK for Laminar AI"
 authors = [
     { name = "lmnr.ai", email = "founders@lmnr.ai" }
@@ -11,7 +11,7 @@ license = "Apache-2.0"
 
 [tool.poetry]
 name = "lmnr"
-version = "0.2.15"
+version = "0.3.0"
 description = "Python SDK for Laminar AI"
 authors = ["lmnr.ai"]
 readme = "README.md"
@@ -19,16 +19,15 @@ license = "Apache-2.0"
 
 [tool.poetry.dependencies]
 python = "^3.9"
-black = "^24.4.2"
 pydantic = "^2.7.4"
-click = "^8.1.7"
 requests = "^2.32.3"
-websockets = "^12.0"
-cookiecutter = "^2.6.0"
 python-dotenv = "^1.0.1"
-
-
-
+
+openai = "^1.41.1"
+backoff = "^2.2.1"
+
+[tool.poetry.group.dev.dependencies]
+black = "^24.8.0"
 
 [build-system]
 requires = ["poetry-core"]
lmnr-0.3.0/src/lmnr/sdk/client.py
ADDED
@@ -0,0 +1,161 @@
from .tracing_types import Span, Trace

from pydantic.alias_generators import to_snake
from typing import Any, Optional, Union
import dotenv
import json
import logging
import os
import requests
import uuid

from .types import (
    PipelineRunError,
    PipelineRunResponse,
    NodeInput,
    PipelineRunRequest,
)


class APIError(Exception):
    def __init__(self, status: Union[int, str], message: str, details: Any = None):
        self.message = message
        self.status = status
        self.details = details

    def __str__(self):
        msg = "{0} ({1}): {2}"
        return msg.format(self.message, self.status, self.details)


class Laminar:
    _base_url = "https://api.lmnr.ai"

    def __init__(self, project_api_key: Optional[str] = None):
        self.project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
        if not self.project_api_key:
            dotenv_path = dotenv.find_dotenv(usecwd=True)
            self.project_api_key = dotenv.get_key(
                dotenv_path=dotenv_path, key_to_get="LMNR_PROJECT_API_KEY"
            )
        if not self.project_api_key:
            raise ValueError(
                "Please initialize the Laminar object with your project API key or set "
                "the LMNR_PROJECT_API_KEY environment variable in your environment or .env file"
            )

    def run(
        self,
        pipeline: str,
        inputs: dict[str, NodeInput],
        env: dict[str, str] = {},
        metadata: dict[str, str] = {},
        parent_span_id: Optional[uuid.UUID] = None,
        trace_id: Optional[uuid.UUID] = None,
    ) -> PipelineRunResponse:
        """Runs the pipeline with the given inputs

        Args:
            pipeline (str): name of the Laminar pipeline
            inputs (dict[str, NodeInput]):
                inputs to the endpoint's target pipeline.
                Keys in the dictionary must match input node names
            env (dict[str, str], optional):
                Environment variables for the pipeline execution.
                Defaults to {}.
            metadata (dict[str, str], optional):
                any custom metadata to be stored
                with execution trace. Defaults to {}.
            parent_span_id (Optional[uuid.UUID], optional):
                parent span id for the resulting span.
                Must usually be SpanContext.id()
                Defaults to None.
            trace_id (Optional[uuid.UUID], optional):
                trace id for the resulting trace.
                Must usually be TraceContext.id()
                Defaults to None.

        Returns:
            PipelineRunResponse: response object containing the outputs

        Raises:
            ValueError: if project API key is not set
            PipelineRunError: if the endpoint run fails
        """
        if self.project_api_key is None:
            raise ValueError(
                "Please initialize the Laminar object with your project API key or set "
                "the LMNR_PROJECT_API_KEY environment variable"
            )
        try:
            request = PipelineRunRequest(
                inputs=inputs,
                pipeline=pipeline,
                env=env,
                metadata=metadata,
                parent_span_id=parent_span_id,
                trace_id=trace_id,
            )
        except Exception as e:
            raise ValueError(f"Invalid request: {e}")

        response = requests.post(
            self._base_url + "/v1/pipeline/run",
            data=json.dumps(request.to_dict()),
            headers=self._headers(),
        )
        if response.status_code != 200:
            raise PipelineRunError(response)
        try:
            resp_json = response.json()
            # convert the API's camelCase keys to the response model's snake_case fields
            keys = list(resp_json.keys())
            for key in keys:
                value = resp_json[key]
                del resp_json[key]
                resp_json[to_snake(key)] = value
            return PipelineRunResponse(**resp_json)
        except Exception:
            raise PipelineRunError(response)

    def batch_post_traces(self, batch: list[Union[Span, Trace]]):
        log = logging.getLogger("laminar.client")
        url = self._base_url + "/v1/traces"
        data = json.dumps({"traces": [item.to_dict() for item in batch]})
        log.debug(f"making request to {url}")
        headers = self._headers()
        res = requests.post(url, data=data, headers=headers)

        if res.status_code == 200:
            log.debug("data uploaded successfully")

        return self._process_response(
            res, success_message="data uploaded successfully", return_json=False
        )

    def _process_response(
        self, res: requests.Response, success_message: str, return_json: bool = True
    ) -> Union[requests.Response, Any]:
        log = logging.getLogger("laminar.client")
        log.debug("received response: %s", res.text)
        if res.status_code in (200, 201):
            log.debug(success_message)
            if return_json:
                try:
                    return res.json()
                except json.JSONDecodeError:
                    log.error("Response is not valid JSON.")
                    raise APIError(res.status_code, "Invalid JSON response received")
            else:
                return res
        try:
            payload = res.json()
            log.error("received error response: %s", payload)
            raise APIError(res.status_code, payload)
        except (KeyError, ValueError):
            raise APIError(res.status_code, res.text)
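A short usage sketch for the client above. The key-resolution order (explicit constructor argument, then the `LMNR_PROJECT_API_KEY` environment variable, then a `.env` file) follows `Laminar.__init__` as added here; the pipeline and input names are placeholders:

```python
import os

# either pass the key to the constructor, or let __init__ resolve it
os.environ["LMNR_PROJECT_API_KEY"] = "<YOUR_PROJECT_API_KEY>"

from lmnr.sdk.client import Laminar

l = Laminar()  # falls back to LMNR_PROJECT_API_KEY from os.environ, then .env
resp = l.run(
    pipeline="my_pipeline_name",
    inputs={"input_node_name": "some_value"},
)
print(resp.run_id)
```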