lmnr 0.4.9__tar.gz → 0.4.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lmnr-0.4.9 → lmnr-0.4.11}/PKG-INFO +81 -51
- {lmnr-0.4.9 → lmnr-0.4.11}/README.md +80 -50
- {lmnr-0.4.9 → lmnr-0.4.11}/pyproject.toml +2 -2
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/__init__.py +1 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/decorators.py +4 -7
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/types.py +17 -17
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/instruments.py +9 -4
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/tracing.py +6 -4
- {lmnr-0.4.9 → lmnr-0.4.11}/LICENSE +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/evaluations.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/laminar.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/log.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/utils.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/.flake8 +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/.python-version +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/config/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/decorators/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/decorators/base.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/metrics/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/metrics/metrics.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_and_external_association_properties.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_association_properties.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_manual/test_manual_report.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_manual/test_resource_attributes.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_privacy_no_prompts/test_simple_workflow.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_prompt_management/test_prompt_management.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_sdk_initialization/test_resource_attributes.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_tasks/test_task_io_serialization_with_langchain.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_aworkflow.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_workflow.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_streaming_workflow.yaml +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/conftest.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_association_properties.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_manual.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_nested_tasks.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_privacy_no_prompts.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_sdk_initialization.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_tasks.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_workflows.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/content_allow_list.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/context_manager.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/manual.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/__init__.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/in_memory_span_exporter.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/json_encoder.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/package_check.py +0 -0
- {lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/version.py +0 -0

{lmnr-0.4.9 → lmnr-0.4.11}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lmnr
-Version: 0.4.9
+Version: 0.4.11
 Summary: Python SDK for Laminar AI
 License: Apache-2.0
 Author: lmnr.ai
@@ -67,6 +67,9 @@ OpenTelemetry log sender for [Laminar](https://github.com/lmnr-ai/lmnr) for Pyth
 
 
 ## Quickstart
+
+First, install the package:
+
 ```sh
 python3 -m venv .myenv
 source .myenv/bin/activate # or use your favorite env management tool
@@ -74,21 +77,39 @@ source .myenv/bin/activate # or use your favorite env management tool
 pip install lmnr
 ```
 
-
+Then, you can initialize Laminar in your main file and instrument your code.
 
 ```python
+import os
+from openai import OpenAI
 from lmnr import Laminar as L
 
-L.initialize(
-
+L.initialize(
+    project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
+)
 
-
-calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
 
-
+def poem_writer(topic: str):
+    prompt = f"write a poem about {topic}"
 
-
-
+    # OpenAI calls are automatically instrumented
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": prompt},
+        ],
+    )
+    poem = response.choices[0].message.content
+    return poem
+
+if __name__ == "__main__":
+    print(poem_writer("laminar flow"))
+
+```
+
+Note that you need to only initialize Laminar once in your application.
 
 ### Project API key
 
@@ -97,75 +118,84 @@ You can either pass it to `.initialize()` or set it to `.env` at the root of you
 
 ## Instrumentation
 
-
-or to trace other functions.
+### Manual instrumentation
 
-
+To instrument any function in your code, we provide a simple `@observe()` decorator.
+This can be useful if you want to trace a request handler or a function which combines multiple LLM calls.
 
 ```python
 import os
 from openai import OpenAI
+from lmnr import Laminar as L, Instruments
 
-
-from lmnr import observe, Laminar as L
-L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments=set())
+L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
 
 client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
 
-
-def poem_writer(topic="turbulence"):
+def poem_writer(topic: str):
     prompt = f"write a poem about {topic}"
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": prompt},
+    ]
+
+    # OpenAI calls are still automatically instrumented
     response = client.chat.completions.create(
         model="gpt-4o",
-        messages=
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
-        ],
+        messages=messages,
     )
     poem = response.choices[0].message.content
+
     return poem
 
-
+@observe()
+def generate_poems():
+    poem1 = poem_writer(topic="laminar flow")
+    L.event("is_poem_generated", True)
+    poem2 = poem_writer(topic="turbulence")
+    L.event("is_poem_generated", True)
+    poems = f"{poem1}\n\n---\n\n{poem2}"
+    return poems
 ```
 
-
-
-Our manual instrumentation is a very thin wrapper around OpenTelemetry's
-`trace.start_span`. Our wrapper sets the span into the active context.
-You don't have to explicitly pass the spans around, it is enough to
-just call `L.start_span`, and OpenTelemetry will handle the context management
+Also, you can use `Laminar.start_as_current_span` if you want to record a chunk of your code using `with` statement.
 
 ```python
-
-L.
+def handle_user_request(topic: str):
+    with L.start_as_current_span(name="poem_writer", input=topic):
+        ...
+
+        poem = poem_writer(topic=topic)
+
+        ...
+
+        # while within the span, you can attach laminar events to it
+        L.event("is_poem_generated", True)
+
+        # Use set_span_output to record the output of the span
+        L.set_span_output(poem)
+```
 
-
-
-span = L.start_span("poem_writer", topic) # start a span
+### Automatic instrumentation
 
-
+Laminar allows you to automatically instrument majority of the most popular LLM, Vector DB, database, requests, and other libraries.
 
-
-
-        model="gpt-4o",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
-        ],
-    )
-    poem = response.choices[0].message.content
-    # while within the span, you can attach laminar events to it
-    L.event("event_name", "event_value")
+If you want to automatically instrument a default set of libraries, then simply do NOT pass `instruments` argument to `.initialize()`.
+See the full list of available instrumentations in the [enum](/src/lmnr/traceloop_sdk/instruments.py).
 
-
-
-
-    # Otherwise, the trace may not be sent/displayed correctly
-    span.end()
+If you want to automatically instrument only specific LLM, Vector DB, or other
+calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+For example, if you want to only instrument OpenAI and Anthropic, then do the following:
 
-
+```python
+from lmnr import Laminar as L, Instruments
+
+L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
 ```
 
+If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
+
+Majority of the autoinstrumentations are provided by Traceloop's [OpenLLMetry](https://github.com/traceloop/openllmetry).
 
 ## Sending events
 

{lmnr-0.4.9 → lmnr-0.4.11}/README.md
@@ -9,6 +9,9 @@ OpenTelemetry log sender for [Laminar](https://github.com/lmnr-ai/lmnr) for Pyth
 
 
 ## Quickstart
+
+First, install the package:
+
 ```sh
 python3 -m venv .myenv
 source .myenv/bin/activate # or use your favorite env management tool
@@ -16,21 +19,39 @@ source .myenv/bin/activate # or use your favorite env management tool
 pip install lmnr
 ```
 
-
+Then, you can initialize Laminar in your main file and instrument your code.
 
 ```python
+import os
+from openai import OpenAI
 from lmnr import Laminar as L
 
-L.initialize(
-
+L.initialize(
+    project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
+)
 
-
-calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
 
-
+def poem_writer(topic: str):
+    prompt = f"write a poem about {topic}"
 
-
-
+    # OpenAI calls are automatically instrumented
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": prompt},
+        ],
+    )
+    poem = response.choices[0].message.content
+    return poem
+
+if __name__ == "__main__":
+    print(poem_writer("laminar flow"))
+
+```
+
+Note that you need to only initialize Laminar once in your application.
 
 ### Project API key
 
@@ -39,75 +60,84 @@ You can either pass it to `.initialize()` or set it to `.env` at the root of you
 
 ## Instrumentation
 
-
-or to trace other functions.
+### Manual instrumentation
 
-
+To instrument any function in your code, we provide a simple `@observe()` decorator.
+This can be useful if you want to trace a request handler or a function which combines multiple LLM calls.
 
 ```python
 import os
 from openai import OpenAI
+from lmnr import Laminar as L, Instruments
 
-
-from lmnr import observe, Laminar as L
-L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments=set())
+L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
 
 client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
 
-
-def poem_writer(topic="turbulence"):
+def poem_writer(topic: str):
     prompt = f"write a poem about {topic}"
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": prompt},
+    ]
+
+    # OpenAI calls are still automatically instrumented
     response = client.chat.completions.create(
         model="gpt-4o",
-        messages=
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
-        ],
+        messages=messages,
    )
     poem = response.choices[0].message.content
+
     return poem
 
-
+@observe()
+def generate_poems():
+    poem1 = poem_writer(topic="laminar flow")
+    L.event("is_poem_generated", True)
+    poem2 = poem_writer(topic="turbulence")
+    L.event("is_poem_generated", True)
+    poems = f"{poem1}\n\n---\n\n{poem2}"
+    return poems
 ```
 
-
-
-Our manual instrumentation is a very thin wrapper around OpenTelemetry's
-`trace.start_span`. Our wrapper sets the span into the active context.
-You don't have to explicitly pass the spans around, it is enough to
-just call `L.start_span`, and OpenTelemetry will handle the context management
+Also, you can use `Laminar.start_as_current_span` if you want to record a chunk of your code using `with` statement.
 
 ```python
-
-L.
+def handle_user_request(topic: str):
+    with L.start_as_current_span(name="poem_writer", input=topic):
+        ...
+
+        poem = poem_writer(topic=topic)
+
+        ...
+
+        # while within the span, you can attach laminar events to it
+        L.event("is_poem_generated", True)
+
+        # Use set_span_output to record the output of the span
+        L.set_span_output(poem)
+```
 
-
-
-span = L.start_span("poem_writer", topic) # start a span
+### Automatic instrumentation
 
-
+Laminar allows you to automatically instrument majority of the most popular LLM, Vector DB, database, requests, and other libraries.
 
-
-
-        model="gpt-4o",
-        messages=[
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
-        ],
-    )
-    poem = response.choices[0].message.content
-    # while within the span, you can attach laminar events to it
-    L.event("event_name", "event_value")
+If you want to automatically instrument a default set of libraries, then simply do NOT pass `instruments` argument to `.initialize()`.
+See the full list of available instrumentations in the [enum](/src/lmnr/traceloop_sdk/instruments.py).
 
-
-
-
-    # Otherwise, the trace may not be sent/displayed correctly
-    span.end()
+If you want to automatically instrument only specific LLM, Vector DB, or other
+calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+For example, if you want to only instrument OpenAI and Anthropic, then do the following:
 
-
+```python
+from lmnr import Laminar as L, Instruments
+
+L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
 ```
 
+If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
+
+Majority of the autoinstrumentations are provided by Traceloop's [OpenLLMetry](https://github.com/traceloop/openllmetry).
 
 ## Sending events
 

{lmnr-0.4.9 → lmnr-0.4.11}/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "lmnr"
-version = "0.4.9"
+version = "0.4.11"
 description = "Python SDK for Laminar AI"
 authors = [
     { name = "lmnr.ai", email = "founders@lmnr.ai" }
@@ -11,7 +11,7 @@ license = "Apache-2.0"
 
 [tool.poetry]
 name = "lmnr"
-version = "0.4.9"
+version = "0.4.11"
 description = "Python SDK for Laminar AI"
 authors = ["lmnr.ai"]
 readme = "README.md"

{lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/decorators.py
@@ -4,22 +4,19 @@ from lmnr.traceloop_sdk.decorators.base import (
 )
 from opentelemetry.trace import INVALID_SPAN, get_current_span
 
-from typing import Callable, Optional,
+from typing import Callable, Optional, cast
 
 from lmnr.traceloop_sdk.tracing.tracing import update_association_properties
 
 from .utils import is_async
 
-P = ParamSpec("P")
-R = TypeVar("R")
-
 
 def observe(
     *,
     name: Optional[str] = None,
     user_id: Optional[str] = None,
     session_id: Optional[str] = None,
-) -> Callable[[Callable
+) -> Callable[[Callable], Callable]:
     """The main decorator entrypoint for Laminar. This is used to wrap
     functions and methods to create spans.
 
@@ -41,7 +38,7 @@ def observe(
         R: Returns the result of the wrapped function
     """
 
-    def decorator(func: Callable
+    def decorator(func: Callable) -> Callable:
         current_span = get_current_span()
         if current_span != INVALID_SPAN:
             if session_id is not None:
@@ -64,4 +61,4 @@ def observe(
             else entity_method(name=name)(func)
         )
 
-    return cast(Callable
+    return cast(Callable, decorator)
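
The `observe` change above replaces the `ParamSpec`/`TypeVar` generics with plain `Callable` hints. A minimal usage sketch of the decorator as exported by the SDK (the function name and argument values below are illustrative, not taken from the package):

```python
from lmnr import Laminar as L, observe

L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>")

# observe() takes keyword-only arguments; name, user_id and session_id are all optional.
@observe(name="handle_request", session_id="session-123")
def handle_request(user_query: str) -> str:
    # the wrapped call is recorded as a span named "handle_request"
    return f"answered: {user_query}"
```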

{lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/sdk/types.py
@@ -2,7 +2,7 @@ import datetime
 import requests
 import pydantic
 import uuid
-from typing import Any, Literal, Optional,
+from typing import Any, Awaitable, Callable, Literal, Optional, Union
 
 from .utils import to_dict
 
@@ -17,9 +17,9 @@ class ConditionedValue(pydantic.BaseModel):
     value: "NodeInput"
 
 
-Numeric
-NodeInput
-PipelineOutput
+Numeric = Union[int, float]
+NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
+PipelineOutput = Union[NodeInput]
 
 
 class PipelineRunRequest(pydantic.BaseModel):
@@ -74,8 +74,8 @@ class PipelineRunError(Exception):
         return super().__str__()
 
 
-EvaluationDatapointData
-EvaluationDatapointTarget
+EvaluationDatapointData = dict[str, Any]
+EvaluationDatapointTarget = dict[str, Any]
 
 
 # EvaluationDatapoint is a single data point in the evaluation
@@ -87,24 +87,24 @@ class EvaluationDatapoint(pydantic.BaseModel):
     target: EvaluationDatapointTarget
 
 
-ExecutorFunctionReturnType
-EvaluatorFunctionReturnType
+ExecutorFunctionReturnType = Any
+EvaluatorFunctionReturnType = Union[Numeric, dict[str, Numeric]]
 
-
-
-
-
+ExecutorFunction = Callable[
+    [EvaluationDatapointData, Any, dict[str, Any]],
+    Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
+]
 
 # EvaluatorFunction is a function that takes the output of the executor and the
 # target data, and returns a score. The score can be a single number or a
 # record of string keys and number values. The latter is useful for evaluating
 # multiple criteria in one go instead of running multiple evaluators.
-
-
-
-
+EvaluatorFunction = Callable[
+    [ExecutorFunctionReturnType, Any, dict[str, Any]],
+    Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
+]
 
-EvaluationStatus
+EvaluationStatus = Literal["Started", "Finished", "Error"]
 
 
 class CreateEvaluationResponse(pydantic.BaseModel):
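
The aliases above spell out the evaluation interfaces: an executor receives a datapoint's `data` and returns any output, and an evaluator receives that output plus the `target` and returns a score, either a single `Numeric` or a `dict[str, Numeric]`. A small sketch (assumed shapes, not code from the package) of callables compatible with these aliases:

```python
from typing import Any

# Executor: maps a datapoint's data (a dict) to an output of any type.
def write_poem(data: dict[str, Any], *args: Any, **kwargs: Any) -> str:
    return f"a poem about {data['topic']}"

# Evaluator: scores the executor output against the target; returns a single
# number or a dict of named numeric scores (dict form shown here).
def score_poem(output: str, target: dict[str, Any], *args: Any, **kwargs: Any) -> dict[str, float]:
    return {"mentions_topic": float(target["topic"] in output)}
```

Both may also be `async` functions, since the aliases allow `Awaitable` return values.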

{lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/instruments.py
@@ -2,6 +2,8 @@ from enum import Enum
 
 
 class Instruments(Enum):
+    # The list of libraries which will be autoinstrumented
+    # if no specific instruments are provided to initialize()
     OPENAI = "openai"
     ANTHROPIC = "anthropic"
     COHERE = "cohere"
@@ -15,10 +17,6 @@ class Instruments(Enum):
     MILVUS = "milvus"
     TRANSFORMERS = "transformers"
     TOGETHER = "together"
-    REDIS = "redis"
-    REQUESTS = "requests"
-    URLLIB3 = "urllib3"
-    PYMYSQL = "pymysql"
     BEDROCK = "bedrock"
     REPLICATE = "replicate"
     VERTEXAI = "vertexai"
@@ -27,3 +25,10 @@ class Instruments(Enum):
     ALEPHALPHA = "alephalpha"
     MARQO = "marqo"
     LANCEDB = "lancedb"
+
+    # The following libraries will not be autoinstrumented unless
+    # specified explicitly in the initialize() call.
+    REDIS = "redis"
+    REQUESTS = "requests"
+    URLLIB3 = "urllib3"
+    PYMYSQL = "pymysql"
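
Together with the tracing change below, this moves redis, requests, urllib3 and pymysql out of the default autoinstrumentation set. A sketch of how to opt back in explicitly, following the `instruments=` pattern from the README diff above:

```python
import os
from lmnr import Laminar as L, Instruments

# Explicitly request the now non-default instrumentations alongside an LLM one.
L.initialize(
    project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
    instruments={Instruments.OPENAI, Instruments.REDIS, Instruments.REQUESTS},
)
```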

{lmnr-0.4.9 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/tracing.py
@@ -531,10 +531,6 @@ def init_instrumentations(should_enrich_metrics: bool):
     init_milvus_instrumentor()
     init_transformers_instrumentor()
     init_together_instrumentor()
-    init_redis_instrumentor()
-    init_requests_instrumentor()
-    init_urllib3_instrumentor()
-    init_pymysql_instrumentor()
     init_bedrock_instrumentor(should_enrich_metrics)
     init_replicate_instrumentor()
     init_vertexai_instrumentor()
@@ -545,6 +541,12 @@ def init_instrumentations(should_enrich_metrics: bool):
     init_lancedb_instrumentor()
     init_groq_instrumentor()
 
+    # These libraries are not instrumented by default, but if the user wants, he can manually specify them
+    # init_redis_instrumentor()
+    # init_requests_instrumentor()
+    # init_urllib3_instrumentor()
+    # init_pymysql_instrumentor()
+
 
 def init_openai_instrumentor(should_enrich_metrics: bool):
     try: