lmnr 0.4.10__tar.gz → 0.4.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lmnr-0.4.10 → lmnr-0.4.11}/PKG-INFO +83 -46
- {lmnr-0.4.10 → lmnr-0.4.11}/README.md +82 -45
- {lmnr-0.4.10 → lmnr-0.4.11}/pyproject.toml +2 -2
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/decorators.py +4 -7
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/types.py +17 -17
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/instruments.py +9 -4
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/tracing.py +6 -4
- {lmnr-0.4.10 → lmnr-0.4.11}/LICENSE +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/evaluations.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/laminar.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/log.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/utils.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/.flake8 +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/.python-version +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/config/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/decorators/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/decorators/base.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/metrics/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/metrics/metrics.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_and_external_association_properties.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_association_properties.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_manual/test_manual_report.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_manual/test_resource_attributes.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_privacy_no_prompts/test_simple_workflow.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_prompt_management/test_prompt_management.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_sdk_initialization/test_resource_attributes.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_tasks/test_task_io_serialization_with_langchain.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_aworkflow.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_workflow.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_streaming_workflow.yaml +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/conftest.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_association_properties.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_manual.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_nested_tasks.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_privacy_no_prompts.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_sdk_initialization.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_tasks.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tests/test_workflows.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/content_allow_list.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/context_manager.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/manual.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/__init__.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/in_memory_span_exporter.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/json_encoder.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/utils/package_check.py +0 -0
- {lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/version.py +0 -0
{lmnr-0.4.10 → lmnr-0.4.11}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lmnr
-Version: 0.4.10
+Version: 0.4.11
 Summary: Python SDK for Laminar AI
 License: Apache-2.0
 Author: lmnr.ai
@@ -67,6 +67,9 @@ OpenTelemetry log sender for [Laminar](https://github.com/lmnr-ai/lmnr) for Python
 
 
 ## Quickstart
+
+First, install the package:
+
 ```sh
 python3 -m venv .myenv
 source .myenv/bin/activate # or use your favorite env management tool
@@ -74,22 +77,39 @@ source .myenv/bin/activate # or use your favorite env management tool
 pip install lmnr
 ```
 
-
+Then, you can initialize Laminar in your main file and instrument your code.
 
 ```python
-
+import os
+from openai import OpenAI
+from lmnr import Laminar as L
 
-L.initialize(
-
+L.initialize(
+    project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
+)
 
-
-
+client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+def poem_writer(topic: str):
+    prompt = f"write a poem about {topic}"
+
+    # OpenAI calls are automatically instrumented
+    response = client.chat.completions.create(
+        model="gpt-4o",
+        messages=[
+            {"role": "system", "content": "You are a helpful assistant."},
+            {"role": "user", "content": prompt},
+        ],
+    )
+    poem = response.choices[0].message.content
+    return poem
 
-
-
+if __name__ == "__main__":
+    print(poem_writer("laminar flow"))
+
+```
 
-
-by TraceLoop. Also, we are grateful to Traceloop for implementing autoinstrumentations for many libraries.
+Note that you need to only initialize Laminar once in your application.
 
 ### Project API key
 
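An aside for readers of this diff: the hunk header below notes that the project API key can either be passed to `.initialize()` or placed in a `.env` file at the project root. A minimal sketch of the `.env` route, assuming you load the file yourself with `python-dotenv`; the loader is our choice for illustration, not something this diff adds:

```python
import os

from dotenv import load_dotenv  # pip install python-dotenv
from lmnr import Laminar as L

load_dotenv()  # copies .env entries such as LMNR_PROJECT_API_KEY into os.environ

L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
```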
@@ -98,67 +118,84 @@ You can either pass it to `.initialize()` or set it to `.env` at the root of your project
 
 ## Instrumentation
 
-
-or to trace other functions.
+### Manual instrumentation
 
-
+To instrument any function in your code, we provide a simple `@observe()` decorator.
+This can be useful if you want to trace a request handler or a function which combines multiple LLM calls.
 
 ```python
 import os
 from openai import OpenAI
+from lmnr import Laminar as L, Instruments
 
-
-from lmnr import observe, Laminar as L, Instruments
-L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments={Instruments.OPENAI})
+L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
 
 client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
 
-
-def poem_writer(topic="turbulence"):
+def poem_writer(topic: str):
     prompt = f"write a poem about {topic}"
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": prompt},
+    ]
+
+    # OpenAI calls are still automatically instrumented
     response = client.chat.completions.create(
         model="gpt-4o",
-        messages=
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
-        ],
+        messages=messages,
     )
     poem = response.choices[0].message.content
+
     return poem
 
-
+@observe()
+def generate_poems():
+    poem1 = poem_writer(topic="laminar flow")
+    L.event("is_poem_generated", True)
+    poem2 = poem_writer(topic="turbulence")
+    L.event("is_poem_generated", True)
+    poems = f"{poem1}\n\n---\n\n{poem2}"
+    return poems
 ```
 
-
-
-Also, you can `Laminar.start_as_current_span` if you want to record a chunk of your code.
+Also, you can use `Laminar.start_as_current_span` if you want to record a chunk of your code using `with` statement.
 
 ```python
-
-L.
+def handle_user_request(topic: str):
+    with L.start_as_current_span(name="poem_writer", input=topic):
+        ...
+
+        poem = poem_writer(topic=topic)
+
+        ...
+
+        # while within the span, you can attach laminar events to it
+        L.event("is_poem_generated", True)
 
-
-
-
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": prompt},
-        ]
+        # Use set_span_output to record the output of the span
+        L.set_span_output(poem)
+```
 
-
-    # OpenAI calls are still automatically instrumented with OpenLLMetry
-    response = client.chat.completions.create(
-        model="gpt-4o",
-        messages=messages,
-    )
-    poem = response.choices[0].message.content
-    # while within the span, you can attach laminar events to it
-    L.event("event_name", "event_value")
+### Automatic instrumentation
 
-
+Laminar allows you to automatically instrument majority of the most popular LLM, Vector DB, database, requests, and other libraries.
 
-
+If you want to automatically instrument a default set of libraries, then simply do NOT pass `instruments` argument to `.initialize()`.
+See the full list of available instrumentations in the [enum](/src/lmnr/traceloop_sdk/instruments.py).
+
+If you want to automatically instrument only specific LLM, Vector DB, or other
+calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
+For example, if you want to only instrument OpenAI and Anthropic, then do the following:
+
+```python
+from lmnr import Laminar as L, Instruments
+
+L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
 ```
 
+If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
+
+Majority of the autoinstrumentations are provided by Traceloop's [OpenLLMetry](https://github.com/traceloop/openllmetry).
 
 ## Sending events
 
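The new README text above mentions `instruments=set()` for turning autoinstrumentation off entirely but shows no snippet for it; a minimal sketch, reusing the environment variable from the examples above:

```python
import os

from lmnr import Laminar as L

# An empty set opts out of every autoinstrumentation,
# per the "Automatic instrumentation" text in the diff above.
L.initialize(
    project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
    instruments=set(),
)
```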
{lmnr-0.4.10 → lmnr-0.4.11}/README.md

The three README.md hunks (@@ -9,6 +9,9 @@, @@ -16,22 +19,39 @@, and @@ -40,67 +60,84 @@) are line-for-line identical to the PKG-INFO hunks above, since PKG-INFO embeds the README as the package's long description; only the line offsets differ.
{lmnr-0.4.10 → lmnr-0.4.11}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "lmnr"
-version = "0.4.10"
+version = "0.4.11"
 description = "Python SDK for Laminar AI"
 authors = [
   { name = "lmnr.ai", email = "founders@lmnr.ai" }
@@ -11,7 +11,7 @@ license = "Apache-2.0"
 
 [tool.poetry]
 name = "lmnr"
-version = "0.4.10"
+version = "0.4.11"
 description = "Python SDK for Laminar AI"
 authors = ["lmnr.ai"]
 readme = "README.md"
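Since 0.4.11 changes the default instrumentation set (see the instruments.py and tracing.py hunks below), anyone relying on the old behavior may want to pin the version explicitly rather than float:

```sh
pip install lmnr==0.4.11  # or ==0.4.10 to keep the old defaults
```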
{lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/decorators.py

@@ -4,22 +4,19 @@ from lmnr.traceloop_sdk.decorators.base import (
 )
 from opentelemetry.trace import INVALID_SPAN, get_current_span
 
-from typing import Callable, Optional, ParamSpec, TypeVar, cast
+from typing import Callable, Optional, cast
 
 from lmnr.traceloop_sdk.tracing.tracing import update_association_properties
 
 from .utils import is_async
 
-P = ParamSpec("P")
-R = TypeVar("R")
-
 
 def observe(
     *,
     name: Optional[str] = None,
     user_id: Optional[str] = None,
     session_id: Optional[str] = None,
-) -> Callable[[Callable[P, R]], Callable[P, R]]:
+) -> Callable[[Callable], Callable]:
     """The main decorator entrypoint for Laminar. This is used to wrap
     functions and methods to create spans.
 
@@ -41,7 +38,7 @@ def observe(
         R: Returns the result of the wrapped function
     """
 
-    def decorator(func: Callable[P, R]) -> Callable[P, R]:
+    def decorator(func: Callable) -> Callable:
         current_span = get_current_span()
         if current_span != INVALID_SPAN:
             if session_id is not None:
@@ -64,4 +61,4 @@ def observe(
         else entity_method(name=name)(func)
     )
 
-    return cast(Callable[[Callable[P, R]], Callable[P, R]], decorator)
+    return cast(Callable, decorator)
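A sketch of what the signature change means for callers: `observe` keeps the same keyword arguments, but the decorator is now typed with a bare `Callable`, so static checkers no longer see the wrapped function's parameter types (previously carried by `ParamSpec`/`TypeVar`). The function below is our illustration, not SDK code:

```python
from lmnr import observe

@observe(name="greet_user", session_id="session-123")
def greet(name: str) -> str:
    # Runs inside a span named "greet_user". Under 0.4.11's typing,
    # checkers see the decorated greet as a plain Callable, not (str) -> str.
    return f"Hello, {name}!"

greet("laminar")
```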
{lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/sdk/types.py

@@ -2,7 +2,7 @@ import datetime
 import requests
 import pydantic
 import uuid
-from typing import Any, Literal, Optional,
+from typing import Any, Awaitable, Callable, Literal, Optional, Union
 
 from .utils import to_dict
 
@@ -17,9 +17,9 @@ class ConditionedValue(pydantic.BaseModel):
     value: "NodeInput"
 
 
-Numeric
-NodeInput
-PipelineOutput
+Numeric = Union[int, float]
+NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
+PipelineOutput = Union[NodeInput]
 
 
 class PipelineRunRequest(pydantic.BaseModel):
@@ -74,8 +74,8 @@ class PipelineRunError(Exception):
         return super().__str__()
 
 
-EvaluationDatapointData
-EvaluationDatapointTarget
+EvaluationDatapointData = dict[str, Any]
+EvaluationDatapointTarget = dict[str, Any]
 
 
 # EvaluationDatapoint is a single data point in the evaluation
@@ -87,24 +87,24 @@ class EvaluationDatapoint(pydantic.BaseModel):
     target: EvaluationDatapointTarget
 
 
-ExecutorFunctionReturnType
-EvaluatorFunctionReturnType
+ExecutorFunctionReturnType = Any
+EvaluatorFunctionReturnType = Union[Numeric, dict[str, Numeric]]
 
-
-
-
-
+ExecutorFunction = Callable[
+    [EvaluationDatapointData, Any, dict[str, Any]],
+    Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
+]
 
 # EvaluatorFunction is a function that takes the output of the executor and the
 # target data, and returns a score. The score can be a single number or a
 # record of string keys and number values. The latter is useful for evaluating
 # multiple criteria in one go instead of running multiple evaluators.
-
-
-
-
+EvaluatorFunction = Callable[
+    [ExecutorFunctionReturnType, Any, dict[str, Any]],
+    Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
+]
 
-EvaluationStatus
+EvaluationStatus = Literal["Started", "Finished", "Error"]
 
 
 class CreateEvaluationResponse(pydantic.BaseModel):
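For context, the aliases made explicit above describe the two callbacks used by evaluations: an executor takes a datapoint plus two auxiliary arguments, and an evaluator scores its output. A sketch of conforming functions; names, parameters, and bodies are our illustration, not SDK code:

```python
from typing import Any, Union

Numeric = Union[int, float]

# Satisfies ExecutorFunction: returns Any (an awaitable also qualifies).
def my_executor(data: dict[str, Any], extra: Any, options: dict[str, Any]) -> str:
    return f"an answer to {data.get('question')}"

# Satisfies EvaluatorFunction: a single number or a dict of named scores.
def my_evaluator(output: Any, target: Any, options: dict[str, Any]) -> dict[str, Numeric]:
    return {"exact_match": 1.0 if output == target else 0.0}
```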
{lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/instruments.py

@@ -2,6 +2,8 @@ from enum import Enum
 
 
 class Instruments(Enum):
+    # The list of libraries which will be autoinstrumented
+    # if no specific instruments are provided to initialize()
     OPENAI = "openai"
     ANTHROPIC = "anthropic"
     COHERE = "cohere"
@@ -15,10 +17,6 @@ class Instruments(Enum):
     MILVUS = "milvus"
     TRANSFORMERS = "transformers"
     TOGETHER = "together"
-    REDIS = "redis"
-    REQUESTS = "requests"
-    URLLIB3 = "urllib3"
-    PYMYSQL = "pymysql"
     BEDROCK = "bedrock"
     REPLICATE = "replicate"
     VERTEXAI = "vertexai"
@@ -27,3 +25,10 @@ class Instruments(Enum):
     ALEPHALPHA = "alephalpha"
     MARQO = "marqo"
     LANCEDB = "lancedb"
+
+    # The following libraries will not be autoinstrumented unless
+    # specified explicitly in the initialize() call.
+    REDIS = "redis"
+    REQUESTS = "requests"
+    URLLIB3 = "urllib3"
+    PYMYSQL = "pymysql"
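The practical effect of moving these four members: HTTP and database clients stop being traced by default. A minimal sketch of opting back in, based on the `initialize()` contract shown in the README hunks above:

```python
import os

from lmnr import Laminar as L, Instruments

# Explicitly request the no-longer-default instruments alongside an LLM one.
L.initialize(
    project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
    instruments={Instruments.OPENAI, Instruments.REDIS, Instruments.REQUESTS},
)
```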
{lmnr-0.4.10 → lmnr-0.4.11}/src/lmnr/traceloop_sdk/tracing/tracing.py

@@ -531,10 +531,6 @@ def init_instrumentations(should_enrich_metrics: bool):
     init_milvus_instrumentor()
     init_transformers_instrumentor()
     init_together_instrumentor()
-    init_redis_instrumentor()
-    init_requests_instrumentor()
-    init_urllib3_instrumentor()
-    init_pymysql_instrumentor()
     init_bedrock_instrumentor(should_enrich_metrics)
     init_replicate_instrumentor()
     init_vertexai_instrumentor()
@@ -545,6 +541,12 @@ def init_instrumentations(should_enrich_metrics: bool):
     init_lancedb_instrumentor()
     init_groq_instrumentor()
 
+    # These libraries are not instrumented by default, but if the user wants, he can manually specify them
+    # init_redis_instrumentor()
+    # init_requests_instrumentor()
+    # init_urllib3_instrumentor()
+    # init_pymysql_instrumentor()
+
 
 
 def init_openai_instrumentor(should_enrich_metrics: bool):
     try: