lmnr 0.6.2__py3-none-any.whl → 0.6.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lmnr/cli.py +18 -11
- lmnr/opentelemetry_lib/__init__.py +6 -7
- lmnr/opentelemetry_lib/decorators/__init__.py +23 -8
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +2 -2
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +4 -4
- lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +19 -19
- lmnr/opentelemetry_lib/tracing/__init__.py +6 -7
- lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +32 -33
- lmnr/opentelemetry_lib/tracing/context_properties.py +2 -3
- lmnr/opentelemetry_lib/tracing/exporter.py +4 -5
- lmnr/opentelemetry_lib/tracing/instruments.py +5 -6
- lmnr/opentelemetry_lib/tracing/processor.py +6 -7
- lmnr/sdk/browser/utils.py +4 -3
- lmnr/sdk/client/asynchronous/async_client.py +26 -11
- lmnr/sdk/client/asynchronous/resources/__init__.py +2 -0
- lmnr/sdk/client/asynchronous/resources/agent.py +89 -91
- lmnr/sdk/client/asynchronous/resources/evals.py +8 -8
- lmnr/sdk/client/asynchronous/resources/tags.py +89 -0
- lmnr/sdk/client/synchronous/resources/__init__.py +2 -1
- lmnr/sdk/client/synchronous/resources/agent.py +91 -91
- lmnr/sdk/client/synchronous/resources/evals.py +8 -8
- lmnr/sdk/client/synchronous/resources/tags.py +89 -0
- lmnr/sdk/client/synchronous/sync_client.py +28 -13
- lmnr/sdk/decorators.py +43 -27
- lmnr/sdk/eval_control.py +1 -1
- lmnr/sdk/evaluations.py +75 -52
- lmnr/sdk/laminar.py +76 -58
- lmnr/sdk/types.py +47 -37
- lmnr/sdk/utils.py +3 -3
- lmnr/version.py +1 -1
- {lmnr-0.6.2.dist-info → lmnr-0.6.4.dist-info}/METADATA +1 -1
- lmnr-0.6.4.dist-info/RECORD +56 -0
- lmnr-0.6.2.dist-info/RECORD +0 -54
- {lmnr-0.6.2.dist-info → lmnr-0.6.4.dist-info}/LICENSE +0 -0
- {lmnr-0.6.2.dist-info → lmnr-0.6.4.dist-info}/WHEEL +0 -0
- {lmnr-0.6.2.dist-info → lmnr-0.6.4.dist-info}/entry_points.txt +0 -0
lmnr/sdk/client/synchronous/resources/agent.py
CHANGED
@@ -1,6 +1,6 @@
 """Agent resource for interacting with Laminar agents."""
 
-from typing import Generator, Literal,
+from typing import Generator, Literal, overload
 import uuid
 
 from lmnr.sdk.client.synchronous.resources.base import BaseResource
@@ -23,44 +23,44 @@ class Agent(BaseResource):
         self,
         prompt: str,
         stream: Literal[True],
-        parent_span_context:
-        model_provider:
-        model:
+        parent_span_context: LaminarSpanContext | str | None = None,
+        model_provider: ModelProvider | None = None,
+        model: str | None = None,
         enable_thinking: bool = True,
-        agent_state:
-        storage_state:
+        agent_state: str | None = None,
+        storage_state: str | None = None,
         return_screenshots: bool = False,
         return_agent_state: bool = False,
         return_storage_state: bool = False,
         disable_give_control: bool = False,
-        timeout:
-        cdp_url:
-        max_steps:
-        thinking_token_budget:
-        start_url:
-        user_agent:
+        timeout: int | None = None,
+        cdp_url: str | None = None,
+        max_steps: int | None = None,
+        thinking_token_budget: int | None = None,
+        start_url: str | None = None,
+        user_agent: str | None = None,
     ) -> Generator[RunAgentResponseChunk, None, None]:
         """Run Laminar index agent in streaming mode.
 
         Args:
             prompt (str): prompt for the agent
             stream (Literal[True]): whether to stream the agent's response
-            parent_span_context (
-            model_provider (
-            model (
+            parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+            model_provider (ModelProvider | None, optional): LLM model provider
+            model (str | None, optional): LLM model name
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-            agent_state (
-            storage_state (
+            agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
             return_storage_state (bool, optional): whether to return the storage state. Default to False.
             disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
-            timeout (
-            cdp_url (
-            max_steps (
-            thinking_token_budget (
-            start_url (
-            user_agent (
+            timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+            user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
         Returns:
             Generator[RunAgentResponseChunk, None, None]: a generator of response chunks
         """
@@ -70,43 +70,43 @@ class Agent(BaseResource):
     def run(
         self,
         prompt: str,
-        parent_span_context:
-        model_provider:
-        model:
+        parent_span_context: LaminarSpanContext | str | None = None,
+        model_provider: ModelProvider | None = None,
+        model: str | None = None,
         enable_thinking: bool = True,
-        agent_state:
-        storage_state:
+        agent_state: str | None = None,
+        storage_state: str | None = None,
         return_screenshots: bool = False,
         return_agent_state: bool = False,
         disable_give_control: bool = False,
         return_storage_state: bool = False,
-        timeout:
-        cdp_url:
-        max_steps:
-        thinking_token_budget:
-        start_url:
-        user_agent:
+        timeout: int | None = None,
+        cdp_url: str | None = None,
+        max_steps: int | None = None,
+        thinking_token_budget: int | None = None,
+        start_url: str | None = None,
+        user_agent: str | None = None,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
         Args:
             prompt (str): prompt for the agent
-            parent_span_context (
-            model_provider (
-            model (
+            parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+            model_provider (ModelProvider | None, optional): LLM model provider
+            model (str | None, optional): LLM model name
            enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-            agent_state (
-            storage_state (
+            agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
             return_storage_state (bool, optional): whether to return the storage state. Default to False.
             disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
-            timeout (
-            cdp_url (
-            max_steps (
-            thinking_token_budget (
-            start_url (
-            user_agent (
+            timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+            user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
 
         Returns:
             AgentOutput: agent output
@@ -117,45 +117,45 @@ class Agent(BaseResource):
     def run(
         self,
         prompt: str,
-        parent_span_context:
-        model_provider:
-        model:
+        parent_span_context: LaminarSpanContext | str | None = None,
+        model_provider: ModelProvider | None = None,
+        model: str | None = None,
         stream: Literal[False] = False,
         enable_thinking: bool = True,
-        agent_state:
-        storage_state:
+        agent_state: str | None = None,
+        storage_state: str | None = None,
         return_screenshots: bool = False,
         return_agent_state: bool = False,
         return_storage_state: bool = False,
         disable_give_control: bool = False,
-        timeout:
-        cdp_url:
-        max_steps:
-        thinking_token_budget:
-        start_url:
-        user_agent:
+        timeout: int | None = None,
+        cdp_url: str | None = None,
+        max_steps: int | None = None,
+        thinking_token_budget: int | None = None,
+        start_url: str | None = None,
+        user_agent: str | None = None,
     ) -> AgentOutput:
         """Run Laminar index agent.
 
         Args:
             prompt (str): prompt for the agent
-            parent_span_context (
-            model_provider (
-            model (
+            parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+            model_provider (ModelProvider | None, optional): LLM model provider
+            model (str | None, optional): LLM model name
             stream (Literal[False], optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-            agent_state (
-            storage_state (
+            agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
             return_storage_state (bool, optional): whether to return the storage state. Default to False.
             disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
-            timeout (
-            cdp_url (
-            max_steps (
-            thinking_token_budget (
-            start_url (
-            user_agent (
+            timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+            user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
 
         Returns:
             AgentOutput: agent output
@@ -165,48 +165,48 @@ class Agent(BaseResource):
     def run(
         self,
         prompt: str,
-        parent_span_context:
-        model_provider:
-        model:
+        parent_span_context: LaminarSpanContext | str | None = None,
+        model_provider: ModelProvider | None = None,
+        model: str | None = None,
         stream: bool = False,
         enable_thinking: bool = True,
-        agent_state:
-        storage_state:
+        agent_state: str | None = None,
+        storage_state: str | None = None,
         return_screenshots: bool = False,
         return_agent_state: bool = False,
         return_storage_state: bool = False,
         disable_give_control: bool = False,
-        timeout:
-        cdp_url:
-        max_steps:
-        thinking_token_budget:
-        start_url:
-        user_agent:
-    ) ->
+        timeout: int | None = None,
+        cdp_url: str | None = None,
+        max_steps: int | None = None,
+        thinking_token_budget: int | None = None,
+        start_url: str | None = None,
+        user_agent: str | None = None,
+    ) -> AgentOutput | Generator[RunAgentResponseChunk, None, None]:
         """Run Laminar index agent.
 
         Args:
             prompt (str): prompt for the agent
-            parent_span_context (
-            model_provider (
-            model (
+            parent_span_context (LaminarSpanContext | str | None, optional): span context if the agent is part of a trace
+            model_provider (ModelProvider | None, optional): LLM model provider
+            model (str | None, optional): LLM model name
             stream (bool, optional): whether to stream the agent's response
             enable_thinking (bool, optional): whether to enable thinking on the underlying LLM. Default to True.
-            agent_state (
-            storage_state (
+            agent_state (str | None, optional): the agent's state as returned by the previous agent run. Default to None.
+            storage_state (str | None, optional): the browser's storage state as returned by the previous agent run. Default to None.
             return_screenshots (bool, optional): whether to return screenshots of the agent's states at every step. Default to False.
             return_agent_state (bool, optional): whether to return the agent's state. Default to False.
             return_storage_state (bool, optional): whether to return the storage state. Default to False.
             disable_give_control (bool, optional): whether to NOT give the agent additional direction to give control to the user for tasks such as login. Default to False.
-            timeout (
-            cdp_url (
-            max_steps (
-            thinking_token_budget (
-            start_url (
-            user_agent (
+            timeout (int | None, optional): timeout seconds for the agent's response. Default to None.
+            cdp_url (str | None, optional): Chrome DevTools Protocol URL of an existing browser session. Default to None.
+            max_steps (int | None, optional): maximum number of steps the agent can take. If not set, the backend will use a default value (currently 100). Default to None.
+            thinking_token_budget (int | None, optional): maximum number of tokens the underlying LLM can spend on thinking in each step, if supported by the model. Default to None.
+            start_url (str | None, optional): the URL to start the agent on. Must be a valid URL - refer to https://playwright.dev/docs/api/class-page#page-goto. If not specified, the agent infers this from the prompt. Default to None.
+            user_agent (str | None, optional): the user to be sent to the browser. If not specified, Laminar uses the default user agent. Default to None.
 
         Returns:
-
+            AgentOutput | Generator[RunAgentResponseChunk, None, None]: agent output or a generator of response chunks
         """
         if parent_span_context is None:
             span = trace.get_current_span()
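The hunks above rewrite the `run` overloads of the synchronous `Agent` resource so every optional parameter is annotated as an `X | None` union with an explicit `None` default, and they fill in the matching docstring entries. A minimal usage sketch of the resulting API, not taken from the package; it assumes `LaminarClient` is importable from the package root (the `Tags` docstring later in this diff imports it that way) and that `LMNR_PROJECT_API_KEY` is set:

```python
from lmnr import LaminarClient  # assumed export; the Tags docstring below imports it the same way

client = LaminarClient()

# Non-streaming overload: returns an AgentOutput.
output = client.agent.run(
    prompt="Open https://docs.lmnr.ai and summarize the changelog",
    max_steps=25,   # int | None; the backend default (currently 100) applies when omitted
    timeout=300,    # int | None, in seconds
)

# Streaming overload (stream=Literal[True]): yields RunAgentResponseChunk objects.
for chunk in client.agent.run(prompt="Same task, but stream progress", stream=True):
    print(chunk)  # chunk fields are defined in lmnr.sdk.types and are not shown in this diff
```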
lmnr/sdk/client/synchronous/resources/evals.py
CHANGED
@@ -2,12 +2,12 @@
 
 import uuid
 import urllib.parse
-from typing import Optional
 
 from lmnr.sdk.client.synchronous.resources.base import BaseResource
 from lmnr.sdk.types import (
     InitEvaluationResponse,
     EvaluationResultDatapoint,
+    PartialEvaluationDatapoint,
     GetDatapointsResponse,
 )
 
@@ -16,13 +16,13 @@ class Evals(BaseResource):
     """Resource for interacting with Laminar evaluations API."""
 
     def init(
-        self, name:
+        self, name: str | None = None, group_name: str | None = None
     ) -> InitEvaluationResponse:
         """Initialize a new evaluation.
 
         Args:
-            name (
-            group_name (
+            name (str | None, optional): Name of the evaluation. Defaults to None.
+            group_name (str | None, optional): Group name for the evaluation. Defaults to None.
 
         Returns:
             InitEvaluationResponse: The response from the initialization request.
@@ -41,15 +41,15 @@ class Evals(BaseResource):
     def save_datapoints(
         self,
         eval_id: uuid.UUID,
-        datapoints: list[EvaluationResultDatapoint],
-        group_name:
+        datapoints: list[EvaluationResultDatapoint | PartialEvaluationDatapoint],
+        group_name: str | None = None,
     ):
         """Save evaluation datapoints.
 
         Args:
             eval_id (uuid.UUID): The evaluation ID.
-            datapoints (list[EvaluationResultDatapoint]): The datapoints to save.
-            group_name (
+            datapoints (list[EvaluationResultDatapoint | PartialEvaluationDatapoint]): The datapoints to save.
+            group_name (str | None, optional): Group name for the datapoints. Defaults to None.
 
         Raises:
             ValueError: If there's an error saving the datapoints.
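The `Evals` resource drops the `typing.Optional` import in favor of `str | None` annotations and lets `save_datapoints` accept the new `PartialEvaluationDatapoint` type alongside `EvaluationResultDatapoint`. A hedged sketch of the updated calls; it assumes the client exposes an `evals` property analogous to `agent` and the new `tags` (the property itself is outside the hunks shown):

```python
from lmnr import LaminarClient  # assumed export, as in the Tags docstring example below

client = LaminarClient()

# Both arguments are plain `str | None` keywords in 0.6.4.
evaluation = client.evals.init(name="regression-suite", group_name="nightly")

# save_datapoints() now also accepts PartialEvaluationDatapoint entries mixed with
# EvaluationResultDatapoint ones; how those objects are built is defined in
# lmnr.sdk.types and is outside this diff, so the call is only sketched here:
# client.evals.save_datapoints(eval_id=..., datapoints=[...], group_name="nightly")
```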
lmnr/sdk/client/synchronous/resources/tags.py
ADDED
@@ -0,0 +1,89 @@
+"""Resource for tagging traces."""
+
+import json
+import uuid
+
+from lmnr.sdk.client.synchronous.resources.base import BaseResource
+from lmnr.sdk.log import get_default_logger
+
+logger = get_default_logger(__name__)
+
+
+class Tags(BaseResource):
+    """Resource for tagging traces."""
+
+    def tag(
+        self,
+        trace_id: str | int | uuid.UUID,
+        tags: list[str] | str,
+    ):
+        """Tag a trace with a list of tags. Note that the trace must be ended
+        before tagging it. You may want to call `Laminar.flush()` after the
+        trace that you want to tag.
+
+        Args:
+            trace_id (str | int | uuid.UUID): The trace id to tag.
+            tags (list[str] | str): The tag or list of tags to add to the trace.
+
+        Raises:
+            ValueError: If the trace id is not a valid UUID.
+
+        Returns:
+            list[dict]: The response from the server.
+
+        Example:
+        ```python
+        from lmnr import Laminar, LaminarClient, observe
+
+        Laminar.initialize()
+        client = LaminarClient()
+        trace_id = None
+
+        @observe()
+        def foo():
+            trace_id = Laminar.get_trace_id()
+            pass
+
+        # make sure `foo` is called outside a trace context
+        foo()
+
+        # or make sure the trace is ended by this point
+        Laminar.flush()
+
+        client.tags.tag(trace_id, "my_tag")
+        ```
+        """
+        trace_tags = tags if isinstance(tags, list) else [tags]
+        if isinstance(trace_id, uuid.UUID):
+            trace_id = str(trace_id)
+        elif isinstance(trace_id, int):
+            trace_id = str(uuid.UUID(int=trace_id))
+        elif isinstance(trace_id, str):
+            uuid.UUID(trace_id)
+        else:
+            raise ValueError(f"Invalid trace id: {trace_id}")
+
+        url = self._base_url + "/v1/tag"
+        payload = {
+            "traceId": trace_id,
+            "names": trace_tags,
+        }
+        response = self._client.post(
+            url,
+            content=json.dumps(payload),
+            headers={
+                **self._headers(),
+            },
+        )
+
+        if response.status_code == 404:
+            logger.warning(
+                f"Trace {trace_id} not found. The trace may have not been ended yet."
+            )
+            return []
+
+        if response.status_code != 200:
+            raise ValueError(
+                f"Failed to tag trace: [{response.status_code}] {response.text}"
+            )
+        return response.json()
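Beyond the docstring example above, the `isinstance` chain shows that `tag()` normalizes three equivalent forms of a trace id. A small illustrative sketch, not from the package:

```python
import uuid

from lmnr import LaminarClient  # assumed export, matching the docstring example above

client = LaminarClient()

trace_id = uuid.uuid4()  # stand-in; a real id would come from Laminar.get_trace_id()

client.tags.tag(trace_id, "my_tag")                  # uuid.UUID
client.tags.tag(str(trace_id), ["my_tag", "prod"])   # canonical string, single tag or a list
client.tags.tag(trace_id.int, "my_tag")              # 128-bit integer form of the same id
```

Per the handler above, a missing trace only logs a warning and returns an empty list, while any other non-200 response raises `ValueError`.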
lmnr/sdk/client/synchronous/sync_client.py
CHANGED
@@ -4,13 +4,14 @@ Laminar HTTP client. Used to send data to/from the Laminar API.
 
 import httpx
 import re
-from typing import
+from typing import TypeVar
 from types import TracebackType
 
 from lmnr.sdk.client.synchronous.resources import (
     Agent,
     BrowserEvents,
     Evals,
+    Tags,
 )
 from lmnr.sdk.utils import from_env
 
@@ -23,22 +24,26 @@ class LaminarClient:
     __client: httpx.Client = None
 
     # Resource properties
-    __agent:
-    __evals:
+    __agent: Agent | None = None
+    __evals: Evals | None = None
+    __tags: Tags | None = None
 
     def __init__(
         self,
-        base_url:
-        project_api_key:
-        port:
+        base_url: str | None = None,
+        project_api_key: str | None = None,
+        port: int | None = None,
         timeout: int = 3600,
     ):
         """Initializer for the Laminar HTTP client.
 
         Args:
-            base_url (str): base URL of the Laminar API.
-
-
+            base_url (str | None): base URL of the Laminar API. If not\
+                provided, the LMNR_BASE_URL environment variable is used or we\
+                default to "https://api.lmnr.ai".
+            project_api_key (str | None): Laminar project API key. If not\
+                provided, the LMNR_PROJECT_API_KEY environment variable is used.
+            port (int | None, optional): port of the Laminar API HTTP server.\
                Overrides any port in the base URL.
                Defaults to None. If none is provided, the default port (443) will
                be used.
@@ -72,6 +77,7 @@ class LaminarClient:
         self.__browser_events = BrowserEvents(
             self.__client, self.__base_url, self.__project_api_key
         )
+        self.__tags = Tags(self.__client, self.__base_url, self.__project_api_key)
 
     @property
     def agent(self) -> Agent:
@@ -100,6 +106,15 @@ class LaminarClient:
         """
         return self.__browser_events
 
+    @property
+    def tags(self) -> Tags:
+        """Get the Tags resource.
+
+        Returns:
+            Tags: The Tags resource instance.
+        """
+        return self.__tags
+
     def shutdown(self):
         """Shutdown the client by closing underlying connections."""
         self.__client.close()
@@ -119,7 +134,7 @@ class LaminarClient:
         """
         # If an error is thrown while constructing a client, self._client
         # may not be present
-        if hasattr(self, "
+        if hasattr(self, "__client"):
             self.__client.close()
 
     def __enter__(self: _T) -> _T:
@@ -127,9 +142,9 @@ class LaminarClient:
 
     def __exit__(
         self,
-        exc_type:
-        exc:
-        exc_tb:
+        exc_type: type[BaseException] | None,
+        exc: BaseException | None,
+        exc_tb: TracebackType | None,
     ) -> None:
         self.close()
 
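These hunks wire a `Tags` instance into `__init__`, expose it through the new `tags` property, move the constructor and `__exit__` annotations to `X | None` unions, and change the guard in the close path to check for `"__client"`. A usage sketch under the same assumptions as the earlier examples, with placeholder values only:

```python
from lmnr import LaminarClient  # assumed export

# The client is a context manager: __exit__ above delegates to close(), which
# shuts down the underlying httpx.Client.
with LaminarClient(
    base_url=None,         # falls back to LMNR_BASE_URL, then "https://api.lmnr.ai"
    project_api_key=None,  # falls back to LMNR_PROJECT_API_KEY
    timeout=3600,
) as client:
    client.tags.tag("00000000-0000-0000-0000-000000000000", "smoke-test")  # placeholder trace id
```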
lmnr/sdk/decorators.py
CHANGED
@@ -1,16 +1,14 @@
 from lmnr.opentelemetry_lib.decorators import (
     entity_method,
     aentity_method,
+    json_dumps,
 )
 from opentelemetry.trace import INVALID_SPAN, get_current_span
 
-from typing import Callable, Literal,
+from typing import Any, Callable, Literal, TypeVar, cast
 from typing_extensions import ParamSpec
 
 from lmnr.opentelemetry_lib.tracing.attributes import SESSION_ID
-from lmnr.opentelemetry_lib.tracing.context_properties import (
-    update_association_properties,
-)
 
 from .utils import is_async
 
@@ -21,37 +19,42 @@ R = TypeVar("R")
 
 def observe(
     *,
-    name:
-    session_id:
+    name: str | None = None,
+    session_id: str | None = None,
+    user_id: str | None = None,
     ignore_input: bool = False,
     ignore_output: bool = False,
-    span_type:
-    ignore_inputs:
+    span_type: Literal["DEFAULT", "LLM", "TOOL"] = "DEFAULT",
+    ignore_inputs: list[str] | None = None,
+    metadata: dict[str, Any] | None = None,
 ) -> Callable[[Callable[P, R]], Callable[P, R]]:
     """The main decorator entrypoint for Laminar. This is used to wrap
     functions and methods to create spans.
 
     Args:
-        name (
-
-
-
-
+        name (str | None, optional): Name of the span. Function name is used if\
+            not specified. Defaults to None.
+        session_id (str | None, optional): Session ID to associate with the\
+            span and the following context. Defaults to None.
+        user_id (str | None, optional): User ID to associate with the span and\
+            the following context. This is different from ID of a Laminar user.
+            Defaults to None.
         ignore_input (bool, optional): Whether to ignore ALL input of the\
-
+            wrapped function. Defaults to False.
         ignore_output (bool, optional): Whether to ignore ALL output of the\
-
-        span_type (
-
-        ignore_inputs (
-
-
-
-
-
+            wrapped function. Defaults to False.
+        span_type (Literal["DEFAULT", "LLM", "TOOL"], optional): Type of the span.
+            Defaults to "DEFAULT".
+        ignore_inputs (list[str] | None, optional): List of input keys to\
+            ignore. For example, if the wrapped function takes three arguments\
+            def foo(a, b, `sensitive_data`), and you want to ignore the\
+            `sensitive_data` argument, you can pass ["sensitive_data"] to\
+            this argument. Defaults to None.
+        metadata (dict[str, Any] | None, optional): Metadata to associate with\
+            the trace. Must be JSON serializable. Defaults to None.
     Raises:
-        Exception: re-raises the exception if the wrapped function raises
-
+        Exception: re-raises the exception if the wrapped function raises an\
+            exception
 
     Returns:
         R: Returns the result of the wrapped function
@@ -65,14 +68,25 @@ def observe(
         association_properties = {}
         if session_id is not None:
             association_properties["session_id"] = session_id
-
-
+        if user_id is not None:
+            association_properties["user_id"] = user_id
+        if metadata is not None:
+            association_properties.update(
+                {
+                    f"metadata.{k}": (
+                        v if isinstance(v, (str, int, float, bool)) else json_dumps(v)
+                    )
+                    for k, v in metadata.items()
+                }
+            )
+        result = (
            aentity_method(
                name=name,
                ignore_input=ignore_input,
                ignore_output=ignore_output,
                span_type=span_type,
                ignore_inputs=ignore_inputs,
+                association_properties=association_properties,
            )(func)
            if is_async(func)
            else entity_method(
@@ -81,7 +95,9 @@ def observe(
                ignore_output=ignore_output,
                span_type=span_type,
                ignore_inputs=ignore_inputs,
+                association_properties=association_properties,
            )(func)
        )
+        return result
 
    return cast(Callable, decorator)