lmnr-0.4.7-py3-none-any.whl → lmnr-0.4.9-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- lmnr/sdk/decorators.py +3 -3
- lmnr/sdk/evaluations.py +4 -4
- lmnr/sdk/laminar.py +12 -58
- lmnr/sdk/types.py +9 -9
- lmnr/traceloop_sdk/__init__.py +7 -17
- lmnr/traceloop_sdk/decorators/__init__.py +0 -131
- lmnr/traceloop_sdk/decorators/base.py +6 -98
- lmnr/traceloop_sdk/tests/__init__.py +1 -1
- lmnr/traceloop_sdk/tests/conftest.py +83 -83
- lmnr/traceloop_sdk/tests/test_association_properties.py +229 -229
- lmnr/traceloop_sdk/tests/test_manual.py +41 -41
- lmnr/traceloop_sdk/tests/test_nested_tasks.py +47 -47
- lmnr/traceloop_sdk/tests/test_privacy_no_prompts.py +50 -50
- lmnr/traceloop_sdk/tests/test_sdk_initialization.py +40 -40
- lmnr/traceloop_sdk/tests/test_tasks.py +24 -24
- lmnr/traceloop_sdk/tests/test_workflows.py +262 -261
- lmnr/traceloop_sdk/tracing/__init__.py +0 -1
- lmnr/traceloop_sdk/tracing/tracing.py +19 -30
- {lmnr-0.4.7.dist-info → lmnr-0.4.9.dist-info}/METADATA +20 -18
- {lmnr-0.4.7.dist-info → lmnr-0.4.9.dist-info}/RECORD +23 -24
- lmnr/traceloop_sdk/README.md +0 -16
- {lmnr-0.4.7.dist-info → lmnr-0.4.9.dist-info}/LICENSE +0 -0
- {lmnr-0.4.7.dist-info → lmnr-0.4.9.dist-info}/WHEEL +0 -0
- {lmnr-0.4.7.dist-info → lmnr-0.4.9.dist-info}/entry_points.txt +0 -0
lmnr/traceloop_sdk/tests/test_workflows.py
@@ -1,261 +1,262 @@
-import json
-
-import pytest
-from openai import OpenAI, AsyncOpenAI
-from opentelemetry.semconv_ai import SpanAttributes
-from lmnr.traceloop_sdk import Traceloop
-from lmnr.traceloop_sdk.decorators import workflow, task, aworkflow, atask
-
-
-@pytest.fixture
-def openai_client():
-    …
-
-
-@pytest.fixture
-def async_openai_client():
-    …
[removed lines 18-261 of the old file are not rendered in the source diff view, apart from the fragment "@pytest.mark." at old line 88]
+# import json
+
+# import pytest
+# from openai import OpenAI, AsyncOpenAI
+# from opentelemetry.semconv_ai import SpanAttributes
+# from lmnr.traceloop_sdk import Traceloop
+# from lmnr.traceloop_sdk.decorators import workflow, task, aworkflow, atask
+
+
+# @pytest.fixture
+# def openai_client():
+#     return OpenAI()
+
+
+# @pytest.fixture
+# def async_openai_client():
+#     return AsyncOpenAI()
+
+
+# # commented out because of the version parameter in task decorator
+# # @pytest.mark.vcr
+# # def test_simple_workflow(exporter, openai_client):
+# #     @task(name="something_creator", version=2)
+# #     def create_something(what: str, subject: str):
+# #         Traceloop.set_prompt(
+# #             "Tell me a {what} about {subject}", {"what": what, "subject": subject}, 5
+# #         )
+# #         completion = openai_client.chat.completions.create(
+# #             model="gpt-3.5-turbo",
+# #             messages=[{"role": "user", "content": f"Tell me a {what} about {subject}"}],
+# #         )
+# #         return completion.choices[0].message.content
+
+# #     @workflow(name="pirate_joke_generator", version=1)
+# #     def joke_workflow():
+# #         return create_something("joke", subject="OpenTelemetry")
+
+# #     joke = joke_workflow()
+
+# #     spans = exporter.get_finished_spans()
+# #     assert [span.name for span in spans] == [
+# #         "openai.chat",
+# #         "something_creator.task",
+# #         "pirate_joke_generator.workflow",
+# #     ]
+# #     open_ai_span = next(span for span in spans if span.name == "openai.chat")
+# #     assert (
+# #         open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"]
+# #         == "Tell me a joke about OpenTelemetry"
+# #     )
+# #     assert open_ai_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
+# #     assert (
+# #         open_ai_span.attributes.get("traceloop.prompt.template")
+# #         == "Tell me a {what} about {subject}"
+# #     )
+# #     assert (
+# #         open_ai_span.attributes.get("traceloop.prompt.template_variables.what")
+# #         == "joke"
+# #     )
+# #     assert (
+# #         open_ai_span.attributes.get("traceloop.prompt.template_variables.subject")
+# #         == "OpenTelemetry"
+# #     )
+# #     assert open_ai_span.attributes.get("traceloop.prompt.version") == 5
+
+# #     workflow_span = next(
+# #         span for span in spans if span.name == "pirate_joke_generator.workflow"
+# #     )
+# #     task_span = next(span for span in spans if span.name == "something_creator.task")
+# #     assert json.loads(task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT]) == {
+# #         "args": ["joke"],
+# #         "kwargs": {"subject": "OpenTelemetry"},
+# #     }
+
+# #     assert (
+# #         json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))
+# #         == joke
+# #     )
+# #     assert task_span.parent.span_id == workflow_span.context.span_id
+# #     assert (
+# #         workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME]
+# #         == "pirate_joke_generator"
+# #     )
+# #     assert workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 1
+# #     assert task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 2
+
+
+# @pytest.mark.vcr
+# @pytest.mark.asyncio
+# async def test_simple_aworkflow(exporter, async_openai_client):
+#     @atask(name="something_creator", version=2)
+#     async def create_something(what: str, subject: str):
+#         Traceloop.set_prompt(
+#             "Tell me a {what} about {subject}", {"what": what, "subject": subject}, 5
+#         )
+#         completion = await async_openai_client.chat.completions.create(
+#             model="gpt-3.5-turbo",
+#             messages=[{"role": "user", "content": f"Tell me a {what} about {subject}"}],
+#         )
+#         return completion.choices[0].message.content
+
+#     @aworkflow(name="pirate_joke_generator", version=1)
+#     async def joke_workflow():
+#         return await create_something("joke", subject="OpenTelemetry")
+
+#     joke = await joke_workflow()
+
+#     spans = exporter.get_finished_spans()
+#     assert [span.name for span in spans] == [
+#         "openai.chat",
+#         "something_creator.task",
+#         "pirate_joke_generator.workflow",
+#     ]
+#     open_ai_span = next(span for span in spans if span.name == "openai.chat")
+#     assert (
+#         open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"]
+#         == "Tell me a joke about OpenTelemetry"
+#     )
+#     assert open_ai_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
+#     assert (
+#         open_ai_span.attributes.get("traceloop.prompt.template")
+#         == "Tell me a {what} about {subject}"
+#     )
+#     assert (
+#         open_ai_span.attributes.get("traceloop.prompt.template_variables.what")
+#         == "joke"
+#     )
+#     assert (
+#         open_ai_span.attributes.get("traceloop.prompt.template_variables.subject")
+#         == "OpenTelemetry"
+#     )
+#     assert open_ai_span.attributes.get("traceloop.prompt.version") == 5
+
+#     workflow_span = next(
+#         span for span in spans if span.name == "pirate_joke_generator.workflow"
+#     )
+#     task_span = next(span for span in spans if span.name == "something_creator.task")
+#     assert json.loads(task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT]) == {
+#         "args": ["joke"],
+#         "kwargs": {"subject": "OpenTelemetry"},
+#     }
+
+#     assert (
+#         json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))
+#         == joke
+#     )
+#     assert task_span.parent.span_id == workflow_span.context.span_id
+#     assert (
+#         workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME]
+#         == "pirate_joke_generator"
+#     )
+#     assert workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 1
+#     assert task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 2
+
+
+# @pytest.mark.vcr
+# def test_streaming_workflow(exporter, openai_client):
+
+#     @task(name="pirate_joke_generator")
+#     def joke_task():
+#         response_stream = openai_client.chat.completions.create(
+#             model="gpt-3.5-turbo",
+#             messages=[
+#                 {"role": "user", "content": "Tell me a joke about OpenTelemetry"}
+#             ],
+#             stream=True,
+#         )
+#         for chunk in response_stream:
+#             yield chunk
+
+#     @task(name="joke_runner")
+#     def joke_runner():
+#         res = joke_task()
+#         return res
+
+#     @workflow(name="joke_manager")
+#     def joke_workflow():
+#         res = joke_runner()
+#         for chunk in res:
+#             pass
+
+#     joke_workflow()
+
+#     spans = exporter.get_finished_spans()
+#     assert set([span.name for span in spans]) == set(
+#         [
+#             "openai.chat",
+#             "pirate_joke_generator.task",
+#             "joke_runner.task",
+#             "joke_manager.workflow",
+#         ]
+#     )
+#     generator_span = next(
+#         span for span in spans if span.name == "pirate_joke_generator.task"
+#     )
+#     runner_span = next(span for span in spans if span.name == "joke_runner.task")
+#     manager_span = next(span for span in spans if span.name == "joke_manager.workflow")
+#     openai_span = next(span for span in spans if span.name == "openai.chat")
+
+#     assert openai_span.parent.span_id == generator_span.context.span_id
+#     assert generator_span.parent.span_id == runner_span.context.span_id
+#     assert runner_span.parent.span_id == manager_span.context.span_id
+#     assert openai_span.end_time <= manager_span.end_time
+
+
+# def test_unrelated_entities(exporter):
+#     @workflow(name="workflow_1")
+#     def workflow_1():
+#         return
+
+#     @task(name="task_1")
+#     def task_1():
+#         return
+
+#     workflow_1()
+#     task_1()
+
+#     spans = exporter.get_finished_spans()
+#     assert [span.name for span in spans] == ["workflow_1.workflow", "task_1.task"]
+
+#     workflow_1_span = spans[0]
+#     task_1_span = spans[1]
+
+#     assert (
+#         workflow_1_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME] == "workflow_1"
+#     )
+#     assert workflow_1_span.attributes[SpanAttributes.TRACELOOP_SPAN_KIND] == "workflow"
+
+#     assert task_1_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME] == "task_1"
+#     assert task_1_span.attributes[SpanAttributes.TRACELOOP_SPAN_KIND] == "task"
+#     assert task_1_span.parent is None
+
+
+# def test_unserializable_workflow(exporter):
+#     @task(name="unserializable_task")
+#     def unserializable_task(obj: object):
+#         return object()
+
+#     @workflow(name="unserializable_workflow")
+#     def unserializable_workflow(obj: object):
+#         return unserializable_task(obj)
+
+#     unserializable_task(object())
+
+#     spans = exporter.get_finished_spans()
+#     assert [span.name for span in spans] == ["unserializable_task.task"]
+
+
+# @pytest.mark.asyncio
+# async def test_unserializable_async_workflow(exporter):
+#     @atask(name="unserializable_task")
+#     async def unserializable_task(obj: object):
+#         return object()
+
+#     @aworkflow(name="unserializable_workflow")
+#     async def unserializable_workflow(obj: object):
+#         return await unserializable_task(obj)
+
+#     await unserializable_task(object())
+
+#     spans = exporter.get_finished_spans()
+#     assert [span.name for span in spans] == ["unserializable_task.task"]
lmnr/traceloop_sdk/tracing/tracing.py
@@ -63,9 +63,9 @@ class TracerWrapper(object):
     def __new__(
         cls,
         disable_batch=False,
-        processor: SpanProcessor = None,
-        propagator: TextMapPropagator = None,
-        exporter: SpanExporter = None,
+        processor: Optional[SpanProcessor] = None,
+        propagator: Optional[TextMapPropagator] = None,
+        exporter: Optional[SpanExporter] = None,
         should_enrich_metrics: bool = True,
         instruments: Optional[Set[Instruments]] = None,
     ) -> "TracerWrapper":
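The hunk above replaces implicit-`None` defaults with explicit `Optional[...]` annotations. A minimal sketch of why that matters, assuming a type checker that disallows implicit Optional (e.g. mypy, whose recent defaults do); the function names below are illustrative only:

from typing import Optional

from opentelemetry.sdk.trace import SpanProcessor


# Rejected under mypy's no-implicit-Optional rule: the annotation says
# "always a SpanProcessor", yet the default value is None.
def init_untyped(processor: SpanProcessor = None):
    ...


# Accepted: the annotation now admits None explicitly.
def init_typed(processor: Optional[SpanProcessor] = None):
    ...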
@@ -342,14 +342,6 @@ class TracerWrapper(object):
         self.flush()

     def _span_processor_on_start(self, span, parent_context):
-        workflow_name = get_value("workflow_name")
-        if workflow_name is not None:
-            span.set_attribute(SpanAttributes.TRACELOOP_WORKFLOW_NAME, workflow_name)
-
-        entity_path = get_value("entity_path")
-        if entity_path is not None:
-            span.set_attribute(SpanAttributes.TRACELOOP_ENTITY_PATH, entity_path)
-
         association_properties = get_value("association_properties")
         if association_properties is not None:
             _set_association_properties_attributes(span, association_properties)
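With the removed block gone, `_span_processor_on_start` no longer copies `workflow_name` or `entity_path` from the context onto new spans; association properties still are copied. A minimal standalone sketch of that remaining pattern, assuming the standard OpenTelemetry SDK; the class name and attribute-key prefix are illustrative, not necessarily the SDK's exact ones:

from opentelemetry.context import get_value
from opentelemetry.sdk.trace import SpanProcessor


class AssociationPropertiesProcessor(SpanProcessor):
    """Copies the 'association_properties' context value onto each span at start."""

    def on_start(self, span, parent_context=None):
        properties = get_value("association_properties") or {}
        for key, value in properties.items():
            # Attribute key prefix chosen for illustration only.
            span.set_attribute(f"association.properties.{key}", value)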
@@ -444,10 +436,23 @@ class TracerWrapper(object):
 def set_association_properties(properties: dict) -> None:
     attach(set_value("association_properties", properties))

-    #
+    # TODO: When called inside observe decorator, this actually sets the properties on the parent span, not the current one
+    # Then, processor's on_start will assign this to current span
+    span = trace.get_current_span()
+    _set_association_properties_attributes(span, properties)
+
+
+def update_association_properties(properties: dict) -> None:
+    """Only adds or updates properties that are not already present"""
+    association_properties = get_value("association_properties") or {}
+    association_properties.update(properties)
+
+    attach(set_value("association_properties", association_properties))
+
+    # TODO: When called inside observe decorator, this actually sets the properties on the parent span, not the current one
+    # Then, processor's on_start will assign this to current span
     span = trace.get_current_span()
-
-    _set_association_properties_attributes(span, properties)
+    _set_association_properties_attributes(span, properties)


 def _set_association_properties_attributes(span, properties: dict) -> None:
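The hunk above makes `set_association_properties` write onto the current span immediately and introduces `update_association_properties`, which merges into the existing context value rather than replacing it. A minimal sketch of that replace-versus-merge distinction using the same OpenTelemetry context primitives; the function names are illustrative stand-ins, and the span-attribute side is omitted:

from opentelemetry.context import attach, get_value, set_value


def replace_properties(properties: dict) -> None:
    # Like set_association_properties: the new dict replaces whatever was attached before.
    attach(set_value("association_properties", properties))


def merge_properties(properties: dict) -> None:
    # Like update_association_properties: merge into the existing dict
    # (new values win on key collisions, as dict.update does).
    current = dict(get_value("association_properties") or {})
    current.update(properties)
    attach(set_value("association_properties", current))


replace_properties({"user_id": "u-1"})
merge_properties({"session_id": "s-42"})
print(get_value("association_properties"))  # {'user_id': 'u-1', 'session_id': 's-42'}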
@@ -457,22 +462,6 @@ def _set_association_properties_attributes(span, properties: dict) -> None:
         )


-def set_workflow_name(workflow_name: str) -> None:
-    attach(set_value("workflow_name", workflow_name))
-
-
-def set_entity_path(entity_path: str) -> None:
-    attach(set_value("entity_path", entity_path))
-
-
-def get_chained_entity_path(entity_name: str) -> str:
-    parent = get_value("entity_path")
-    if parent is None:
-        return entity_name
-    else:
-        return f"{parent}.{entity_name}"
-
-
 def set_managed_prompt_tracing_context(
     key: str,
     version: int,