lmnr 0.4.6-py3-none-any.whl → 0.4.8-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (50)
  1. lmnr/sdk/decorators.py +2 -7
  2. lmnr/sdk/evaluations.py +4 -4
  3. lmnr/sdk/laminar.py +69 -4
  4. lmnr/sdk/types.py +8 -8
  5. lmnr/traceloop_sdk/.flake8 +12 -0
  6. lmnr/traceloop_sdk/.python-version +1 -0
  7. lmnr/traceloop_sdk/README.md +16 -0
  8. lmnr/traceloop_sdk/__init__.py +138 -0
  9. lmnr/traceloop_sdk/config/__init__.py +13 -0
  10. lmnr/traceloop_sdk/decorators/__init__.py +131 -0
  11. lmnr/traceloop_sdk/decorators/base.py +253 -0
  12. lmnr/traceloop_sdk/instruments.py +29 -0
  13. lmnr/traceloop_sdk/metrics/__init__.py +0 -0
  14. lmnr/traceloop_sdk/metrics/metrics.py +176 -0
  15. lmnr/traceloop_sdk/tests/__init__.py +1 -0
  16. lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_and_external_association_properties.yaml +101 -0
  17. lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_association_properties.yaml +99 -0
  18. lmnr/traceloop_sdk/tests/cassettes/test_manual/test_manual_report.yaml +98 -0
  19. lmnr/traceloop_sdk/tests/cassettes/test_manual/test_resource_attributes.yaml +98 -0
  20. lmnr/traceloop_sdk/tests/cassettes/test_privacy_no_prompts/test_simple_workflow.yaml +199 -0
  21. lmnr/traceloop_sdk/tests/cassettes/test_prompt_management/test_prompt_management.yaml +202 -0
  22. lmnr/traceloop_sdk/tests/cassettes/test_sdk_initialization/test_resource_attributes.yaml +199 -0
  23. lmnr/traceloop_sdk/tests/cassettes/test_tasks/test_task_io_serialization_with_langchain.yaml +96 -0
  24. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_aworkflow.yaml +98 -0
  25. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_workflow.yaml +199 -0
  26. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_streaming_workflow.yaml +167 -0
  27. lmnr/traceloop_sdk/tests/conftest.py +111 -0
  28. lmnr/traceloop_sdk/tests/test_association_properties.py +229 -0
  29. lmnr/traceloop_sdk/tests/test_manual.py +48 -0
  30. lmnr/traceloop_sdk/tests/test_nested_tasks.py +47 -0
  31. lmnr/traceloop_sdk/tests/test_privacy_no_prompts.py +50 -0
  32. lmnr/traceloop_sdk/tests/test_sdk_initialization.py +57 -0
  33. lmnr/traceloop_sdk/tests/test_tasks.py +32 -0
  34. lmnr/traceloop_sdk/tests/test_workflows.py +261 -0
  35. lmnr/traceloop_sdk/tracing/__init__.py +2 -0
  36. lmnr/traceloop_sdk/tracing/content_allow_list.py +24 -0
  37. lmnr/traceloop_sdk/tracing/context_manager.py +13 -0
  38. lmnr/traceloop_sdk/tracing/manual.py +57 -0
  39. lmnr/traceloop_sdk/tracing/tracing.py +1078 -0
  40. lmnr/traceloop_sdk/utils/__init__.py +26 -0
  41. lmnr/traceloop_sdk/utils/in_memory_span_exporter.py +61 -0
  42. lmnr/traceloop_sdk/utils/json_encoder.py +20 -0
  43. lmnr/traceloop_sdk/utils/package_check.py +8 -0
  44. lmnr/traceloop_sdk/version.py +1 -0
  45. {lmnr-0.4.6.dist-info → lmnr-0.4.8.dist-info}/METADATA +40 -3
  46. lmnr-0.4.8.dist-info/RECORD +53 -0
  47. lmnr-0.4.6.dist-info/RECORD +0 -13
  48. {lmnr-0.4.6.dist-info → lmnr-0.4.8.dist-info}/LICENSE +0 -0
  49. {lmnr-0.4.6.dist-info → lmnr-0.4.8.dist-info}/WHEEL +0 -0
  50. {lmnr-0.4.6.dist-info → lmnr-0.4.8.dist-info}/entry_points.txt +0 -0
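The headline change in 0.4.8 is the vendored Traceloop SDK under lmnr/traceloop_sdk/. Pieced together from the test files shown below, the decorator surface works roughly like this (a minimal sketch; the function body is illustrative and stands in for the OpenAI calls the tests actually make):

from lmnr.traceloop_sdk import Traceloop
from lmnr.traceloop_sdk.decorators import task, workflow

@task(name="something_creator", version=2)
def create_something(what: str, subject: str) -> str:
    # Attaches a prompt template, its variables, and a template version
    # to the current span, as exercised in test_simple_workflow below.
    Traceloop.set_prompt(
        "Tell me a {what} about {subject}", {"what": what, "subject": subject}, 5
    )
    return f"a {what} about {subject}"  # stand-in for the LLM completion

@workflow(name="pirate_joke_generator", version=1)
def joke_workflow() -> str:
    # Produces spans named "something_creator.task" and
    # "pirate_joke_generator.workflow", with the task parented to the workflow.
    return create_something("joke", subject="OpenTelemetry")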
lmnr/traceloop_sdk/tests/test_sdk_initialization.py
@@ -0,0 +1,57 @@
+ import pytest
+ from openai import OpenAI
+ from lmnr.traceloop_sdk.decorators import workflow
+
+
+ @pytest.fixture
+ def openai_client():
+     return OpenAI()
+
+
+ @pytest.mark.vcr
+ def test_resource_attributes(exporter, openai_client):
+     openai_client.chat.completions.create(
+         model="gpt-3.5-turbo",
+         messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
+     )
+
+     spans = exporter.get_finished_spans()
+     open_ai_span = spans[0]
+     assert open_ai_span.resource.attributes["something"] == "yes"
+     assert open_ai_span.resource.attributes["service.name"] == "test"
+
+
+ def test_custom_span_processor(exporter_with_custom_span_processor):
+     @workflow()
+     def run_workflow():
+         pass
+
+     run_workflow()
+
+     spans = exporter_with_custom_span_processor.get_finished_spans()
+     workflow_span = spans[0]
+     assert workflow_span.attributes["custom_span"] == "yes"
+
+
+ def test_instruments(exporter_with_custom_instrumentations):
+     @workflow()
+     def run_workflow():
+         pass
+
+     run_workflow()
+
+     spans = exporter_with_custom_instrumentations.get_finished_spans()
+     workflow_span = spans[0]
+     assert workflow_span
+
+
+ def test_no_metrics(exporter_with_no_metrics):
+     @workflow()
+     def run_workflow():
+         pass
+
+     run_workflow()
+
+     spans = exporter_with_no_metrics.get_finished_spans()
+     workflow_span = spans[0]
+     assert workflow_span
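These tests depend on fixtures (exporter, exporter_with_custom_span_processor, exporter_with_custom_instrumentations, exporter_with_no_metrics) defined in lmnr/traceloop_sdk/tests/conftest.py, whose body this diff view does not show. A plausible sketch of the core exporter fixture, assuming Traceloop.init accepts an in-memory exporter and resource attributes as the assertions above imply:

import pytest
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from lmnr.traceloop_sdk import Traceloop

@pytest.fixture(scope="session")
def exporter():
    # Keep finished spans in memory so tests can call get_finished_spans().
    exporter = InMemorySpanExporter()
    Traceloop.init(
        app_name="test",  # surfaces as the "service.name" resource attribute
        resource_attributes={"something": "yes"},  # asserted in test_resource_attributes
        disable_batch=True,  # assumption: export spans synchronously for tests
        exporter=exporter,
    )
    return exporter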
lmnr/traceloop_sdk/tests/test_tasks.py
@@ -0,0 +1,32 @@
+ import json
+ import pytest
+
+ from langchain_openai import ChatOpenAI
+ from lmnr.traceloop_sdk.decorators import task
+ from opentelemetry.semconv_ai import SpanAttributes
+
+
+ @pytest.mark.vcr
+ def test_task_io_serialization_with_langchain(exporter):
+     @task(name="answer_question")
+     def answer_question():
+         chat = ChatOpenAI(temperature=0)
+
+         return chat.invoke("Is Berlin the capital of Germany? Answer with yes or no")
+
+     answer_question()
+
+     spans = exporter.get_finished_spans()
+
+     assert [span.name for span in spans] == [
+         "ChatOpenAI.chat",
+         "answer_question.task",
+     ]
+
+     task_span = next(span for span in spans if span.name == "answer_question.task")
+     assert (
+         json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))[
+             "kwargs"
+         ]["content"]
+         == "Yes"
+     )
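The final assertion relies on how the task decorator JSON-serializes the returned langchain AIMessage into the TRACELOOP_ENTITY_OUTPUT attribute. Inferred from the assertion alone, the stored document looks roughly like this (all fields other than kwargs.content omitted):

{
    "kwargs": {
        "content": "Yes",
        # ... remaining AIMessage constructor kwargs ...
    },
    # ... other serializer metadata ...
}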
lmnr/traceloop_sdk/tests/test_workflows.py
@@ -0,0 +1,261 @@
+ import json
+
+ import pytest
+ from openai import OpenAI, AsyncOpenAI
+ from opentelemetry.semconv_ai import SpanAttributes
+ from lmnr.traceloop_sdk import Traceloop
+ from lmnr.traceloop_sdk.decorators import workflow, task, aworkflow, atask
+
+
+ @pytest.fixture
+ def openai_client():
+     return OpenAI()
+
+
+ @pytest.fixture
+ def async_openai_client():
+     return AsyncOpenAI()
+
+
+ @pytest.mark.vcr
+ def test_simple_workflow(exporter, openai_client):
+     @task(name="something_creator", version=2)
+     def create_something(what: str, subject: str):
+         Traceloop.set_prompt(
+             "Tell me a {what} about {subject}", {"what": what, "subject": subject}, 5
+         )
+         completion = openai_client.chat.completions.create(
+             model="gpt-3.5-turbo",
+             messages=[{"role": "user", "content": f"Tell me a {what} about {subject}"}],
+         )
+         return completion.choices[0].message.content
+
+     @workflow(name="pirate_joke_generator", version=1)
+     def joke_workflow():
+         return create_something("joke", subject="OpenTelemetry")
+
+     joke = joke_workflow()
+
+     spans = exporter.get_finished_spans()
+     assert [span.name for span in spans] == [
+         "openai.chat",
+         "something_creator.task",
+         "pirate_joke_generator.workflow",
+     ]
+     open_ai_span = next(span for span in spans if span.name == "openai.chat")
+     assert (
+         open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"]
+         == "Tell me a joke about OpenTelemetry"
+     )
+     assert open_ai_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
+     assert (
+         open_ai_span.attributes.get("traceloop.prompt.template")
+         == "Tell me a {what} about {subject}"
+     )
+     assert (
+         open_ai_span.attributes.get("traceloop.prompt.template_variables.what")
+         == "joke"
+     )
+     assert (
+         open_ai_span.attributes.get("traceloop.prompt.template_variables.subject")
+         == "OpenTelemetry"
+     )
+     assert open_ai_span.attributes.get("traceloop.prompt.version") == 5
+
+     workflow_span = next(
+         span for span in spans if span.name == "pirate_joke_generator.workflow"
+     )
+     task_span = next(span for span in spans if span.name == "something_creator.task")
+     assert json.loads(task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT]) == {
+         "args": ["joke"],
+         "kwargs": {"subject": "OpenTelemetry"},
+     }
+
+     assert (
+         json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))
+         == joke
+     )
+     assert task_span.parent.span_id == workflow_span.context.span_id
+     assert (
+         workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME]
+         == "pirate_joke_generator"
+     )
+     assert workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 1
+     assert task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 2
+
+
+ @pytest.mark.vcr
+ @pytest.mark.asyncio
+ async def test_simple_aworkflow(exporter, async_openai_client):
+     @atask(name="something_creator", version=2)
+     async def create_something(what: str, subject: str):
+         Traceloop.set_prompt(
+             "Tell me a {what} about {subject}", {"what": what, "subject": subject}, 5
+         )
+         completion = await async_openai_client.chat.completions.create(
+             model="gpt-3.5-turbo",
+             messages=[{"role": "user", "content": f"Tell me a {what} about {subject}"}],
+         )
+         return completion.choices[0].message.content
+
+     @aworkflow(name="pirate_joke_generator", version=1)
+     async def joke_workflow():
+         return await create_something("joke", subject="OpenTelemetry")
+
+     joke = await joke_workflow()
+
+     spans = exporter.get_finished_spans()
+     assert [span.name for span in spans] == [
+         "openai.chat",
+         "something_creator.task",
+         "pirate_joke_generator.workflow",
+     ]
+     open_ai_span = next(span for span in spans if span.name == "openai.chat")
+     assert (
+         open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"]
+         == "Tell me a joke about OpenTelemetry"
+     )
+     assert open_ai_span.attributes.get(f"{SpanAttributes.LLM_COMPLETIONS}.0.content")
+     assert (
+         open_ai_span.attributes.get("traceloop.prompt.template")
+         == "Tell me a {what} about {subject}"
+     )
+     assert (
+         open_ai_span.attributes.get("traceloop.prompt.template_variables.what")
+         == "joke"
+     )
+     assert (
+         open_ai_span.attributes.get("traceloop.prompt.template_variables.subject")
+         == "OpenTelemetry"
+     )
+     assert open_ai_span.attributes.get("traceloop.prompt.version") == 5
+
+     workflow_span = next(
+         span for span in spans if span.name == "pirate_joke_generator.workflow"
+     )
+     task_span = next(span for span in spans if span.name == "something_creator.task")
+     assert json.loads(task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT]) == {
+         "args": ["joke"],
+         "kwargs": {"subject": "OpenTelemetry"},
+     }
+
+     assert (
+         json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))
+         == joke
+     )
+     assert task_span.parent.span_id == workflow_span.context.span_id
+     assert (
+         workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME]
+         == "pirate_joke_generator"
+     )
+     assert workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 1
+     assert task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_VERSION] == 2
+
+
+ @pytest.mark.vcr
+ def test_streaming_workflow(exporter, openai_client):
+
+     @task(name="pirate_joke_generator")
+     def joke_task():
+         response_stream = openai_client.chat.completions.create(
+             model="gpt-3.5-turbo",
+             messages=[
+                 {"role": "user", "content": "Tell me a joke about OpenTelemetry"}
+             ],
+             stream=True,
+         )
+         for chunk in response_stream:
+             yield chunk
+
+     @task(name="joke_runner")
+     def joke_runner():
+         res = joke_task()
+         return res
+
+     @workflow(name="joke_manager")
+     def joke_workflow():
+         res = joke_runner()
+         for chunk in res:
+             pass
+
+     joke_workflow()
+
+     spans = exporter.get_finished_spans()
+     assert set([span.name for span in spans]) == set(
+         [
+             "openai.chat",
+             "pirate_joke_generator.task",
+             "joke_runner.task",
+             "joke_manager.workflow",
+         ]
+     )
+     generator_span = next(
+         span for span in spans if span.name == "pirate_joke_generator.task"
+     )
+     runner_span = next(span for span in spans if span.name == "joke_runner.task")
+     manager_span = next(span for span in spans if span.name == "joke_manager.workflow")
+     openai_span = next(span for span in spans if span.name == "openai.chat")
+
+     assert openai_span.parent.span_id == generator_span.context.span_id
+     assert generator_span.parent.span_id == runner_span.context.span_id
+     assert runner_span.parent.span_id == manager_span.context.span_id
+     assert openai_span.end_time <= manager_span.end_time
+
+
+ def test_unrelated_entities(exporter):
+     @workflow(name="workflow_1")
+     def workflow_1():
+         return
+
+     @task(name="task_1")
+     def task_1():
+         return
+
+     workflow_1()
+     task_1()
+
+     spans = exporter.get_finished_spans()
+     assert [span.name for span in spans] == ["workflow_1.workflow", "task_1.task"]
+
+     workflow_1_span = spans[0]
+     task_1_span = spans[1]
+
+     assert (
+         workflow_1_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME] == "workflow_1"
+     )
+     assert workflow_1_span.attributes[SpanAttributes.TRACELOOP_SPAN_KIND] == "workflow"
+
+     assert task_1_span.attributes[SpanAttributes.TRACELOOP_ENTITY_NAME] == "task_1"
+     assert task_1_span.attributes[SpanAttributes.TRACELOOP_SPAN_KIND] == "task"
+     assert task_1_span.parent is None
+
+
+ def test_unserializable_workflow(exporter):
+     @task(name="unserializable_task")
+     def unserializable_task(obj: object):
+         return object()
+
+     @workflow(name="unserializable_workflow")
+     def unserializable_workflow(obj: object):
+         return unserializable_task(obj)
+
+     unserializable_task(object())
+
+     spans = exporter.get_finished_spans()
+     assert [span.name for span in spans] == ["unserializable_task.task"]
+
+
+ @pytest.mark.asyncio
+ async def test_unserializable_async_workflow(exporter):
+     @atask(name="unserializable_task")
+     async def unserializable_task(obj: object):
+         return object()
+
+     @aworkflow(name="unserializable_workflow")
+     async def unserializable_workflow(obj: object):
+         return await unserializable_task(obj)
+
+     await unserializable_task(object())
+
+     spans = exporter.get_finished_spans()
+     assert [span.name for span in spans] == ["unserializable_task.task"]
lmnr/traceloop_sdk/tracing/__init__.py
@@ -0,0 +1,2 @@
+ from lmnr.traceloop_sdk.tracing.context_manager import get_tracer
+ from lmnr.traceloop_sdk.tracing.tracing import set_workflow_name
lmnr/traceloop_sdk/tracing/content_allow_list.py
@@ -0,0 +1,24 @@
+ # Manages list of associated properties for which content tracing
+ # (prompts, vector embeddings, etc.) is allowed.
+ class ContentAllowList:
+     def __new__(cls) -> "ContentAllowList":
+         if not hasattr(cls, "instance"):
+             obj = cls.instance = super(ContentAllowList, cls).__new__(cls)
+             obj._allow_list: list[dict] = []
+
+         return cls.instance
+
+     def is_allowed(self, association_properties: dict) -> bool:
+         for allow_list_item in self._allow_list:
+             if all(
+                 [
+                     association_properties.get(key) == value
+                     for key, value in allow_list_item.items()
+                 ]
+             ):
+                 return True
+
+         return False
+
+     def load(self, response_json: dict):
+         self._allow_list = response_json["associationPropertyAllowList"]
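Since __new__ stashes the first instance on the class, every ContentAllowList() call returns the same object. A quick usage sketch; the association property keys and values here are hypothetical:

allow_list = ContentAllowList()
allow_list.load({"associationPropertyAllowList": [{"tenant_id": "customer-a"}]})

assert ContentAllowList() is allow_list  # singleton: same instance everywhere

# Allowed when every key/value of some allow-list entry matches;
# extra properties on the candidate are ignored.
assert allow_list.is_allowed({"tenant_id": "customer-a", "user_id": "u1"})
assert not allow_list.is_allowed({"tenant_id": "customer-b"})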
lmnr/traceloop_sdk/tracing/context_manager.py
@@ -0,0 +1,13 @@
+ from contextlib import contextmanager
+
+ from lmnr.traceloop_sdk.tracing.tracing import TracerWrapper
+
+
+ @contextmanager
+ def get_tracer(flush_on_exit: bool = False):
+     wrapper = TracerWrapper()
+     try:
+         yield wrapper.get_tracer()
+     finally:
+         if flush_on_exit:
+             wrapper.flush()
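A usage sketch (the span name and attribute are illustrative); TracerWrapper is the singleton configured in tracing.py, so this assumes SDK initialization has already run:

from lmnr.traceloop_sdk.tracing.context_manager import get_tracer

with get_tracer(flush_on_exit=True) as tracer:
    with tracer.start_as_current_span("my_operation") as span:
        span.set_attribute("my.attribute", "value")
# flush_on_exit=True makes the wrapper flush buffered spans once the block exits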
lmnr/traceloop_sdk/tracing/manual.py
@@ -0,0 +1,57 @@
+ from contextlib import contextmanager
+ from opentelemetry.semconv_ai import SpanAttributes
+ from opentelemetry.trace import Span
+ from pydantic import BaseModel
+ from lmnr.traceloop_sdk.tracing.context_manager import get_tracer
+
+
+ class LLMMessage(BaseModel):
+     role: str
+     content: str
+
+
+ class LLMUsage(BaseModel):
+     prompt_tokens: int
+     completion_tokens: int
+     total_tokens: int
+
+
+ class LLMSpan:
+     _span: Span = None
+
+     def __init__(self, span: Span):
+         self._span = span
+         pass
+
+     def report_request(self, model: str, messages: list[LLMMessage]):
+         self._span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model)
+         for idx, message in enumerate(messages):
+             self._span.set_attribute(
+                 f"{SpanAttributes.LLM_PROMPTS}.{idx}.role", message.role
+             )
+             self._span.set_attribute(
+                 f"{SpanAttributes.LLM_PROMPTS}.{idx}.content", message.content
+             )
+
+     def report_response(self, model: str, completions: list[str]):
+         self._span.set_attribute(SpanAttributes.LLM_RESPONSE_MODEL, model)
+         for idx, completion in enumerate(completions):
+             self._span.set_attribute(
+                 f"{SpanAttributes.LLM_COMPLETIONS}.{idx}.role", "assistant"
+             )
+             self._span.set_attribute(
+                 f"{SpanAttributes.LLM_COMPLETIONS}.{idx}", completion
+             )
+
+
+ @contextmanager
+ def track_llm_call(vendor: str, type: str):
+     with get_tracer() as tracer:
+         with tracer.start_as_current_span(name=f"{vendor}.{type}") as span:
+             span.set_attribute(SpanAttributes.LLM_SYSTEM, vendor)
+             span.set_attribute(SpanAttributes.LLM_REQUEST_TYPE, type)
+             llm_span = LLMSpan(span)
+             try:
+                 yield llm_span
+             finally:
+                 span.end()
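track_llm_call is the manual counterpart to the decorators, presumably what test_manual.py (listed above, body not shown in this view) exercises. A sketch built only from the API defined here; the vendor, model, and message content are illustrative:

from lmnr.traceloop_sdk.tracing.manual import LLMMessage, track_llm_call

with track_llm_call(vendor="openai", type="chat") as llm_span:
    llm_span.report_request(
        model="gpt-3.5-turbo",
        messages=[LLMMessage(role="user", content="Tell me a joke about opentelemetry")],
    )
    # ... make the actual provider call with your own client here ...
    llm_span.report_response(model="gpt-3.5-turbo", completions=["the model reply"])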