lmnr 0.4.8__py3-none-any.whl → 0.4.9__py3-none-any.whl

This diff compares the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.
@@ -1,48 +1,48 @@
-from opentelemetry.semconv_ai import SpanAttributes
-import pytest
-from openai import OpenAI
-from lmnr.traceloop_sdk.tracing.manual import LLMMessage, track_llm_call
+# from opentelemetry.semconv_ai import SpanAttributes
+# import pytest
+# from openai import OpenAI
+# from lmnr.traceloop_sdk.tracing.manual import LLMMessage, track_llm_call
 
 
-@pytest.fixture
-def openai_client():
-    return OpenAI()
+# @pytest.fixture
+# def openai_client():
+# return OpenAI()
 
 
-@pytest.mark.vcr
-def test_manual_report(exporter, openai_client):
-    with track_llm_call(vendor="openai", type="chat") as span:
-        span.report_request(
-            model="gpt-3.5-turbo",
-            messages=[
-                LLMMessage(role="user", content="Tell me a joke about opentelemetry")
-            ],
-        )
+# @pytest.mark.vcr
+# def test_manual_report(exporter, openai_client):
+# with track_llm_call(vendor="openai", type="chat") as span:
+# span.report_request(
+# model="gpt-3.5-turbo",
+# messages=[
+# LLMMessage(role="user", content="Tell me a joke about opentelemetry")
+# ],
+# )
 
-        res = openai_client.chat.completions.create(
-            model="gpt-3.5-turbo",
-            messages=[
-                {"role": "user", "content": "Tell me a joke about opentelemetry"}
-            ],
-        )
+# res = openai_client.chat.completions.create(
+# model="gpt-3.5-turbo",
+# messages=[
+# {"role": "user", "content": "Tell me a joke about opentelemetry"}
+# ],
+# )
 
-        span.report_response(res.model, [text.message.content for text in res.choices])
+# span.report_response(res.model, [text.message.content for text in res.choices])
 
-    spans = exporter.get_finished_spans()
-    open_ai_span = spans[0]
-    assert open_ai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "gpt-3.5-turbo"
-    assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.role"] == "user"
-    assert (
-        open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"]
-        == "Tell me a joke about opentelemetry"
-    )
-    assert (
-        open_ai_span.attributes[SpanAttributes.LLM_RESPONSE_MODEL]
-        == "gpt-3.5-turbo-0125"
-    )
-    assert (
-        open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
-        == "Why did the opentelemetry developer break up with their partner? Because they were tired"
-        + " of constantly tracing their every move!"
-    )
-    assert open_ai_span.end_time > open_ai_span.start_time
+# spans = exporter.get_finished_spans()
+# open_ai_span = spans[0]
+# assert open_ai_span.attributes[SpanAttributes.LLM_REQUEST_MODEL] == "gpt-3.5-turbo"
+# assert open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.role"] == "user"
+# assert (
+# open_ai_span.attributes[f"{SpanAttributes.LLM_PROMPTS}.0.content"]
+# == "Tell me a joke about opentelemetry"
+# )
+# assert (
+# open_ai_span.attributes[SpanAttributes.LLM_RESPONSE_MODEL]
+# == "gpt-3.5-turbo-0125"
+# )
+# assert (
+# open_ai_span.attributes[f"{SpanAttributes.LLM_COMPLETIONS}.0.content"]
+# == "Why did the opentelemetry developer break up with their partner? Because they were tired"
+# + " of constantly tracing their every move!"
+# )
+# assert open_ai_span.end_time > open_ai_span.start_time
@@ -1,47 +1,47 @@
-from opentelemetry.semconv_ai import SpanAttributes
-from lmnr.traceloop_sdk.decorators import task, workflow
-from pytest import raises
-
-
-def test_nested_tasks(exporter):
-    @workflow(name="some_workflow")
-    def some_workflow():
-        return outer_task()
-
-    @task(name="outer_task")
-    def outer_task():
-        return inner_task()
-
-    @task(name="inner_task")
-    def inner_task():
-        return inner_inner_task()
-
-    @task(name="inner_inner_task")
-    def inner_inner_task():
-        return
-
-    some_workflow()
-
-    spans = exporter.get_finished_spans()
-    assert [span.name for span in spans] == [
-        "inner_inner_task.task",
-        "inner_task.task",
-        "outer_task.task",
-        "some_workflow.workflow",
-    ]
-
-    inner_inner_task_span = spans[0]
-    inner_task_span = spans[1]
-    outer_task_span = spans[2]
-    some_workflow_span = spans[3]
-
-    assert (
-        inner_inner_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
-        == "outer_task.inner_task"
-    )
-    assert (
-        inner_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH] == "outer_task"
-    )
-    with raises(KeyError):
-        _ = outer_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
-        _ = some_workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
+# from opentelemetry.semconv_ai import SpanAttributes
+# from lmnr.traceloop_sdk.decorators import task, workflow
+# from pytest import raises
+
+
+# def test_nested_tasks(exporter):
+# @workflow(name="some_workflow")
+# def some_workflow():
+# return outer_task()
+
+# @task(name="outer_task")
+# def outer_task():
+# return inner_task()
+
+# @task(name="inner_task")
+# def inner_task():
+# return inner_inner_task()
+
+# @task(name="inner_inner_task")
+# def inner_inner_task():
+# return
+
+# some_workflow()
+
+# spans = exporter.get_finished_spans()
+# assert [span.name for span in spans] == [
+# "inner_inner_task.task",
+# "inner_task.task",
+# "outer_task.task",
+# "some_workflow.workflow",
+# ]
+
+# inner_inner_task_span = spans[0]
+# inner_task_span = spans[1]
+# outer_task_span = spans[2]
+# some_workflow_span = spans[3]
+
+# assert (
+# inner_inner_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
+# == "outer_task.inner_task"
+# )
+# assert (
+# inner_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH] == "outer_task"
+# )
+# with raises(KeyError):
+# _ = outer_task_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
+# _ = some_workflow_span.attributes[SpanAttributes.TRACELOOP_ENTITY_PATH]
@@ -1,50 +1,50 @@
-import os
-
-import pytest
-from openai import OpenAI
-from opentelemetry.semconv_ai import SpanAttributes
-from lmnr.traceloop_sdk.decorators import workflow, task
-
-
-@pytest.fixture(autouse=True)
-def disable_trace_content():
-    os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
-    yield
-    os.environ["TRACELOOP_TRACE_CONTENT"] = "true"
-
-
-@pytest.fixture
-def openai_client():
-    return OpenAI()
-
-
-@pytest.mark.vcr
-def test_simple_workflow(exporter, openai_client):
-    @task(name="joke_creation")
-    def create_joke():
-        completion = openai_client.chat.completions.create(
-            model="gpt-3.5-turbo",
-            messages=[
-                {"role": "user", "content": "Tell me a joke about opentelemetry"}
-            ],
-        )
-        return completion.choices[0].message.content
-
-    @workflow(name="pirate_joke_generator")
-    def joke_workflow():
-        create_joke()
-
-    joke_workflow()
-
-    spans = exporter.get_finished_spans()
-    assert [span.name for span in spans] == [
-        "openai.chat",
-        "joke_creation.task",
-        "pirate_joke_generator.workflow",
-    ]
-    open_ai_span = spans[0]
-    assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 15
-    assert not open_ai_span.attributes.get(f"{SpanAttributes.LLM_PROMPTS}.0.content")
-    assert not open_ai_span.attributes.get(
-        f"{SpanAttributes.LLM_PROMPTS}.0.completions"
-    )
+# import os
+
+# import pytest
+# from openai import OpenAI
+# from opentelemetry.semconv_ai import SpanAttributes
+# from lmnr.traceloop_sdk.decorators import workflow, task
+
+
+# @pytest.fixture(autouse=True)
+# def disable_trace_content():
+# os.environ["TRACELOOP_TRACE_CONTENT"] = "false"
+# yield
+# os.environ["TRACELOOP_TRACE_CONTENT"] = "true"
+
+
+# @pytest.fixture
+# def openai_client():
+# return OpenAI()
+
+
+# @pytest.mark.vcr
+# def test_simple_workflow(exporter, openai_client):
+# @task(name="joke_creation")
+# def create_joke():
+# completion = openai_client.chat.completions.create(
+# model="gpt-3.5-turbo",
+# messages=[
+# {"role": "user", "content": "Tell me a joke about opentelemetry"}
+# ],
+# )
+# return completion.choices[0].message.content
+
+# @workflow(name="pirate_joke_generator")
+# def joke_workflow():
+# create_joke()
+
+# joke_workflow()
+
+# spans = exporter.get_finished_spans()
+# assert [span.name for span in spans] == [
+# "openai.chat",
+# "joke_creation.task",
+# "pirate_joke_generator.workflow",
+# ]
+# open_ai_span = spans[0]
+# assert open_ai_span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] == 15
+# assert not open_ai_span.attributes.get(f"{SpanAttributes.LLM_PROMPTS}.0.content")
+# assert not open_ai_span.attributes.get(
+# f"{SpanAttributes.LLM_PROMPTS}.0.completions"
+# )
@@ -1,57 +1,57 @@
-import pytest
-from openai import OpenAI
-from lmnr.traceloop_sdk.decorators import workflow
+# import pytest
+# from openai import OpenAI
+# from lmnr.traceloop_sdk.decorators import workflow
 
 
-@pytest.fixture
-def openai_client():
-    return OpenAI()
+# @pytest.fixture
+# def openai_client():
+# return OpenAI()
 
 
-@pytest.mark.vcr
-def test_resource_attributes(exporter, openai_client):
-    openai_client.chat.completions.create(
-        model="gpt-3.5-turbo",
-        messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
-    )
+# @pytest.mark.vcr
+# def test_resource_attributes(exporter, openai_client):
+# openai_client.chat.completions.create(
+# model="gpt-3.5-turbo",
+# messages=[{"role": "user", "content": "Tell me a joke about opentelemetry"}],
+# )
 
-    spans = exporter.get_finished_spans()
-    open_ai_span = spans[0]
-    assert open_ai_span.resource.attributes["something"] == "yes"
-    assert open_ai_span.resource.attributes["service.name"] == "test"
+# spans = exporter.get_finished_spans()
+# open_ai_span = spans[0]
+# assert open_ai_span.resource.attributes["something"] == "yes"
+# assert open_ai_span.resource.attributes["service.name"] == "test"
 
 
-def test_custom_span_processor(exporter_with_custom_span_processor):
-    @workflow()
-    def run_workflow():
-        pass
+# def test_custom_span_processor(exporter_with_custom_span_processor):
+# @workflow()
+# def run_workflow():
+# pass
 
-    run_workflow()
+# run_workflow()
 
-    spans = exporter_with_custom_span_processor.get_finished_spans()
-    workflow_span = spans[0]
-    assert workflow_span.attributes["custom_span"] == "yes"
+# spans = exporter_with_custom_span_processor.get_finished_spans()
+# workflow_span = spans[0]
+# assert workflow_span.attributes["custom_span"] == "yes"
 
 
-def test_instruments(exporter_with_custom_instrumentations):
-    @workflow()
-    def run_workflow():
-        pass
+# def test_instruments(exporter_with_custom_instrumentations):
+# @workflow()
+# def run_workflow():
+# pass
 
-    run_workflow()
+# run_workflow()
 
-    spans = exporter_with_custom_instrumentations.get_finished_spans()
-    workflow_span = spans[0]
-    assert workflow_span
+# spans = exporter_with_custom_instrumentations.get_finished_spans()
+# workflow_span = spans[0]
+# assert workflow_span
 
 
-def test_no_metrics(exporter_with_no_metrics):
-    @workflow()
-    def run_workflow():
-        pass
+# def test_no_metrics(exporter_with_no_metrics):
+# @workflow()
+# def run_workflow():
+# pass
 
-    run_workflow()
+# run_workflow()
 
-    spans = exporter_with_no_metrics.get_finished_spans()
-    workflow_span = spans[0]
-    assert workflow_span
+# spans = exporter_with_no_metrics.get_finished_spans()
+# workflow_span = spans[0]
+# assert workflow_span
@@ -1,32 +1,32 @@
-import json
-import pytest
+# import json
+# import pytest
 
-from langchain_openai import ChatOpenAI
-from lmnr.traceloop_sdk.decorators import task
-from opentelemetry.semconv_ai import SpanAttributes
+# from langchain_openai import ChatOpenAI
+# from lmnr.traceloop_sdk.decorators import task
+# from opentelemetry.semconv_ai import SpanAttributes
 
 
-@pytest.mark.vcr
-def test_task_io_serialization_with_langchain(exporter):
-    @task(name="answer_question")
-    def answer_question():
-        chat = ChatOpenAI(temperature=0)
+# @pytest.mark.vcr
+# def test_task_io_serialization_with_langchain(exporter):
+# @task(name="answer_question")
+# def answer_question():
+# chat = ChatOpenAI(temperature=0)
 
-        return chat.invoke("Is Berlin the capital of Germany? Answer with yes or no")
+# return chat.invoke("Is Berlin the capital of Germany? Answer with yes or no")
 
-    answer_question()
+# answer_question()
 
-    spans = exporter.get_finished_spans()
+# spans = exporter.get_finished_spans()
 
-    assert [span.name for span in spans] == [
-        "ChatOpenAI.chat",
-        "answer_question.task",
-    ]
+# assert [span.name for span in spans] == [
+# "ChatOpenAI.chat",
+# "answer_question.task",
+# ]
 
-    task_span = next(span for span in spans if span.name == "answer_question.task")
-    assert (
-        json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))[
-            "kwargs"
-        ]["content"]
-        == "Yes"
-    )
+# task_span = next(span for span in spans if span.name == "answer_question.task")
+# assert (
+# json.loads(task_span.attributes.get(SpanAttributes.TRACELOOP_ENTITY_OUTPUT))[
+# "kwargs"
+# ]["content"]
+# == "Yes"
+# )