langtrace-python-sdk 1.3.5__py3-none-any.whl → 1.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,97 @@
1
+ interactions:
2
+ - request:
3
+ body: '{"prompt": "A charming and adorable baby sea otter. This small, fluffy
4
+ creature is floating gracefully on its back, with its tiny webbed paws folded
5
+ cutely over its fuzzy belly. It has big, round, innocent eyes that are brimming
6
+ with youthful curiosity. As it blissfully floats on the calm, sparkling ocean
7
+ surface under the glow of the golden sunset, it playfully tosses a shiny seashell
8
+ from one paw to another, showcasing its playful and distinctively otter-like
9
+ behavior.", "model": "dall-e-3"}'
10
+ headers:
11
+ accept:
12
+ - application/json
13
+ accept-encoding:
14
+ - gzip, deflate
15
+ connection:
16
+ - keep-alive
17
+ content-length:
18
+ - '498'
19
+ content-type:
20
+ - application/json
21
+ host:
22
+ - api.openai.com
23
+ user-agent:
24
+ - OpenAI/Python 1.23.2
25
+ x-stainless-arch:
26
+ - arm64
27
+ x-stainless-async:
28
+ - 'false'
29
+ x-stainless-lang:
30
+ - python
31
+ x-stainless-os:
32
+ - MacOS
33
+ x-stainless-package-version:
34
+ - 1.23.2
35
+ x-stainless-runtime:
36
+ - CPython
37
+ x-stainless-runtime-version:
38
+ - 3.11.5
39
+ method: POST
40
+ uri: https://api.openai.com/v1/images/generations
41
+ response:
42
+ body:
43
+ string: !!binary |
44
+ H4sIAAAAAAAAA1yR3W7bRhCF7/UUAwFSb0SKf6IoA0Rh2Q4SO2nTRIjVFEUx5A7JjVa71M5SMhPk
45
+ 3QvSaVH3ZjHAmT3znZlvE4BpaQkdiekVhOswTuNgna4WgyDQ4fQK/pgAAHwbX4CppbNkEn+11hxb
46
+ N72C6S21snSAUDZoj1LXgFoACmOxUAQFFj0wIRjnyEKlDLqhyWiQjqHA8jDUriEwJaH+iYE7W2FJ
47
+ PuwaAj6iUj8+N8iAUKmuqnooDbpxlJO6hwsVBQlo8cJQGSVIgDmTfZ5BSvU+vBlKWS/Amk4LoJ54
48
+ AZVUigRcpGugN51rqk5B2VlpWLp+AbUyFyisrBs3eIxEhIDPHiP1iFYreTySZei0IDsK9YChwWLP
49
+ YCpAYHJjdO60D4/DRKNpQF78x6hV2FedUj04w0xDYG6GhEzIDSkFUjszfkBpFyCV6tjZ56UOaX8Y
50
+ jKvptDx1BAU1eJbG+tPFP3fsrBqO1zjX8tVyaVAKVIqwla01gsuO/UKZwi+NJf8itTAX9jW5ZWvl
51
+ GR0tja297Zd0v396elUf7un3V7rhSO9VoO2yY7Le69Oxtfbw8HD+9Onrm93Ddpc0ew4/LuWx9oL1
52
+ 5VLf9Yz3vz3eZrtTtmp+vd9tG7/V9c/s8iiIEi9IvCjYhatZfB1Hs/g6WX2eM73Q1i+0NrdzPg8N
53
+ oRdkXpDO2ebF3HIpcqmV1DTULpdHrGnZ6nrOByNFniKiIEFeUhWxl6SbzMOsSr00S8R6naVFkK7n
54
+ fHBS5JhkZYmr1KNUoJdkCXmYhYm3KbNkk66ioiziofVfynCzi0bAcBZfR8HnOR9eRPi/yHkx58PL
55
+ ELLOt7+oWbR9e7rb7LuPNzfqJny3uzezaPv44cOXlXvb3d29pvf7p9vTZluvg1l8Ox1v/X0C8Ofk
56
+ ++RvAAAA//8DAI8VAGztAwAA
57
+ headers:
58
+ CF-Cache-Status:
59
+ - DYNAMIC
60
+ CF-RAY:
61
+ - 877687ee8e730d81-MRS
62
+ Connection:
63
+ - keep-alive
64
+ Content-Encoding:
65
+ - gzip
66
+ Content-Type:
67
+ - application/json
68
+ Date:
69
+ - Sat, 20 Apr 2024 16:32:45 GMT
70
+ Server:
71
+ - cloudflare
72
+ Set-Cookie:
73
+ - __cf_bm=kYFV6abEqmWMPFItBN6IHrGsHEkBMh7G1oaVuNVZR1A-1713630765-1.0.1.1-bQc9xmqCV.rTa7TAHJuI6.CIQ15UhKTn1Zyf4YTgWlYYOJIoLtD48FLCul3eEd3f6v.Gcd7Jo_CKQjYqx..dEA;
74
+ path=/; expires=Sat, 20-Apr-24 17:02:45 GMT; domain=.api.openai.com; HttpOnly;
75
+ Secure; SameSite=None
76
+ - _cfuvid=oNNudHOOYv_4yEJDVbToTqSHaHs.q4LVjXgT0QmIVmc-1713630765448-0.0.1.1-604800000;
77
+ path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
78
+ Transfer-Encoding:
79
+ - chunked
80
+ access-control-allow-origin:
81
+ - '*'
82
+ alt-svc:
83
+ - h3=":443"; ma=86400
84
+ openai-organization:
85
+ - scale3-1
86
+ openai-processing-ms:
87
+ - '12231'
88
+ openai-version:
89
+ - '2020-10-01'
90
+ strict-transport-security:
91
+ - max-age=15724800; includeSubDomains
92
+ x-request-id:
93
+ - req_fcf0eebdfca1850bb1d76b490ec251ff
94
+ status:
95
+ code: 200
96
+ message: OK
97
+ version: 1
@@ -0,0 +1,45 @@
1
+ """Unit tests configuration module."""
2
+
3
+ import pytest
4
+ import os
5
+ from openai import OpenAI, AsyncOpenAI
6
+ from opentelemetry.sdk.trace import TracerProvider
7
+ from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
8
+ from opentelemetry.sdk.trace.export import SimpleSpanProcessor
9
+ from opentelemetry import trace
10
+ from langtrace_python_sdk.instrumentation.openai.instrumentation import (
11
+ OpenAIInstrumentation,
12
+ )
13
+
14
+
15
@pytest.fixture(autouse=True)
def environment():
    """Guarantee OPENAI_API_KEY is set so client construction never fails.

    A dummy key suffices because HTTP traffic is replayed from VCR
    cassettes rather than sent to the real API.
    """
    current = os.getenv("OPENAI_API_KEY")
    if not current:
        os.environ["OPENAI_API_KEY"] = "test_api_key"
19
+
20
+
21
@pytest.fixture
def openai_client():
    """Provide a fresh synchronous OpenAI client for each test."""
    client = OpenAI()
    return client
24
+
25
+
26
@pytest.fixture
def async_openai_client():
    """Provide a fresh asynchronous OpenAI client for each test."""
    client = AsyncOpenAI()
    return client
29
+
30
+
31
@pytest.fixture(scope="module")
def vcr_config():
    """VCR settings: redact credential headers before cassettes are written."""
    sensitive_headers = ["authorization", "api-key"]
    return {"filter_headers": sensitive_headers}
34
+
35
+
36
@pytest.fixture(scope="session")
def exporter():
    """Session-wide in-memory span exporter wired to an instrumented tracer.

    Installs the global tracer provider once per test session and
    instruments the OpenAI SDK, so spans produced during the tests can be
    inspected in memory via the returned exporter.
    """
    memory_exporter = InMemorySpanExporter()
    span_processor = SimpleSpanProcessor(memory_exporter)

    tracer_provider = TracerProvider()
    tracer_provider.add_span_processor(span_processor)
    trace.set_tracer_provider(tracer_provider)
    OpenAIInstrumentation().instrument()
    return memory_exporter
@@ -0,0 +1,142 @@
1
+ import pytest
2
+ import importlib
3
+ import json
4
+ from langtrace_python_sdk.constants.instrumentation.openai import APIS
5
+
6
+
7
@pytest.mark.vcr()
def test_chat_completion(exporter, openai_client):
    """A non-streaming chat completion emits one span with the expected
    langtrace attributes and self-consistent token counts."""
    # Fix: `import importlib` alone does not guarantee `importlib.metadata`
    # is importable as an attribute; import the submodule explicitly.
    import importlib.metadata

    llm_model_value = "gpt-4"
    messages_value = [{"role": "user", "content": "Say this is a test three times"}]

    kwargs = {
        "model": llm_model_value,
        "messages": messages_value,
        "stream": False,
    }

    openai_client.chat.completions.create(**kwargs)
    spans = exporter.get_finished_spans()
    completion_span = spans[-1]
    assert completion_span.name == "openai.chat.completions.create"

    attributes = completion_span.attributes
    expected_attributes = {
        "langtrace.sdk.name": "langtrace-python-sdk",
        "langtrace.service.name": "OpenAI",
        "langtrace.service.type": "llm",
        "langtrace.service.version": importlib.metadata.version("openai"),
        "langtrace.version": "1.0.0",
        "url.full": "https://api.openai.com/v1/",
        "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
        # The resolved model snapshot recorded in the cassette, not the alias.
        "llm.model": "gpt-4-0613",
        "llm.prompts": json.dumps(messages_value),
    }
    for key, expected in expected_attributes.items():
        assert attributes.get(key) == expected
    assert attributes.get("llm.stream") is False

    # Token accounting: all three counts present and input + output == total.
    tokens = json.loads(attributes.get("llm.token.counts"))
    output_tokens = tokens.get("output_tokens")
    prompt_tokens = tokens.get("input_tokens")
    total_tokens = tokens.get("total_tokens")
    assert output_tokens and prompt_tokens and total_tokens
    assert output_tokens + prompt_tokens == total_tokens
44
+
45
+
46
@pytest.mark.vcr()
def test_chat_completion_streaming(exporter, openai_client):
    """A streaming chat completion emits one span whose event count matches
    the number of streamed chunks and whose token counts are consistent."""
    # Fix: `import importlib` alone does not guarantee `importlib.metadata`
    # is importable as an attribute; import the submodule explicitly.
    import importlib.metadata

    llm_model_value = "gpt-4"
    messages_value = [{"role": "user", "content": "Say this is a test three times"}]

    kwargs = {
        "model": llm_model_value,
        "messages": messages_value,
        "stream": True,
    }

    response = openai_client.chat.completions.create(**kwargs)
    # Drain the stream; the span is only finished once iteration completes.
    chunk_count = sum(1 for _ in response)

    spans = exporter.get_finished_spans()
    streaming_span = spans[-1]
    assert streaming_span.name == "openai.chat.completions.create"

    attributes = streaming_span.attributes
    expected_attributes = {
        "langtrace.sdk.name": "langtrace-python-sdk",
        "langtrace.service.name": "OpenAI",
        "langtrace.service.type": "llm",
        "langtrace.service.version": importlib.metadata.version("openai"),
        "langtrace.version": "1.0.0",
        "url.full": "https://api.openai.com/v1/",
        "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
        "llm.model": "gpt-4-0613",
        "llm.prompts": json.dumps(messages_value),
    }
    for key, expected in expected_attributes.items():
        assert attributes.get(key) == expected
    assert attributes.get("llm.stream") is True

    # One event per streamed chunk, plus the stream start and end events.
    events = streaming_span.events
    assert len(events) - 2 == chunk_count

    # Token accounting: all three counts present and input + output == total.
    tokens = json.loads(attributes.get("llm.token.counts"))
    output_tokens = tokens.get("output_tokens")
    prompt_tokens = tokens.get("input_tokens")
    total_tokens = tokens.get("total_tokens")
    assert output_tokens and prompt_tokens and total_tokens
    assert output_tokens + prompt_tokens == total_tokens
93
+
94
+
95
@pytest.mark.vcr()
@pytest.mark.asyncio()
async def test_async_chat_completion_streaming(exporter, async_openai_client):
    """Async variant: a streaming chat completion emits one span whose event
    count matches the streamed chunks and whose token counts are consistent."""
    # Fix: `import importlib` alone does not guarantee `importlib.metadata`
    # is importable as an attribute; import the submodule explicitly.
    import importlib.metadata

    llm_model_value = "gpt-4"
    messages_value = [{"role": "user", "content": "Say this is a test three times"}]

    kwargs = {
        "model": llm_model_value,
        "messages": messages_value,
        "stream": True,
    }

    response = await async_openai_client.chat.completions.create(**kwargs)
    # Drain the stream; the span is only finished once iteration completes.
    chunk_count = 0
    async for _ in response:
        chunk_count += 1

    spans = exporter.get_finished_spans()
    streaming_span = spans[-1]
    assert streaming_span.name == "openai.chat.completions.create"

    attributes = streaming_span.attributes
    expected_attributes = {
        "langtrace.sdk.name": "langtrace-python-sdk",
        "langtrace.service.name": "OpenAI",
        "langtrace.service.type": "llm",
        "langtrace.service.version": importlib.metadata.version("openai"),
        "langtrace.version": "1.0.0",
        "url.full": "https://api.openai.com/v1/",
        "llm.api": APIS["CHAT_COMPLETION"]["ENDPOINT"],
        "llm.model": "gpt-4-0613",
        "llm.prompts": json.dumps(messages_value),
    }
    for key, expected in expected_attributes.items():
        assert attributes.get(key) == expected
    assert attributes.get("llm.stream") is True

    # One event per streamed chunk, plus the stream start and end events.
    events = streaming_span.events
    assert len(events) - 2 == chunk_count

    # Token accounting: all three counts present and input + output == total.
    tokens = json.loads(attributes.get("llm.token.counts"))
    output_tokens = tokens.get("output_tokens")
    prompt_tokens = tokens.get("input_tokens")
    total_tokens = tokens.get("total_tokens")
    assert output_tokens and prompt_tokens and total_tokens
    assert output_tokens + prompt_tokens == total_tokens
File without changes
@@ -0,0 +1,77 @@
1
+ import pytest
2
+ import json
3
+ import importlib
4
+ from langtrace_python_sdk.constants.instrumentation.openai import APIS
5
+
6
+
7
@pytest.mark.vcr()
def test_image_generation(openai_client, exporter):
    """An image generation call emits one span with the expected langtrace
    attributes, echoing back the generated URL and revised prompt."""
    # Fix: `import importlib` alone does not guarantee `importlib.metadata`
    # is importable as an attribute; import the submodule explicitly.
    import importlib.metadata

    llm_model_value = "dall-e-3"
    # Must match the cassette's recorded request body byte-for-byte.
    prompt = "A charming and adorable baby sea otter. This small, fluffy creature is floating gracefully on its back, with its tiny webbed paws folded cutely over its fuzzy belly. It has big, round, innocent eyes that are brimming with youthful curiosity. As it blissfully floats on the calm, sparkling ocean surface under the glow of the golden sunset, it playfully tosses a shiny seashell from one paw to another, showcasing its playful and distinctively otter-like behavior."

    kwargs = {
        "model": llm_model_value,
        "prompt": prompt,
    }

    response = openai_client.images.generate(**kwargs)
    spans = exporter.get_finished_spans()
    image_generation_span = spans[-1]
    assert image_generation_span.name == "openai.images.generate"

    attributes = image_generation_span.attributes
    expected_attributes = {
        "langtrace.sdk.name": "langtrace-python-sdk",
        "langtrace.service.name": "OpenAI",
        "langtrace.service.type": "llm",
        "langtrace.service.version": importlib.metadata.version("openai"),
        "langtrace.version": "1.0.0",
        "url.full": "https://api.openai.com/v1/",
        "llm.api": APIS["IMAGES_GENERATION"]["ENDPOINT"],
        "llm.model": llm_model_value,
        "llm.prompts": json.dumps([prompt]),
    }
    for key, expected in expected_attributes.items():
        assert attributes.get(key) == expected

    # NOTE(review): every recorded response is compared against data[0];
    # this is fine while a single image is requested — revisit if n > 1.
    langtrace_responses = json.loads(attributes.get("llm.responses"))
    for langtrace_response in langtrace_responses:
        assert response.data[0].url == langtrace_response.get("url")
        assert response.data[0].revised_prompt == langtrace_response.get(
            "revised_prompt"
        )
41
+
42
+
43
@pytest.mark.vcr()
@pytest.mark.asyncio()
async def test_async_image_generation(async_openai_client, exporter):
    """Async variant: an image generation call emits one span with the
    expected langtrace attributes, URL and revised prompt."""
    # Fix: `import importlib` alone does not guarantee `importlib.metadata`
    # is importable as an attribute; import the submodule explicitly.
    import importlib.metadata

    llm_model_value = "dall-e-3"
    # Must match the cassette's recorded request body byte-for-byte.
    prompt = "A charming and adorable baby sea otter. This small, fluffy creature is floating gracefully on its back, with its tiny webbed paws folded cutely over its fuzzy belly. It has big, round, innocent eyes that are brimming with youthful curiosity. As it blissfully floats on the calm, sparkling ocean surface under the glow of the golden sunset, it playfully tosses a shiny seashell from one paw to another, showcasing its playful and distinctively otter-like behavior."

    kwargs = {
        "model": llm_model_value,
        "prompt": prompt,
    }

    response = await async_openai_client.images.generate(**kwargs)
    spans = exporter.get_finished_spans()
    image_generation_span = spans[-1]
    assert image_generation_span.name == "openai.images.generate"

    attributes = image_generation_span.attributes
    expected_attributes = {
        "langtrace.sdk.name": "langtrace-python-sdk",
        "langtrace.service.name": "OpenAI",
        "langtrace.service.type": "llm",
        "langtrace.service.version": importlib.metadata.version("openai"),
        "langtrace.version": "1.0.0",
        "url.full": "https://api.openai.com/v1/",
        "llm.api": APIS["IMAGES_GENERATION"]["ENDPOINT"],
        "llm.model": llm_model_value,
        "llm.prompts": json.dumps([prompt]),
    }
    for key, expected in expected_attributes.items():
        assert attributes.get(key) == expected

    # NOTE(review): every recorded response is compared against data[0];
    # this is fine while a single image is requested — revisit if n > 1.
    langtrace_responses = json.loads(attributes.get("llm.responses"))
    for langtrace_response in langtrace_responses:
        assert response.data[0].url == langtrace_response.get("url")
        assert response.data[0].revised_prompt == langtrace_response.get(
            "revised_prompt"
        )
@@ -0,0 +1,72 @@
1
+ from unittest.mock import MagicMock, patch, call
2
+ from langtrace_python_sdk.instrumentation.pinecone.patch import generic_patch
3
+ from opentelemetry.trace import SpanKind
4
+ import importlib.metadata
5
+ import pinecone
6
+ from opentelemetry.trace import SpanKind
7
+ from opentelemetry.trace.status import Status, StatusCode
8
+ from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
9
+ from langtrace_python_sdk.constants.instrumentation.pinecone import APIS
10
+ import unittest
11
+ import json
12
+ from tests.utils import common_setup
13
+
14
+
15
class TestPinecone(unittest.TestCase):
    """Unit tests for the generic Pinecone patch wrapper (upsert path)."""

    # Canned payload the patched upsert returns (as JSON).
    data = {
        "status": "success",
        "message": "Data upserted successfully",
        "upserted_ids": [1, 2, 3],
    }

    def setUp(self):
        # Patch pinecone.Index.upsert to return `data` serialized as JSON.
        self.pinecone_mock, self.tracer, self.span = common_setup(
            self.data, "pinecone.Index.upsert"
        )

    def tearDown(self):
        self.pinecone_mock.stop()

    def test_pinecone(self):
        # Arrange
        version = importlib.metadata.version("pinecone-client")
        method = "UPSERT"
        vectors = [[1, 2, 3], [4, 5, 6]]

        # Act
        wrapped_function = generic_patch(
            pinecone.Index.upsert, method, version, self.tracer
        )
        result = wrapped_function(MagicMock(), MagicMock(), (vectors,), {})

        # Assert
        # Fix: the original used `called_once_with(...)` and `has_calls(...)`,
        # which are NOT Mock assertion methods — on a MagicMock they return a
        # truthy child mock, so assertTrue(...) could never fail (and Python
        # 3.12+ raises on such misspelled assert-like attributes). Use real
        # assertions instead.
        self.tracer.start_as_current_span.assert_called_once()

        api = APIS[method]
        service_provider = SERVICE_PROVIDERS["PINECONE"]
        expected_attributes = {
            "langtrace.sdk.name": "langtrace-python-sdk",
            "langtrace.service.name": service_provider,
            "langtrace.service.type": "vectordb",
            "langtrace.service.version": version,
            "langtrace.version": "1.0.0",
            "db.system": "pinecone",
            "db.operation": api["OPERATION"],
        }
        actual_calls = self.span.set_attribute.call_args_list
        for key, value in expected_attributes.items():
            self.assertIn(call(key, value), actual_calls)

        # The span status must be set exactly once, to OK.
        self.assertEqual(self.span.set_status.call_count, 1)
        status_args, status_kwargs = self.span.set_status.call_args
        status = status_args[0] if status_args else status_kwargs.get("status")
        self.assertEqual(status.status_code, StatusCode.OK)

        # The wrapped call must pass through the mocked JSON payload intact.
        expected_keys = {"status", "message", "upserted_ids"}
        result_keys = set(json.loads(result).keys())
        self.assertSetEqual(expected_keys, result_keys, "Keys mismatch")
70
+
71
# Allow running this module directly as a script.
if __name__ == "__main__":
    unittest.main()
tests/utils.py ADDED
@@ -0,0 +1,21 @@
1
+ from unittest.mock import MagicMock, patch
2
+ import json
3
+
4
def common_setup(data, method_to_mock=None):
    """Build the shared mock scaffolding used by the unit tests.

    When *method_to_mock* is given (a dotted patch target), that method is
    patched to return ``data`` serialized as JSON and the active patcher is
    returned — callers are responsible for ``stop()``-ing it. Otherwise a
    plain ``MagicMock`` whose return value exposes ``data`` as attributes
    is returned.

    Returns a ``(service_mock, tracer, span)`` triple, where ``tracer`` is
    a mock whose ``start_as_current_span`` context manager yields ``span``.
    """
    if not method_to_mock:
        service_mock = MagicMock()
        service_mock.return_value = MagicMock(**data)
    else:
        service_mock = patch(method_to_mock)
        service_mock.start().return_value = json.dumps(data)

    tracer = MagicMock()
    span = MagicMock()

    # start_as_current_span() must behave as a context manager yielding span.
    context_manager = MagicMock()
    context_manager.__enter__.return_value = span
    tracer.start_as_current_span.return_value = context_manager

    return service_mock, tracer, span