langtrace-python-sdk 1.3.4__py3-none-any.whl → 1.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- examples/openai/async_tool_calling_nonstreaming.py +93 -0
- examples/openai/async_tool_calling_streaming.py +167 -0
- examples/openai/chat_completion.py +15 -16
- examples/openai/function_calling.py +14 -14
- examples/openai/tool_calling_nonstreaming.py +92 -0
- examples/openai/tool_calling_streaming.py +167 -0
- langtrace_python_sdk/instrumentation/openai/patch.py +175 -99
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/METADATA +5 -1
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/RECORD +29 -8
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/WHEEL +1 -1
- tests/__init__.py +0 -0
- tests/anthropic/test_anthropic.py +73 -0
- tests/chroma/test_chroma.py +64 -0
- tests/langchain/test_langchain.py +69 -0
- tests/langchain/test_langchain_community.py +69 -0
- tests/langchain/test_langchain_core.py +115 -0
- tests/openai/cassettes/test_async_chat_completion_streaming.yaml +158 -0
- tests/openai/cassettes/test_async_image_generation.yaml +97 -0
- tests/openai/cassettes/test_chat_completion.yaml +101 -0
- tests/openai/cassettes/test_chat_completion_streaming.yaml +200860 -0
- tests/openai/cassettes/test_image_generation.yaml +97 -0
- tests/openai/conftest.py +45 -0
- tests/openai/test_chat_completion.py +142 -0
- tests/openai/test_embeddings.py +0 -0
- tests/openai/test_image_generation.py +77 -0
- tests/pinecone/test_pinecone.py +72 -0
- tests/utils.py +21 -0
- {langtrace_python_sdk-1.3.4.dist-info → langtrace_python_sdk-1.3.6.dist-info}/licenses/LICENSE +0 -0

tests/chroma/test_chroma.py
@@ -0,0 +1,64 @@
+from unittest.mock import MagicMock, patch, call
+from langtrace_python_sdk.constants.instrumentation.common import \
+    SERVICE_PROVIDERS
+from langtrace_python_sdk.constants.instrumentation.openai import APIS
+from opentelemetry.trace.status import StatusCode,Status
+from langtrace_python_sdk.instrumentation.chroma.patch import collection_patch
+from opentelemetry.trace import SpanKind
+from tests.utils import common_setup
+import unittest
+import json
+
+
+class TestChromaPatch(unittest.TestCase):
+    data = {
+        "status": "success",
+    }
+    def setUp(self):
+        self.chroma_mock, self.tracer, self.span = common_setup(self.data, 'chromadb.Collection.add')
+        self.wrapped_method = MagicMock(return_value="mocked method result")
+        self.instance = MagicMock()
+        self.instance.name = "aa"
+
+    def tearDown(self):
+        self.chroma_mock.stop()
+
+    def test_collection_patch_success(self):
+        # Arrange
+        traced_method = collection_patch("ADD", "1.2.3", self.tracer)
+
+        # Act
+        result = traced_method(self.wrapped_method, self.instance, (), {})
+
+        # Assert
+        # Assert the result of the original method is returned
+        self.assertEqual(result, "mocked method result")
+
+        # Assert the span is started with the correct parameters
+        self.assertTrue(self.tracer.start_as_current_span.called_once_with("chromadb.Collection.add", kind=SpanKind.CLIENT))
+
+        # Verify span attributes are set as expected
+        expected_attributes = {
+            'langtrace.sdk.name': 'langtrace-python-sdk',
+            'langtrace.service.name': 'Chroma',
+            'langtrace.service.type': 'vectordb',
+            'langtrace.service.version': '1.2.3',
+            'langtrace.version': '1.0.0',
+            'db.system': 'chromadb',
+            'db.operation': 'add',
+            'db.collection.name': 'aa',
+        }
+        for key, value in expected_attributes.items():
+            self.span.set_attribute.assert_has_calls([call(key, value)], any_order=True)
+
+        actual_calls = self.span.set_attribute.call_args_list
+
+        for key, value in expected_attributes.items():
+            self.assertIn(call(key, value), actual_calls)
+
+        # Assert the span status is set to OK
+        self.span.set_status.assert_called_with(StatusCode.OK)
+
+
+if __name__ == '__main__':
+    unittest.main()
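
Note: every new test module in this diff leans on a shared helper, common_setup, from the new tests/utils.py (+21 lines, whose body is not part of the shown hunks). Judging purely from its call sites — it takes a canned return payload plus an optional dotted path to patch, and hands back a mock/patcher, a tracer, and a span — it plausibly looks like the sketch below. This is an illustrative reconstruction, not the file's actual contents.

# Hypothetical reconstruction of tests/utils.common_setup, inferred only from
# how the tests in this diff call it; the real implementation may differ.
from unittest.mock import MagicMock, patch


def common_setup(data, method_to_mock=None):
    if method_to_mock:
        # Patch the wrapped library call so it returns a canned payload.
        service_mock = patch(method_to_mock)
        mocked = service_mock.start()
        mocked.return_value = MagicMock(**data)
    else:
        service_mock = MagicMock()
        service_mock.return_value = MagicMock(**data)

    # Fake tracer whose context manager yields a mock span the tests inspect.
    span = MagicMock()
    tracer = MagicMock()
    tracer.start_as_current_span.return_value.__enter__.return_value = span

    return service_mock, tracer, span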

tests/langchain/test_langchain.py
@@ -0,0 +1,69 @@
+
+import unittest
+from unittest.mock import MagicMock, call
+from langtrace_python_sdk.instrumentation.langchain.patch import generic_patch
+from opentelemetry.trace import SpanKind
+from opentelemetry.trace import get_tracer
+import importlib.metadata
+from langtrace_python_sdk.constants.instrumentation.openai import APIS
+from opentelemetry.trace.status import Status, StatusCode
+from tests.utils import common_setup
+import json
+
+class TestGenericPatch(unittest.TestCase):
+    data = {"key": "value"}
+    def setUp(self):
+        self.langchain_mock, self.tracer, self.span = common_setup(self.data, None)
+
+    def tearDown(self):
+        # Clean up after each test case
+        pass
+
+    def test_generic_patch(self):
+        # Arrange
+        method_name = "example_method"
+        trace_output = False
+        trace_input = False # Change as per your requirement
+        args = (1, 2, 3)
+        task = "split_text"
+        kwargs = {'key': 'value'}
+        version = importlib.metadata.version('langchain')
+
+        # Act
+        wrapped_function = generic_patch("langchain.text_splitter", task, self.tracer, version, trace_output, trace_input)
+        result = wrapped_function(self.langchain_mock, MagicMock(), args, kwargs)
+
+        # Assert
+        self.assertTrue(self.tracer.start_as_current_span.called_once_with(method_name, kind=SpanKind.CLIENT))
+
+        service_provider = "Langchain"
+        expected_attributes = {
+            'langtrace.sdk.name': 'langtrace-python-sdk',
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "framework",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "langchain.task.name": task,
+        }
+
+
+        self.assertTrue(
+            self.span.set_attribute.has_calls(
+                [call(key, value) for key, value in expected_attributes.items()], any_order=True
+            )
+        )
+
+        actual_calls = self.span.set_attribute.call_args_list
+
+        for key, value in expected_attributes.items():
+            self.assertIn(call(key, value), actual_calls)
+
+        self.assertEqual(self.span.set_status.call_count, 1)
+        self.assertTrue(self.span.set_status.has_calls([call(Status(StatusCode.OK))]))
+
+        expected_result_data = {"key": "value" }
+
+        self.assertEqual(result.key, expected_result_data["key"])
+
+if __name__ == '__main__':
+    unittest.main()

tests/langchain/test_langchain_community.py
@@ -0,0 +1,69 @@
+
+import unittest
+from unittest.mock import MagicMock, Mock, patch, call
+from langtrace_python_sdk.instrumentation.langchain_community.patch import generic_patch
+from opentelemetry.trace import SpanKind
+from opentelemetry.trace import get_tracer
+import importlib.metadata
+import openai
+from langtrace_python_sdk.constants.instrumentation.openai import APIS
+from opentelemetry.trace.status import Status, StatusCode
+import json
+from tests.utils import common_setup
+class TestGenericPatch(unittest.TestCase):
+    data = {"key": "value"}
+    def setUp(self):
+        self.langchain_mock, self.tracer, self.span = common_setup(self.data, None)
+
+    def tearDown(self):
+        # Clean up after each test case
+        pass
+
+    def test_generic_patch(self):
+        # Arrange
+        method_name = "example_method"
+        trace_output = False
+        trace_input = False
+        args = (1, 2, 3)
+        task = "vector_store"
+        kwargs = {'key': 'value'}
+        version = importlib.metadata.version("langchain-community")
+
+        # Act
+        wrapped_function = generic_patch("langchain_community.vectorstores.faiss", task, self.tracer, version, trace_output, trace_input)
+        result = wrapped_function(self.langchain_mock, MagicMock(), args, kwargs)
+
+        # Assert
+        self.assertTrue(self.tracer.start_as_current_span.called_once_with(method_name, kind=SpanKind.CLIENT))
+
+        service_provider = "Langchain Community"
+        expected_attributes = {
+            'langtrace.sdk.name': 'langtrace-python-sdk',
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "framework",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "langchain.task.name": task,
+        }
+
+
+        self.assertTrue(
+            self.span.set_attribute.has_calls(
+                [call(key, value) for key, value in expected_attributes.items()], any_order=True
+            )
+        )
+
+        actual_calls = self.span.set_attribute.call_args_list
+
+        for key, value in expected_attributes.items():
+            self.assertIn(call(key, value), actual_calls)
+
+        self.assertEqual(self.span.set_status.call_count, 1)
+        self.assertTrue(self.span.set_status.has_calls([call(Status(StatusCode.OK))]))
+
+        expected_result_data = {"key": "value" }
+        self.assertEqual(result.key, expected_result_data["key"])
+
+
+if __name__ == '__main__':
+    unittest.main()

tests/langchain/test_langchain_core.py
@@ -0,0 +1,115 @@
+
+import unittest
+from unittest.mock import MagicMock, Mock, patch, call
+from langtrace_python_sdk.instrumentation.langchain_core.patch import generic_patch, runnable_patch
+from opentelemetry.trace import SpanKind
+from opentelemetry.trace import get_tracer
+import importlib.metadata
+import openai
+from langtrace_python_sdk.constants.instrumentation.openai import APIS
+from opentelemetry.trace.status import Status, StatusCode
+import json
+from tests.utils import common_setup
+class TestGenericPatch(unittest.TestCase):
+    data = {"items": "value"}
+    def setUp(self):
+        self.langchain_mock, self.tracer, self.span = common_setup(self.data, None)
+
+    def tearDown(self):
+        # Clean up after each test case
+        pass
+
+    def test_generic_patch(self):
+        # Arrange
+        method_name = "example_method"
+        trace_output = False
+        trace_input = True
+        task = "retriever"
+        args = (1, 2, 3)
+        kwargs = {'key': 'value'}
+        version = importlib.metadata.version("langchain-core")
+
+        # Act
+        wrapped_function = generic_patch("langchain_core.retrievers", task , self.tracer, version, trace_output, trace_input)
+        result = wrapped_function(self.langchain_mock, MagicMock(), args, kwargs)
+
+        # Assert
+        self.assertTrue(self.tracer.start_as_current_span.called_once_with(method_name, kind=SpanKind.CLIENT))
+
+        service_provider = "Langchain Core"
+        expected_attributes = {
+            'langtrace.sdk.name': 'langtrace-python-sdk',
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "framework",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "langchain.task.name": task,
+        }
+
+        self.assertTrue(
+            self.span.set_attribute.has_calls(
+                [call(key, value) for key, value in expected_attributes.items()], any_order=True
+            )
+        )
+
+        actual_calls = self.span.set_attribute.call_args_list
+        for key, value in expected_attributes.items():
+            self.assertIn(call(key, value), actual_calls)
+
+
+        self.assertEqual(self.span.set_status.call_count, 1)
+        self.assertTrue(self.span.set_status.has_calls([call(Status(StatusCode.OK))]))
+
+        expected_result_data = {"items": "value" }
+        self.assertEqual(result.items, expected_result_data["items"])
+
+    def test_runnable_patch(self):
+        # Arrange
+        method_name = "example_method"
+        trace_output = False
+        trace_input = True
+        args = (1, 2, 3)
+        kwargs = {'key': 'value'}
+        version = importlib.metadata.version("langchain-core")
+
+        # Act
+        wrapped_function = runnable_patch("langchain_core.runnables.passthrough",
+            "runnablepassthrough", self.tracer, version, trace_output, trace_input)
+
+        result = wrapped_function(self.langchain_mock, MagicMock(), args, kwargs)
+
+        # Assert
+        self.assertTrue(self.tracer.start_as_current_span.called_once_with(method_name, kind=SpanKind.CLIENT))
+
+        service_provider = "Langchain Core"
+        expected_attributes = {
+            'langtrace.sdk.name': 'langtrace-python-sdk',
+            "langtrace.service.name": service_provider,
+            "langtrace.service.type": "framework",
+            "langtrace.service.version": version,
+            "langtrace.version": "1.0.0",
+            "langchain.task.name": "runnablepassthrough",
+        }
+
+        self.assertTrue(
+            self.span.set_attribute.has_calls(
+                [call(key, value) for key, value in expected_attributes.items()], any_order=True
+            )
+        )
+
+        actual_calls = self.span.set_attribute.call_args_list
+
+        for key, value in expected_attributes.items():
+            self.assertIn(call(key, value), actual_calls)
+
+
+        self.assertEqual(self.span.set_status.call_count, 1)
+        self.assertTrue(self.span.set_status.has_calls([call(Status(StatusCode.OK))]))
+
+        expected_result_data = {"items": "value" }
+
+        self.assertEqual(result.items, expected_result_data["items"])
+
+
+if __name__ == '__main__':
+    unittest.main()

tests/openai/cassettes/test_async_chat_completion_streaming.yaml
@@ -0,0 +1,158 @@
+interactions:
+- request:
+    body: '{"messages": [{"role": "user", "content": "Say this is a test three times"}],
+      "model": "gpt-4", "stream": true}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '111'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - AsyncOpenAI/Python 1.23.2
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - async:asyncio
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.23.2
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.11.5
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: 'data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"This"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        is"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        a"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        test"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        This"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        is"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        a"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        test"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        This"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        is"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        a"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"
+        test"},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{"content":"."},"logprobs":null,"finish_reason":null}]}
+
+
+        data: {"id":"chatcmpl-9G7iUffCm37bRsnrZcMjcD55dwZcN","object":"chat.completion.chunk","created":1713629938,"model":"gpt-4-0613","system_fingerprint":null,"choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]}
+
+
+        data: [DONE]
+
+
+        '
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8776740aaa3e73a3-MRS
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Type:
+      - text/event-stream
+      Date:
+      - Sat, 20 Apr 2024 16:18:59 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=VDKSK7GoYoxisSawjMl9W0b7YZarMfekW_Y69gq5ons-1713629939-1.0.1.1-fH6Sc.9fQ0Kb4MvzvnBRlAmk_cXfYNeNbDd_K6pZeMNxnmMy3qiDlS.olHx3Y7rfDhYg7a3FffrCHr.Xu8j_Uw;
+        path=/; expires=Sat, 20-Apr-24 16:48:59 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=GmZ5rJWGxt2nlOYm6_pDkhIo_8V.YkD6O9_B1qloO7g-1713629939085-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-model:
+      - gpt-4-0613
+      openai-organization:
+      - scale3-1
+      openai-processing-ms:
+      - '247'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '300000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '299975'
+      x-ratelimit-reset-requests:
+      - 6ms
+      x-ratelimit-reset-tokens:
+      - 5ms
+      x-request-id:
+      - req_4c6f987df5c44fb9c842d54126d2608d
+    status:
+      code: 200
+      message: OK
+version: 1
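
The cassette above (and the two that follow) appears to be a vcrpy-style recording of an OpenAI HTTP exchange. The new tests/openai/conftest.py (+45) and tests/openai/test_chat_completion.py (+142) are not shown in this diff, so the exact wiring is unknown; a minimal sketch of how such a cassette can be replayed offline with vcrpy, using illustrative names rather than the package's actual test code, would look roughly like this:

# Sketch only: replaying a recorded cassette with vcrpy (assumed setup;
# the SDK's own conftest.py/fixtures may differ).
import vcr
from openai import OpenAI

cassette_vcr = vcr.VCR(
    cassette_library_dir="tests/openai/cassettes",
    record_mode="none",                # replay only, never hit the network
    filter_headers=["authorization"],  # keep API keys out of cassettes
)


def test_chat_completion_replay():
    with cassette_vcr.use_cassette("test_chat_completion.yaml"):
        client = OpenAI(api_key="test-key")  # dummy key; responses come from the cassette
        result = client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": "Say this is a test three times"}],
            stream=False,
        )
        assert result.choices[0].message.content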

tests/openai/cassettes/test_async_image_generation.yaml
@@ -0,0 +1,97 @@
+interactions:
+- request:
+    body: '{"prompt": "A charming and adorable baby sea otter. This small, fluffy
+      creature is floating gracefully on its back, with its tiny webbed paws folded
+      cutely over its fuzzy belly. It has big, round, innocent eyes that are brimming
+      with youthful curiosity. As it blissfully floats on the calm, sparkling ocean
+      surface under the glow of the golden sunset, it playfully tosses a shiny seashell
+      from one paw to another, showcasing its playful and distinctively otter-like
+      behavior.", "model": "dall-e-3"}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '498'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - AsyncOpenAI/Python 1.23.2
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - async:asyncio
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.23.2
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.11.5
+    method: POST
+    uri: https://api.openai.com/v1/images/generations
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA1yRYWvbSBCGv/tXDIb4kyRLliLLAXMkF0KT9O5S6qZJj+OY1Y6krda78s7KrlL6
+        3w8ptJD7sgy87868z8z3GcC8dISe5PwCknWS5mmaxmkwChI9zi/g7xkAwPfpBZg7Oiom+W/n7L7z
+        8wuYXzs8AULZoNsrUwMaCSitQ6EJBIoBmBCs9+Qi2DUEvEetA6h0X1UDTPN7R6AYKm3Rjz1qhyVV
+        vdYDWAPKMwgs22CqvDIDnEgIktDhiaGyWpKEsvc0+o/kJl/Vv7wMIEjrIYJbD9xZNzZSdQDO9kYG
+        oIyxJRkPNBBDpbQmCSflGxhs75uq11D2TllWfojgk5HkwDcEtbYnsBUg1ONsA9wbJh9M4oQ84YLQ
+        ivkVY0LjkQahRL0PgDt0rR5pbUk49nAVljRl7TQOr/+8ZSYGBG5GbibkhrSGytk9WEPjCsBbQGN9
+        Qy6ARtWNVnUz7XHcg1TslSm9OlLws/FrvlCrlkBQg0dlXTQPfh65d3q8bON9xxfLpUUlUWvCTnXO
+        Si57joS2Iiqto+ikjLQnjgz5ZefUET0travDq6/509O3bzd1e0fPN6bhlXnSsXHLnsmF7w77zrn2
+        /v74+Pjy+3u/2qXNEycfl2pfh9zau/cPxbP4ozk8mMPjuvr6/O7D5jrqTP0b++0qXmVhnIWreJfk
+        Z+llcn6WXsbplwXTG614o3Vbt+DjaEjCuAjjfMFuKxaOS7lVRitDY+23ao81LTtTL7i1Sm5zRJQk
+        KcwqkYZZvilCLKo8zItMrtdFLuJ8veDWK7nFrChLPM9DyiWGWZFRiEWShZuyyDb5+UqUIh2tv1Im
+        m91qdZZeppsxZf5lwe0bhP+LvBULbt9CqHp7WMnbQ//591vR9B293BWbPz8J3WZ19dcy+fxM7c1V
+        zh+uH46b4iy9nk93/jED+Gf2Y/YfAAAA//8DAOKMzHwGBAAA
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 8776c5e52c3141e5-MRS
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Sat, 20 Apr 2024 17:15:03 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=PPmQaZgp0kP_w8e9X7YcZGZtYzSLNfQUY.TG.SD7Mm0-1713633303-1.0.1.1-RkZB0XF8ApSlz3LiOh15SGhomE_XSMJJ2hL6wVlaShtkXHWvH.VZhzRGqOG.rxD9v..GY0sNMFx7xtMSpCePWg;
+        path=/; expires=Sat, 20-Apr-24 17:45:03 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=2YN.H.aWijGSLvi2MBRgIoIhFI2FYDWLqNZOdzmNK80-1713633303345-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-organization:
+      - scale3-1
+      openai-processing-ms:
+      - '12124'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-request-id:
+      - req_e6ff12ba31f025e69137c82fbff524fb
+    status:
+      code: 200
+      message: OK
+version: 1

tests/openai/cassettes/test_chat_completion.yaml
@@ -0,0 +1,101 @@
+interactions:
+- request:
+    body: '{"messages": [{"role": "user", "content": "Say this is a test three times"}],
+      "model": "gpt-4", "stream": false}'
+    headers:
+      accept:
+      - application/json
+      accept-encoding:
+      - gzip, deflate
+      connection:
+      - keep-alive
+      content-length:
+      - '112'
+      content-type:
+      - application/json
+      host:
+      - api.openai.com
+      user-agent:
+      - OpenAI/Python 1.23.2
+      x-stainless-arch:
+      - arm64
+      x-stainless-async:
+      - 'false'
+      x-stainless-lang:
+      - python
+      x-stainless-os:
+      - MacOS
+      x-stainless-package-version:
+      - 1.23.2
+      x-stainless-runtime:
+      - CPython
+      x-stainless-runtime-version:
+      - 3.11.5
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA4SQzWrDMBCE736KReckxE5sE99KCg0UeiiBBkoJirKx1cpaVdpA25B3L3Kcn556
+        EWhGM/29FgmA0FtRgVCNZNU6M5w9FKq521NerGafWfm8XMwXPy+r+WOZPd2LQUzQ5h0Vn1MjRa0z
+        yJrsyVYeJWNsTct0UmR5Ock7o6UtmhirHQ+nw3GRTvpEQ1phEBW8JgAAh+6MbHaLX6KC8eCstBiC
+        rFFUl0cAwpOJipAh6MDSshhcTUWW0Xa4y0YH0AEkMAYewT930ZccL9MN1c7TJpLavTEXfaetDs3a
+        owxk46TA5E7xYwLw1m25/wMunKfW8ZrpA20sTKenOnH9zxsz700mluaqZ7Ok5xPhOzC26522NXrn
+        dbdypEyOyS8AAAD//wMA4oDeWukBAAA=
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 87760d6f7ac6077a-MRS
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Sat, 20 Apr 2024 15:08:56 GMT
+      Server:
+      - cloudflare
+      Set-Cookie:
+      - __cf_bm=.AWrS_oG3OU4o0c3bVjHojpv.kEUTemvQkhWuz9iz5U-1713625736-1.0.1.1-4NWMdmDl_wiWkhSU1E_K0o93evj.kwjYpG.N0O35W8ILiLMnk.fiJyCvlFOzyLJxK1VRH2JnM0znP_As2May1A;
+        path=/; expires=Sat, 20-Apr-24 15:38:56 GMT; domain=.api.openai.com; HttpOnly;
+        Secure; SameSite=None
+      - _cfuvid=LwFiBqHo.B57JZUvIXF2NgWHbiSKHD34H9ak.jh4FYw-1713625736732-0.0.1.1-604800000;
+        path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+      Transfer-Encoding:
+      - chunked
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-model:
+      - gpt-4-0613
+      openai-organization:
+      - scale3-1
+      openai-processing-ms:
+      - '829'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '10000'
+      x-ratelimit-limit-tokens:
+      - '300000'
+      x-ratelimit-remaining-requests:
+      - '9999'
+      x-ratelimit-remaining-tokens:
+      - '299975'
+      x-ratelimit-reset-requests:
+      - 6ms
+      x-ratelimit-reset-tokens:
+      - 5ms
+      x-request-id:
+      - req_de693fb8a8b5e9790ff16c8fb0350074
+    status:
+      code: 200
+      message: OK
+version: 1