lmnr 0.4.17b0 (py2.py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. lmnr/__init__.py +5 -0
  2. lmnr/cli.py +39 -0
  3. lmnr/sdk/__init__.py +0 -0
  4. lmnr/sdk/decorators.py +66 -0
  5. lmnr/sdk/evaluations.py +354 -0
  6. lmnr/sdk/laminar.py +403 -0
  7. lmnr/sdk/log.py +39 -0
  8. lmnr/sdk/types.py +155 -0
  9. lmnr/sdk/utils.py +99 -0
  10. lmnr/traceloop_sdk/.flake8 +12 -0
  11. lmnr/traceloop_sdk/.python-version +1 -0
  12. lmnr/traceloop_sdk/__init__.py +89 -0
  13. lmnr/traceloop_sdk/config/__init__.py +9 -0
  14. lmnr/traceloop_sdk/decorators/__init__.py +0 -0
  15. lmnr/traceloop_sdk/decorators/base.py +178 -0
  16. lmnr/traceloop_sdk/instruments.py +34 -0
  17. lmnr/traceloop_sdk/tests/__init__.py +1 -0
  18. lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_and_external_association_properties.yaml +101 -0
  19. lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_association_properties.yaml +99 -0
  20. lmnr/traceloop_sdk/tests/cassettes/test_manual/test_manual_report.yaml +98 -0
  21. lmnr/traceloop_sdk/tests/cassettes/test_manual/test_resource_attributes.yaml +98 -0
  22. lmnr/traceloop_sdk/tests/cassettes/test_privacy_no_prompts/test_simple_workflow.yaml +199 -0
  23. lmnr/traceloop_sdk/tests/cassettes/test_prompt_management/test_prompt_management.yaml +202 -0
  24. lmnr/traceloop_sdk/tests/cassettes/test_sdk_initialization/test_resource_attributes.yaml +199 -0
  25. lmnr/traceloop_sdk/tests/cassettes/test_tasks/test_task_io_serialization_with_langchain.yaml +96 -0
  26. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_aworkflow.yaml +98 -0
  27. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_simple_workflow.yaml +199 -0
  28. lmnr/traceloop_sdk/tests/cassettes/test_workflows/test_streaming_workflow.yaml +167 -0
  29. lmnr/traceloop_sdk/tests/conftest.py +111 -0
  30. lmnr/traceloop_sdk/tests/test_association_properties.py +229 -0
  31. lmnr/traceloop_sdk/tests/test_manual.py +48 -0
  32. lmnr/traceloop_sdk/tests/test_nested_tasks.py +47 -0
  33. lmnr/traceloop_sdk/tests/test_privacy_no_prompts.py +50 -0
  34. lmnr/traceloop_sdk/tests/test_sdk_initialization.py +57 -0
  35. lmnr/traceloop_sdk/tests/test_tasks.py +32 -0
  36. lmnr/traceloop_sdk/tests/test_workflows.py +262 -0
  37. lmnr/traceloop_sdk/tracing/__init__.py +1 -0
  38. lmnr/traceloop_sdk/tracing/attributes.py +9 -0
  39. lmnr/traceloop_sdk/tracing/content_allow_list.py +24 -0
  40. lmnr/traceloop_sdk/tracing/context_manager.py +13 -0
  41. lmnr/traceloop_sdk/tracing/tracing.py +913 -0
  42. lmnr/traceloop_sdk/utils/__init__.py +26 -0
  43. lmnr/traceloop_sdk/utils/in_memory_span_exporter.py +61 -0
  44. lmnr/traceloop_sdk/utils/json_encoder.py +20 -0
  45. lmnr/traceloop_sdk/utils/package_check.py +8 -0
  46. lmnr/traceloop_sdk/version.py +1 -0
  47. lmnr-0.4.17b0.dist-info/LICENSE +75 -0
  48. lmnr-0.4.17b0.dist-info/METADATA +250 -0
  49. lmnr-0.4.17b0.dist-info/RECORD +50 -0
  50. lmnr-0.4.17b0.dist-info/WHEEL +4 -0
lmnr/traceloop_sdk/.flake8
@@ -0,0 +1,12 @@
+ [flake8]
+ exclude =
+     .git,
+     __pycache__,
+     build,
+     dist,
+     .tox,
+     venv,
+     .venv,
+     .pytest_cache
+ max-line-length = 120
+ per-file-ignores = __init__.py:F401
lmnr/traceloop_sdk/.python-version
@@ -0,0 +1 @@
+ 3.9.5
lmnr/traceloop_sdk/__init__.py
@@ -0,0 +1,89 @@
+ import os
+ import sys
+ from pathlib import Path
+
+ from typing import Optional, Set
+ from opentelemetry.sdk.trace import SpanProcessor
+ from opentelemetry.sdk.trace.export import SpanExporter
+ from opentelemetry.sdk.resources import SERVICE_NAME
+ from opentelemetry.propagators.textmap import TextMapPropagator
+ from opentelemetry.util.re import parse_env_headers
+
+ from lmnr.traceloop_sdk.instruments import Instruments
+ from lmnr.traceloop_sdk.config import (
+     is_content_tracing_enabled,
+     is_tracing_enabled,
+ )
+ from lmnr.traceloop_sdk.tracing.tracing import TracerWrapper
+ from typing import Dict
+
+
+ class Traceloop:
+     AUTO_CREATED_KEY_PATH = str(
+         Path.home() / ".cache" / "traceloop" / "auto_created_key"
+     )
+     AUTO_CREATED_URL = str(Path.home() / ".cache" / "traceloop" / "auto_created_url")
+
+     __tracer_wrapper: TracerWrapper
+
+     @staticmethod
+     def init(
+         app_name: Optional[str] = sys.argv[0],
+         api_endpoint: str = "https://api.lmnr.ai",
+         api_key: Optional[str] = None,
+         headers: Dict[str, str] = {},
+         disable_batch=False,
+         exporter: Optional[SpanExporter] = None,
+         processor: Optional[SpanProcessor] = None,
+         propagator: Optional[TextMapPropagator] = None,
+         should_enrich_metrics: bool = True,
+         resource_attributes: dict = {},
+         instruments: Optional[Set[Instruments]] = None,
+     ) -> None:
+         api_endpoint = os.getenv("TRACELOOP_BASE_URL") or api_endpoint
+         api_key = os.getenv("TRACELOOP_API_KEY") or api_key
+
+         if not is_tracing_enabled():
+             # print(Fore.YELLOW + "Tracing is disabled" + Fore.RESET)
+             return
+
+         enable_content_tracing = is_content_tracing_enabled()
+
+         headers = os.getenv("TRACELOOP_HEADERS") or headers
+
+         if isinstance(headers, str):
+             headers = parse_env_headers(headers)
+
+         if (
+             not exporter
+             and not processor
+             and api_endpoint == "https://api.lmnr.ai"
+             and not api_key
+         ):
+             print(
+                 "Error: Missing API key,"
+                 + " go to project settings to create one"
+             )
+             print("Set the LMNR_PROJECT_API_KEY environment variable to the key")
+             return
+
+         if api_key and not exporter and not processor and not headers:
+             headers = {
+                 "Authorization": f"Bearer {api_key}",
+             }
+
+         # print(Fore.RESET)
+
+         # Tracer init
+         resource_attributes.update({SERVICE_NAME: app_name})
+         TracerWrapper.set_static_params(
+             resource_attributes, enable_content_tracing, api_endpoint, headers
+         )
+         Traceloop.__tracer_wrapper = TracerWrapper(
+             disable_batch=disable_batch,
+             processor=processor,
+             propagator=propagator,
+             exporter=exporter,
+             should_enrich_metrics=should_enrich_metrics,
+             instruments=instruments,
+         )
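In short, init() resolves its endpoint and key from the environment, returns early when tracing is disabled, and folds an explicit API key into an Authorization header before constructing the TracerWrapper. A minimal usage sketch based only on the signature above (not part of this diff; the app name is illustrative):

    import os

    from lmnr.traceloop_sdk import Traceloop

    # Hypothetical call; "my-service" is a placeholder.
    Traceloop.init(
        app_name="my-service",                   # recorded as the OTel service.name resource attribute
        api_key=os.getenv("TRACELOOP_API_KEY"),  # init() itself also falls back to this variable
        disable_batch=True,                      # presumably selects a non-batching span processor
    )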
lmnr/traceloop_sdk/config/__init__.py
@@ -0,0 +1,9 @@
+ import os
+
+
+ def is_tracing_enabled() -> bool:
+     return (os.getenv("TRACELOOP_TRACING_ENABLED") or "true").lower() == "true"
+
+
+ def is_content_tracing_enabled() -> bool:
+     return (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true"
lmnr/traceloop_sdk/decorators/__init__.py
File without changes
lmnr/traceloop_sdk/decorators/base.py
@@ -0,0 +1,178 @@
+ import json
+ from functools import wraps
+ import os
+ import types
+ from typing import Any, Optional
+ import warnings
+
+ from opentelemetry import trace
+ from opentelemetry import context as context_api
+
+ from lmnr.sdk.utils import get_input_from_func_args, is_method
+ from lmnr.traceloop_sdk.tracing import get_tracer
+ from lmnr.traceloop_sdk.tracing.attributes import SPAN_INPUT, SPAN_OUTPUT, SPAN_PATH
+ from lmnr.traceloop_sdk.tracing.tracing import TracerWrapper, get_span_path
+ from lmnr.traceloop_sdk.utils.json_encoder import JSONEncoder
+
+
+ class CustomJSONEncoder(JSONEncoder):
+     def default(self, o: Any) -> Any:
+         try:
+             return super().default(o)
+         except TypeError:
+             return str(o)  # Fallback to string representation for unsupported types
+
+
+ def _json_dumps(data: dict) -> str:
+     try:
+         with warnings.catch_warnings():
+             warnings.simplefilter("ignore", RuntimeWarning)
+             return json.dumps(data, cls=CustomJSONEncoder)
+     except Exception:
+         # Log the exception and return a placeholder if serialization completely fails
+         # Telemetry().log_exception(e)
+         return "{}"  # Return an empty JSON object as a fallback
+
+
+ def entity_method(
+     name: Optional[str] = None,
+ ):
+     def decorate(fn):
+         @wraps(fn)
+         def wrap(*args, **kwargs):
+             if not TracerWrapper.verify_initialized():
+                 return fn(*args, **kwargs)
+
+             span_name = name or fn.__name__
+
+             with get_tracer() as tracer:
+                 span = tracer.start_span(span_name)
+
+                 span_path = get_span_path(span_name)
+                 span.set_attribute(SPAN_PATH, span_path)
+                 ctx = context_api.set_value("span_path", span_path)
+
+                 ctx = trace.set_span_in_context(span, ctx)
+                 ctx_token = context_api.attach(ctx)
+
+                 try:
+                     if _should_send_prompts():
+                         span.set_attribute(
+                             SPAN_INPUT,
+                             _json_dumps(
+                                 get_input_from_func_args(
+                                     fn, is_method(fn), args, kwargs
+                                 )
+                             ),
+                         )
+                 except TypeError:
+                     pass
+
+                 res = fn(*args, **kwargs)
+
+                 # span will be ended in the generator
+                 if isinstance(res, types.GeneratorType):
+                     return _handle_generator(span, res)
+
+                 try:
+                     if _should_send_prompts():
+                         span.set_attribute(
+                             SPAN_OUTPUT,
+                             _json_dumps(res),
+                         )
+                 except TypeError:
+                     pass
+
+                 span.end()
+                 context_api.detach(ctx_token)
+
+                 return res
+
+         return wrap
+
+     return decorate
+
+
+ # Async Decorators
+
+
+ def aentity_method(
+     name: Optional[str] = None,
+ ):
+     def decorate(fn):
+         @wraps(fn)
+         async def wrap(*args, **kwargs):
+             if not TracerWrapper.verify_initialized():
+                 return await fn(*args, **kwargs)
+
+             span_name = name or fn.__name__
+
+             with get_tracer() as tracer:
+                 span = tracer.start_span(span_name)
+
+                 span_path = get_span_path(span_name)
+                 span.set_attribute(SPAN_PATH, span_path)
+                 ctx = context_api.set_value("span_path", span_path)
+
+                 ctx = trace.set_span_in_context(span, ctx)
+                 ctx_token = context_api.attach(ctx)
+
+                 try:
+                     if _should_send_prompts():
+                         span.set_attribute(
+                             SPAN_INPUT,
+                             _json_dumps(
+                                 get_input_from_func_args(
+                                     fn, is_method(fn), args, kwargs
+                                 )
+                             ),
+                         )
+                 except TypeError:
+                     pass
+
+                 res = await fn(*args, **kwargs)
+
+                 # span will be ended in the generator
+                 if isinstance(res, types.AsyncGeneratorType):
+                     return await _ahandle_generator(span, ctx_token, res)
+
+                 try:
+                     if _should_send_prompts():
+                         span.set_attribute(SPAN_OUTPUT, json.dumps(res))
+                 except TypeError:
+                     pass
+
+                 span.end()
+                 context_api.detach(ctx_token)
+
+                 return res
+
+         return wrap
+
+     return decorate
+
+
+ def _handle_generator(span, res):
+     # for some reason the SPAN_KEY is not being set in the context of the generator, so we re-set it
+     context_api.attach(trace.set_span_in_context(span))
+     yield from res
+
+     span.end()
+
+     # Note: we don't detach the context here as this fails in some situations
+     # https://github.com/open-telemetry/opentelemetry-python/issues/2606
+     # This is not a problem since the context will be detached automatically during garbage collection
+
+
+ async def _ahandle_generator(span, ctx_token, res):
+     async for part in res:
+         yield part
+
+     span.end()
+     context_api.detach(ctx_token)
+
+
+ def _should_send_prompts():
+     return (
+         os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
+     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
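The sync and async decorators are symmetric: each opens a span named after the function, records inputs and outputs as JSON when content tracing is allowed, and hands generators off to helpers that end the span when iteration finishes. A hypothetical application (function names and the name= value are illustrative, not part of this package):

    from lmnr.traceloop_sdk.decorators.base import aentity_method, entity_method

    @entity_method(name="summarize")   # span is named "summarize"
    def summarize(text: str) -> str:
        return text[:100]

    @aentity_method()                  # span name falls back to fn.__name__, i.e. "asummarize"
    async def asummarize(text: str) -> str:
        return summarize(text)

If TracerWrapper has not been initialized (for example, Traceloop.init() was never called), the wrappers simply invoke the underlying function without creating a span.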
lmnr/traceloop_sdk/instruments.py
@@ -0,0 +1,34 @@
+ from enum import Enum
+
+
+ class Instruments(Enum):
+     # The list of libraries which will be autoinstrumented
+     # if no specific instruments are provided to initialize()
+     OPENAI = "openai"
+     ANTHROPIC = "anthropic"
+     COHERE = "cohere"
+     PINECONE = "pinecone"
+     CHROMA = "chroma"
+     GOOGLE_GENERATIVEAI = "google_generativeai"
+     LANGCHAIN = "langchain"
+     MISTRAL = "mistral"
+     OLLAMA = "ollama"
+     LLAMA_INDEX = "llama_index"
+     MILVUS = "milvus"
+     TRANSFORMERS = "transformers"
+     TOGETHER = "together"
+     BEDROCK = "bedrock"
+     REPLICATE = "replicate"
+     VERTEXAI = "vertexai"
+     WATSONX = "watsonx"
+     WEAVIATE = "weaviate"
+     ALEPHALPHA = "alephalpha"
+     MARQO = "marqo"
+     LANCEDB = "lancedb"
+
+     # The following libraries will not be autoinstrumented unless
+     # specified explicitly in the initialize() call.
+     REDIS = "redis"
+     REQUESTS = "requests"
+     URLLIB3 = "urllib3"
+     PYMYSQL = "pymysql"
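Per the comments above, the first group is instrumented automatically when no instruments are passed, while the second group is opt-in only. A sketch of an explicit selection passed through Traceloop.init (the app name is illustrative):

    from lmnr.traceloop_sdk import Traceloop
    from lmnr.traceloop_sdk.instruments import Instruments

    # Instrument only the OpenAI and LangChain integrations, plus redis (opt-in only).
    Traceloop.init(
        app_name="my-service",
        instruments={Instruments.OPENAI, Instruments.LANGCHAIN, Instruments.REDIS},
    )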
lmnr/traceloop_sdk/tests/__init__.py
@@ -0,0 +1 @@
+ # """unit tests."""
lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_and_external_association_properties.yaml
@@ -0,0 +1,101 @@
+ interactions:
+ - request:
+     body: '{"messages": [{"content": "You are helpful assistant", "role": "system"},
+       {"content": "tell me a short joke", "role": "user"}], "model": "gpt-3.5-turbo",
+       "logprobs": false, "n": 1, "stream": false, "temperature": 0.7}'
+     headers:
+       accept:
+       - application/json
+       accept-encoding:
+       - gzip, deflate
+       connection:
+       - keep-alive
+       content-length:
+       - '217'
+       content-type:
+       - application/json
+       host:
+       - api.openai.com
+       user-agent:
+       - OpenAI/Python 1.35.15
+       x-stainless-arch:
+       - arm64
+       x-stainless-async:
+       - 'false'
+       x-stainless-lang:
+       - python
+       x-stainless-os:
+       - MacOS
+       x-stainless-package-version:
+       - 1.35.15
+       x-stainless-runtime:
+       - CPython
+       x-stainless-runtime-version:
+       - 3.9.5
+     method: POST
+     uri: https://api.openai.com/v1/chat/completions
+   response:
+     body:
+       string: !!binary |
+         H4sIAAAAAAAAA1SRS0/DMBCE7/kVW1+4tKgJCo9eEHABhHooLyGEKtfdJgbHa7wbaFX1v6OEtIWL
+         D/PtrGbW6wRA2bkagTKlFlMFN7jIbiaP44fF59Pt5Pj65epu6Zbf44un69MxLVW/cdDsHY1sXYeG
+         quBQLPlfbCJqwWZrepLlaTo8S7MWVDRH19iKIIOjw3wgdZzRYJhmeecsyRpkNYLXBABg3b5NRj/H
+         pRrBsL9VKmTWBarRbghARXKNojSzZdFeVH8PDXlB38a+ryP24LlcwZz8gQAbi14sC4PEmgW0UMXn
+         cIlG14wgJa6g0h8IdQD8wriS0vqi93d9xEXNuqnna+c6fbPL66gIkWbc8Z2+sN5yOY2omXyTjYWC
+         aukmAXhr71L/q6pCpCrIVOgDfbMw686i9j+xh2neQSHRbq8f5UmXT/GKBavpwvoCY4i2PVLbYpP8
+         AAAA//8DAFwYnEsjAgAA
+     headers:
+       CF-Cache-Status:
+       - DYNAMIC
+       CF-RAY:
+       - 8bbd5d409b62b0bd-ATL
+       Connection:
+       - keep-alive
+       Content-Encoding:
+       - gzip
+       Content-Type:
+       - application/json
+       Date:
+       - Sat, 31 Aug 2024 13:28:32 GMT
+       Server:
+       - cloudflare
+       Set-Cookie:
+       - __cf_bm=3B.f5aMPKiXVHyNDAIAPma3ZGvDnGViQrDAMvyT4n_8-1725110912-1.0.1.1-.elzfgXAenLSaVeAmcRwzq2OROEZMEvOpxSRlQ7PPZ8n6nkbc2NfZXBU1bijPQNxQ28MLNRJFyh4B4Mq4G3PPA;
+         path=/; expires=Sat, 31-Aug-24 13:58:32 GMT; domain=.api.openai.com; HttpOnly;
+         Secure; SameSite=None
+       - _cfuvid=LakflcrbwsF6x0qpc03TIL8jU8c3IjMCt5dua3l4dVA-1725110912530-0.0.1.1-604800000;
+         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+       Transfer-Encoding:
+       - chunked
+       X-Content-Type-Options:
+       - nosniff
+       access-control-expose-headers:
+       - X-Request-ID
+       alt-svc:
+       - h3=":443"; ma=86400
+       openai-organization:
+       - traceloop
+       openai-processing-ms:
+       - '233'
+       openai-version:
+       - '2020-10-01'
+       strict-transport-security:
+       - max-age=15552000; includeSubDomains; preload
+       x-ratelimit-limit-requests:
+       - '5000'
+       x-ratelimit-limit-tokens:
+       - '4000000'
+       x-ratelimit-remaining-requests:
+       - '4999'
+       x-ratelimit-remaining-tokens:
+       - '3999970'
+       x-ratelimit-reset-requests:
+       - 12ms
+       x-ratelimit-reset-tokens:
+       - 0s
+       x-request-id:
+       - req_459694e3c39fd24575ad9deb5b65a831
+     status:
+       code: 200
+       message: OK
+ version: 1
lmnr/traceloop_sdk/tests/cassettes/test_association_properties/test_langchain_association_properties.yaml
@@ -0,0 +1,99 @@
+ interactions:
+ - request:
+     body: '{"messages": [{"content": "You are helpful assistant", "role": "system"},
+       {"content": "tell me a short joke", "role": "user"}], "model": "gpt-3.5-turbo",
+       "logprobs": false, "n": 1, "stream": false, "temperature": 0.7}'
+     headers:
+       accept:
+       - application/json
+       accept-encoding:
+       - gzip, deflate
+       connection:
+       - keep-alive
+       content-length:
+       - '217'
+       content-type:
+       - application/json
+       host:
+       - api.openai.com
+       user-agent:
+       - OpenAI/Python 1.35.15
+       x-stainless-arch:
+       - arm64
+       x-stainless-async:
+       - 'false'
+       x-stainless-lang:
+       - python
+       x-stainless-os:
+       - MacOS
+       x-stainless-package-version:
+       - 1.35.15
+       x-stainless-runtime:
+       - CPython
+       x-stainless-runtime-version:
+       - 3.9.5
+     method: POST
+     uri: https://api.openai.com/v1/chat/completions
+   response:
+     body:
+       string: !!binary |
+         H4sIAAAAAAAAA1SRzU7DMBCE73mKxRcuLYLSFsgFgbhwBCSQ+FHlONvE4Hgt71olQn135BBauPgw
+         s7P6dvxVAChbqxKUabWYLrjphdwk01VXzb1teXOX6pquzp9vKSwfFzdqkhNUvaOR39SRoS44FEv+
+         xzYRtWDeenI2m13Mzxfz5WB0VKPLsSbI9PRoMZUUK5oen8wWY7Ila5BVCS8FAMDX8GZGX+OnKuF4
+         8qt0yKwbVOVuCEBFcllRmtmyaC9qsjcNeUE/YD+kiBNoMSJYBg3cUhR4pw+ENUXoKZWv/tU/tT0Y
+         Sq72hwLSIlTW9MYh5N01pABVD1YY3foSrtHoxAhWYKMZZEMgNmJ9oEaE7Y7dURMiVflOn5zb6Wvr
+         LberiJrJZ04WCj/xbQHwNnSU/p2tQqQuyEroA31eOBsrUvtf+WMuR1NItNvr82Ux8inuWbBbra1v
+         MIZoh8IyZbEtvgEAAP//AwAFMxvRLwIAAA==
+     headers:
+       CF-Cache-Status:
+       - DYNAMIC
+       CF-RAY:
+       - 8aef251d0f8609c9-HFA
+       Connection:
+       - keep-alive
+       Content-Encoding:
+       - gzip
+       Content-Type:
+       - application/json
+       Date:
+       - Tue, 06 Aug 2024 12:49:06 GMT
+       Server:
+       - cloudflare
+       Set-Cookie:
+       - __cf_bm=SWNMVXkujzFcfmlIKZqekPtNke27ztV8lzplh5iUMes-1722948546-1.0.1.1-oRP4d5x5PiBOFu.G77C4XWWMxrsKragvFMrFNCIRwudZ7Z2NMrKgMa_A7eqXOME9rU2sqUIpS9c8T9SBEAR7Fg;
+         path=/; expires=Tue, 06-Aug-24 13:19:06 GMT; domain=.api.openai.com; HttpOnly;
+         Secure; SameSite=None
+       - _cfuvid=VuBkx.e0oap2GPCBPG260hGbmwEpoHckoHEm5vALKZs-1722948546860-0.0.1.1-604800000;
+         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+       Transfer-Encoding:
+       - chunked
+       X-Content-Type-Options:
+       - nosniff
+       alt-svc:
+       - h3=":443"; ma=86400
+       openai-organization:
+       - traceloop
+       openai-processing-ms:
+       - '400'
+       openai-version:
+       - '2020-10-01'
+       strict-transport-security:
+       - max-age=15552000; includeSubDomains; preload
+       x-ratelimit-limit-requests:
+       - '5000'
+       x-ratelimit-limit-tokens:
+       - '4000000'
+       x-ratelimit-remaining-requests:
+       - '4999'
+       x-ratelimit-remaining-tokens:
+       - '3999970'
+       x-ratelimit-reset-requests:
+       - 12ms
+       x-ratelimit-reset-tokens:
+       - 0s
+       x-request-id:
+       - req_bf54266d3b5b08c26a6dc51f55dd208c
+     status:
+       code: 200
+       message: OK
+ version: 1
lmnr/traceloop_sdk/tests/cassettes/test_manual/test_manual_report.yaml
@@ -0,0 +1,98 @@
+ interactions:
+ - request:
+     body: '{"messages": [{"role": "user", "content": "Tell me a joke about opentelemetry"}],
+       "model": "gpt-3.5-turbo"}'
+     headers:
+       accept:
+       - application/json
+       accept-encoding:
+       - gzip, deflate
+       connection:
+       - keep-alive
+       content-length:
+       - '107'
+       content-type:
+       - application/json
+       host:
+       - api.openai.com
+       user-agent:
+       - OpenAI/Python 1.35.13
+       x-stainless-arch:
+       - arm64
+       x-stainless-async:
+       - 'false'
+       x-stainless-lang:
+       - python
+       x-stainless-os:
+       - MacOS
+       x-stainless-package-version:
+       - 1.35.13
+       x-stainless-runtime:
+       - CPython
+       x-stainless-runtime-version:
+       - 3.9.5
+     method: POST
+     uri: https://api.openai.com/v1/chat/completions
+   response:
+     body:
+       string: !!binary |
+         H4sIAAAAAAAAA1RRy07DMBC85ysWn1vUB6XQCxISIB5HJF5CletsElPHa603hYD678hpaMXFh5md
+         8ezsTwagbK4WoEylxdTBDc/dw93zi71/vbotwtNN+Xy9fnl4XH9/3xdnp2qQFLT6QCN/qmNDdXAo
+         lvyONoxaMLmO55PxaD6azmcdUVOOLsnKIMPp8WwoDa9oOBpPZr2yImswqgW8ZQAAP92bMvocv9QC
+         RoM/pMYYdYlqsR8CUEwuIUrHaKNoL2pwIA15Qd/FfqpayG0OUiFQQC/osEbhFnLcoKOADCtGvYYm
+         wKeVKk1ahqBZPPIFXKLRTcQEt/CJjCCWMQcqwJDvvnYtCGtjfdmLcYPcQk0bPFJ9qu1+HUdlYFql
+         1X3j3B4vrLexWjLqSD5Fj0JhJ99mAO9dbc2/JlRgqoMshdbok+F4trNTh0MdyMlJTwqJdgd8ep71
+         fUnso2C9LKwvkQPbrsOUMttmvwEAAP//AwAebllYQgIAAA==
+     headers:
+       CF-Cache-Status:
+       - DYNAMIC
+       CF-RAY:
+       - 8a3c07512da0135d-ATL
+       Connection:
+       - keep-alive
+       Content-Encoding:
+       - gzip
+       Content-Type:
+       - application/json
+       Date:
+       - Mon, 15 Jul 2024 19:06:15 GMT
+       Server:
+       - cloudflare
+       Set-Cookie:
+       - __cf_bm=14DjWb_t0PhGL5mOhK8gsqaD2anNOF1J7Y8Lo_SpKpw-1721070375-1.0.1.1-HZ1yyYErVn.USbzwQt76wp1v0Fpbz2MvF04IOMJMUI7ZFXPv0Np1tZ8z2AthYPyy1oxDYakl9du4ysPr.pp_jg;
+         path=/; expires=Mon, 15-Jul-24 19:36:15 GMT; domain=.api.openai.com; HttpOnly;
+         Secure; SameSite=None
+       - _cfuvid=KuBmiwwXOTWsR0nU52KjyIkpVEjiHsE8MSSzFnGTEv0-1721070375445-0.0.1.1-604800000;
+         path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None
+       Transfer-Encoding:
+       - chunked
+       X-Content-Type-Options:
+       - nosniff
+       alt-svc:
+       - h3=":443"; ma=86400
+       openai-organization:
+       - traceloop
+       openai-processing-ms:
+       - '381'
+       openai-version:
+       - '2020-10-01'
+       strict-transport-security:
+       - max-age=15552000; includeSubDomains; preload
+       x-ratelimit-limit-requests:
+       - '5000'
+       x-ratelimit-limit-tokens:
+       - '160000'
+       x-ratelimit-remaining-requests:
+       - '4999'
+       x-ratelimit-remaining-tokens:
+       - '159974'
+       x-ratelimit-reset-requests:
+       - 12ms
+       x-ratelimit-reset-tokens:
+       - 9ms
+       x-request-id:
+       - req_7e9ec34ca2189a55d52eeb1828fcef25
+     status:
+       code: 200
+       message: OK
+ version: 1