opentelemetry-instrumentation-openai 0.4.1__tar.gz → 0.5.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-openai might be problematic; consult the registry's advisory page for details.
- opentelemetry_instrumentation_openai-0.5.0/PKG-INFO +45 -0
- opentelemetry_instrumentation_openai-0.5.0/README.md +25 -0
- {opentelemetry_instrumentation_openai-0.4.1 → opentelemetry_instrumentation_openai-0.5.0}/opentelemetry/instrumentation/openai/__init__.py +8 -2
- opentelemetry_instrumentation_openai-0.5.0/opentelemetry/instrumentation/openai/version.py +1 -0
- {opentelemetry_instrumentation_openai-0.4.1 → opentelemetry_instrumentation_openai-0.5.0}/pyproject.toml +1 -1
- opentelemetry_instrumentation_openai-0.4.1/PKG-INFO +0 -23
- opentelemetry_instrumentation_openai-0.4.1/README.md +0 -3
- opentelemetry_instrumentation_openai-0.4.1/opentelemetry/instrumentation/openai/version.py +0 -1
|
--- /dev/null
+++ opentelemetry_instrumentation_openai-0.5.0/PKG-INFO
@@ -0,0 +1,45 @@
+Metadata-Version: 2.1
+Name: opentelemetry-instrumentation-openai
+Version: 0.5.0
+Summary: OpenTelemetry OpenAI instrumentation
+License: Apache-2.0
+Author: Gal Kleinman
+Author-email: gal@traceloop.com
+Requires-Python: >=3.8.1,<4
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Dist: opentelemetry-api (>=1.21.0,<2.0.0)
+Requires-Dist: opentelemetry-instrumentation (>=0.42b0,<0.43)
+Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.0.12,<0.0.13)
+Description-Content-Type: text/markdown
+
+# OpenTelemetry OpenAI Instrumentation
+
+<a href="https://pypi.org/project/opentelemetry-instrumentation-openai/">
+    <img src="https://badge.fury.io/py/opentelemetry-instrumentation-openai.svg">
+</a>
+
+This library allows tracing OpenAI prompts and completions sent with the official [OpenAI library](https://github.com/openai/openai-python).
+
+## Installation
+
+```bash
+pip install opentelemetry-instrumentation-openai
+```
+
+## Privacy
+
+**By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
+
+However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
+
+To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
+
+```bash
+TRACELOOP_TRACE_CONTENT=false
+```
+
|
--- /dev/null
+++ opentelemetry_instrumentation_openai-0.5.0/README.md
@@ -0,0 +1,25 @@
+# OpenTelemetry OpenAI Instrumentation
+
+<a href="https://pypi.org/project/opentelemetry-instrumentation-openai/">
+    <img src="https://badge.fury.io/py/opentelemetry-instrumentation-openai.svg">
+</a>
+
+This library allows tracing OpenAI prompts and completions sent with the official [OpenAI library](https://github.com/openai/openai-python).
+
+## Installation
+
+```bash
+pip install opentelemetry-instrumentation-openai
+```
+
+## Privacy
+
+**By default, this instrumentation logs prompts, completions, and embeddings to span attributes**. This gives you a clear visibility into how your LLM application is working, and can make it easy to debug and evaluate the quality of the outputs.
+
+However, you may want to disable this logging for privacy reasons, as they may contain highly sensitive data from your users. You may also simply want to reduce the size of your traces.
+
+To disable logging, set the `TRACELOOP_TRACE_CONTENT` environment variable to `false`.
+
+```bash
+TRACELOOP_TRACE_CONTENT=false
+```
|
opentelemetry/instrumentation/openai/__init__.py:

@@ -92,8 +92,13 @@ def _set_span_prompts(span, messages):

     for i, msg in enumerate(messages):
         prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
+        if isinstance(msg.get("content"), str):
+            content = msg.get("content")
+        elif isinstance(msg.get("content"), list):
+            content = json.dumps(msg.get("content"))
+
         _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
-        _set_span_attribute(span, f"{prefix}.content", msg.get("content"))
+        _set_span_attribute(span, f"{prefix}.content", content)


 def _set_input_attributes(span, llm_request_type, kwargs):
|
@@ -205,6 +210,7 @@ def _set_response_attributes(span, llm_request_type, response):
 def _build_from_streaming_response(span, llm_request_type, response):
     complete_response = {"choices": [], "model": ""}
     for item in response:
+        item_to_yield = item
         if is_openai_v1():
             item = item.__dict__

|
@@ -234,7 +240,7 @@ def _build_from_streaming_response(span, llm_request_type, response):
         else:
             complete_choice["text"] += choice.get("text")

-        yield item
+        yield item_to_yield

     _set_response_attributes(
         span,
|
--- /dev/null
+++ opentelemetry_instrumentation_openai-0.5.0/opentelemetry/instrumentation/openai/version.py
@@ -0,0 +1 @@
+__version__ = "0.5.0"
|
pyproject.toml:

@@ -11,7 +11,7 @@ addopts = "--cov --cov-report html:'../../coverage/packages/opentelemetry-instrumentation-openai/html'"

 [tool.poetry]
 name = "opentelemetry-instrumentation-openai"
-version = "0.4.1"
+version = "0.5.0"
 description = "OpenTelemetry OpenAI instrumentation"
 authors = [
     "Gal Kleinman <gal@traceloop.com>",
|
--- opentelemetry_instrumentation_openai-0.4.1/PKG-INFO
+++ /dev/null
@@ -1,23 +0,0 @@
-Metadata-Version: 2.1
-Name: opentelemetry-instrumentation-openai
-Version: 0.4.1
-Summary: OpenTelemetry OpenAI instrumentation
-License: Apache-2.0
-Author: Gal Kleinman
-Author-email: gal@traceloop.com
-Requires-Python: >=3.8.1,<4
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: opentelemetry-api (>=1.21.0,<2.0.0)
-Requires-Dist: opentelemetry-instrumentation (>=0.42b0,<0.43)
-Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.0.12,<0.0.13)
-Description-Content-Type: text/markdown
-
-# opentelemetry-instrumentation-openai
-
-Project description here.
-
--- opentelemetry_instrumentation_openai-0.4.1/opentelemetry/instrumentation/openai/version.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.4.1"