opentelemetry-instrumentation-openai 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-openai has been flagged as potentially problematic; consult the registry's advisory page for details.

@@ -0,0 +1,17 @@
1
+ Metadata-Version: 2.1
2
+ Name: opentelemetry-instrumentation-openai
3
+ Version: 0.0.1
4
+ Summary: OpenTelemetry OpenAI instrumentation
5
+ License: Apache-2.0
6
+ Author: Gal Kleinman
7
+ Author-email: gal@traceloop.com
8
+ Requires-Python: >=3.8.1,<3.12
9
+ Classifier: License :: OSI Approved :: Apache Software License
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.9
12
+ Classifier: Programming Language :: Python :: 3.10
13
+ Classifier: Programming Language :: Python :: 3.11
14
+ Requires-Dist: openai (>=0.28.0,<0.29.0)
15
+ Requires-Dist: opentelemetry-api (>=1.19.0,<2.0.0)
16
+ Requires-Dist: opentelemetry-instrumentation (>=0.40b0,<0.41)
17
+ Requires-Dist: opentelemetry-semantic-conventions-llm (>=0.0.1,<0.0.2)
@@ -0,0 +1,216 @@
1
+ import logging
2
+ from typing import Collection
3
+ from wrapt import wrap_function_wrapper
4
+ import openai
5
+
6
+ from opentelemetry import context as context_api
7
+ from opentelemetry.trace import get_tracer, SpanKind
8
+ from opentelemetry.trace.status import Status, StatusCode
9
+
10
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
11
+ from opentelemetry.instrumentation.utils import (
12
+ _SUPPRESS_INSTRUMENTATION_KEY,
13
+ unwrap,
14
+ )
15
+
16
+ from traceloop.semconv import SpanAttributes, LLMRequestTypeValues
17
+
18
+ logger = logging.getLogger(__name__)
19
+
20
+ _instruments = ("openai ~= 0.27.8",)
21
+ __version__ = "0.1.0"
22
+
23
+ WRAPPED_METHODS = [
24
+ {
25
+ "object": "ChatCompletion",
26
+ "method": "create",
27
+ "span_name": "openai.chat",
28
+ },
29
+ {
30
+ "object": "Completion",
31
+ "method": "create",
32
+ "span_name": "openai.completion",
33
+ },
34
+ ]
35
+
36
+
37
+ def _set_span_attribute(span, name, value):
38
+ if value is not None:
39
+ if value != "":
40
+ span.set_attribute(name, value)
41
+ return
42
+
43
+
44
def _set_api_attributes(span):
    """Record the globally-configured OpenAI endpoint settings on *span*."""
    endpoint_attributes = (
        (SpanAttributes.OPENAI_API_BASE, openai.api_base),
        (SpanAttributes.OPENAI_API_TYPE, openai.api_type),
        (SpanAttributes.OPENAI_API_VERSION, openai.api_version),
    )
    for attribute, value in endpoint_attributes:
        _set_span_attribute(span, attribute, value)
50
+
51
+
52
def _set_span_prompts(span, messages):
    """Record each chat message's role and content as indexed prompt attributes."""
    if messages is None:
        return

    for position, message in enumerate(messages):
        prefix = f"{SpanAttributes.LLM_PROMPTS}.{position}"
        _set_span_attribute(span, f"{prefix}.role", message.get("role"))
        _set_span_attribute(span, f"{prefix}.content", message.get("content"))
60
+
61
+
62
def _set_input_attributes(span, llm_request_type, kwargs):
    """Copy request parameters (model, sampling knobs, prompts) onto *span*.

    Chat requests record their message list; completion requests record
    the single prompt string under index 0.
    """
    request_parameters = (
        (SpanAttributes.LLM_REQUEST_MODEL, "model"),
        (SpanAttributes.LLM_REQUEST_MAX_TOKENS, "max_tokens"),
        (SpanAttributes.LLM_TEMPERATURE, "temperature"),
        (SpanAttributes.LLM_TOP_P, "top_p"),
        (SpanAttributes.LLM_FREQUENCY_PENALTY, "frequency_penalty"),
        (SpanAttributes.LLM_PRESENCE_PENALTY, "presence_penalty"),
    )
    for attribute, argument in request_parameters:
        _set_span_attribute(span, attribute, kwargs.get(argument))

    if llm_request_type == LLMRequestTypeValues.CHAT:
        _set_span_prompts(span, kwargs.get("messages"))
    elif llm_request_type == LLMRequestTypeValues.COMPLETION:
        _set_span_attribute(
            span, f"{SpanAttributes.LLM_PROMPTS}.0.user", kwargs.get("prompt")
        )
84
+
85
+
86
def _set_span_completions(span, llm_request_type, choices):
    """Record each response choice (finish reason plus message/text) on *span*.

    Attribute indices come from each choice's own "index" field, not from
    iteration order.
    """
    if choices is None:
        return

    for choice in choices:
        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{choice.get('index')}"
        _set_span_attribute(
            span, f"{prefix}.finish_reason", choice.get("finish_reason")
        )

        if llm_request_type == LLMRequestTypeValues.CHAT:
            message = choice.get("message")
            if message is not None:
                _set_span_attribute(span, f"{prefix}.role", message.get("role"))
                _set_span_attribute(span, f"{prefix}.content", message.get("content"))
        elif llm_request_type == LLMRequestTypeValues.COMPLETION:
            _set_span_attribute(span, f"{prefix}.content", choice.get("text"))
104
+
105
+
106
def _set_response_attributes(span, llm_request_type, response):
    """Copy the response model name, choices and token usage onto *span*."""
    _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))
    _set_span_completions(span, llm_request_type, response.get("choices"))

    usage = response.get("usage")
    if usage is None:
        return

    usage_fields = (
        (SpanAttributes.LLM_USAGE_TOTAL_TOKENS, "total_tokens"),
        (SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, "completion_tokens"),
        (SpanAttributes.LLM_USAGE_PROMPT_TOKENS, "prompt_tokens"),
    )
    for attribute, field in usage_fields:
        _set_span_attribute(span, attribute, usage.get(field))
125
+
126
+
127
+ def _with_tracer_wrapper(func):
128
+ """Helper for providing tracer for wrapper functions."""
129
+
130
+ def _with_tracer(tracer, to_wrap):
131
+ def wrapper(wrapped, instance, args, kwargs):
132
+ # prevent double wrapping
133
+ if hasattr(wrapped, "__wrapped__"):
134
+ return wrapped(*args, **kwargs)
135
+
136
+ return func(tracer, to_wrap, wrapped, instance, args, kwargs)
137
+
138
+ return wrapper
139
+
140
+ return _with_tracer
141
+
142
+
143
def _llm_request_type_by_object(object_name):
    """Map an openai object name to the corresponding LLM request type."""
    request_types = {
        "Completion": LLMRequestTypeValues.COMPLETION,
        "ChatCompletion": LLMRequestTypeValues.CHAT,
    }
    return request_types.get(object_name, LLMRequestTypeValues.UNKNOWN)
150
+
151
+
152
@_with_tracer_wrapper
def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
    """Trace one call to an entry of WRAPPED_METHODS.

    Opens a CLIENT span around *wrapped*, attaching request attributes
    before the call and response attributes after it. Attribute-setting
    failures are logged (best-effort), never raised, so instrumentation
    cannot break the instrumented call itself.
    """
    # Honor the global suppression flag (e.g. set while an exporter
    # itself talks to openai) by calling straight through, span-free.
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    span_name = to_wrap.get("span_name")
    llm_request_type = _llm_request_type_by_object(to_wrap.get("object"))

    with tracer.start_as_current_span(
        span_name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_VENDOR: "OpenAI",
            SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
        },
    ) as span:
        if span.is_recording():
            _set_api_attributes(span)
        try:
            if span.is_recording():
                _set_input_attributes(span, llm_request_type, kwargs)
        except Exception as exc:  # pylint: disable=broad-except
            logger.warning(
                "Failed to set input attributes for openai span, error: %s",
                str(exc),
            )

        response = wrapped(*args, **kwargs)

        if response:
            try:
                if span.is_recording():
                    _set_response_attributes(span, llm_request_type, response)
            except Exception as exc:  # pylint: disable=broad-except
                logger.warning(
                    "Failed to set response attributes for openai span, error: %s",
                    str(exc),
                )
            # Only a truthy response marks the span OK; falsy responses
            # leave the status unset.
            if span.is_recording():
                span.set_status(Status(StatusCode.OK))

        return response
195
+
196
+
197
class OpenAIInstrumentor(BaseInstrumentor):
    """An instrumentor for OpenAI's client library."""

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return the openai version constraints this instrumentation supports."""
        return _instruments

    def _instrument(self, **kwargs):
        """Install a tracing wrapper around every method in WRAPPED_METHODS."""
        tracer = get_tracer(__name__, __version__, kwargs.get("tracer_provider"))
        for method_spec in WRAPPED_METHODS:
            target = f"{method_spec.get('object')}.{method_spec.get('method')}"
            wrap_function_wrapper("openai", target, _wrap(tracer, method_spec))

    def _uninstrument(self, **kwargs):
        """Remove the wrappers installed by _instrument."""
        for method_spec in WRAPPED_METHODS:
            unwrap(
                f"openai.{method_spec.get('object')}",
                method_spec.get("method"),
            )
@@ -0,0 +1,43 @@
1
+ [tool.coverage.run]
2
+ branch = true
3
+ source = [ "opentelemetry/instrumentation/openai" ]
4
+
5
+ [tool.coverage.report]
6
+ exclude_lines = [ "if TYPE_CHECKING:" ]
7
+ show_missing = true
8
+
9
+ [tool.pytest.ini_options]
10
+ addopts = "--cov --cov-report html:'../../coverage/packages/opentelemetry-instrumentation-openai/html' --cov-report xml:'../../coverage/packages/opentelemetry-instrumentation-openai/coverage.xml' --html='../../reports/packages/opentelemetry-instrumentation-openai/unittests/html/index.html' --junitxml='../../reports/packages/opentelemetry-instrumentation-openai/unittests/junit.xml'"
11
+
12
+ [tool.poetry]
13
+ name = "opentelemetry-instrumentation-openai"
14
+ version = "0.0.1"
15
+ description = "OpenTelemetry OpenAI instrumentation"
16
+ authors = [
17
+ "Gal Kleinman <gal@traceloop.com>",
18
+ "Nir Gazit <nir@traceloop.com>",
19
+ "Tomer Friedman <tomer@traceloop.com>"
20
+ ]
21
+ license = "Apache-2.0"
22
+
23
+ [[tool.poetry.packages]]
24
+ include = "opentelemetry/instrumentation/openai"
25
+
26
+ [tool.poetry.dependencies]
27
+ python = ">=3.8.1,<3.12"
28
+ openai = "^0.28.0"
29
+ opentelemetry-api = "^1.19.0"
30
+ opentelemetry-instrumentation = "^0.40b0"
31
+ opentelemetry-semantic-conventions-llm = "^0.0.1"
32
+
33
+ [tool.poetry.group.dev.dependencies]
34
+ autopep8 = "2.0.4"
35
+ flake8 = "6.1.0"
36
+ pytest = "7.4.1"
37
+ pytest-sugar = "0.9.7"
38
+ pytest-cov = "4.1.0"
39
+ pytest-html = "4.0.0"
40
+
41
+ [build-system]
42
+ requires = [ "poetry-core" ]
43
+ build-backend = "poetry.core.masonry.api"