opentelemetry-instrumentation-openai 0.5.0__tar.gz → 0.5.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of opentelemetry-instrumentation-openai might be problematic. Click here for more details.
- {opentelemetry_instrumentation_openai-0.5.0 → opentelemetry_instrumentation_openai-0.5.2}/PKG-INFO +1 -1
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/__init__.py +28 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/shared/__init__.py +136 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/shared/chat_wrappers.py +170 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/shared/completion_wrappers.py +142 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/utils.py +22 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/v0/__init__.py +28 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/v1/__init__.py +28 -0
- opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/version.py +1 -0
- {opentelemetry_instrumentation_openai-0.5.0 → opentelemetry_instrumentation_openai-0.5.2}/pyproject.toml +1 -1
- opentelemetry_instrumentation_openai-0.5.0/opentelemetry/instrumentation/openai/__init__.py +0 -383
- opentelemetry_instrumentation_openai-0.5.0/opentelemetry/instrumentation/openai/version.py +0 -1
- {opentelemetry_instrumentation_openai-0.5.0 → opentelemetry_instrumentation_openai-0.5.2}/README.md +0 -0
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from typing import Collection
|
|
2
|
+
|
|
3
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
4
|
+
|
|
5
|
+
from opentelemetry.instrumentation.openai.utils import is_openai_v1
|
|
6
|
+
from opentelemetry.instrumentation.openai.v0 import OpenAIV0Instrumentor
|
|
7
|
+
from opentelemetry.instrumentation.openai.v1 import OpenAIV1Instrumentor
|
|
8
|
+
|
|
9
|
+
_instruments = ("openai >= 0.27.0",)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class OpenAIInstrumentor(BaseInstrumentor):
    """An instrumentor for OpenAI's client library.

    Delegates all work to the v0 or v1 sub-instrumentor, chosen by the
    installed ``openai`` package's major version.
    """

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _delegate(self):
        # Pick the sub-instrumentor matching the installed openai version.
        return OpenAIV1Instrumentor() if is_openai_v1() else OpenAIV0Instrumentor()

    def _instrument(self, **kwargs):
        self._delegate().instrument(**kwargs)

    def _uninstrument(self, **kwargs):
        self._delegate().uninstrument(**kwargs)
|
opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/shared/__init__.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import openai
|
|
3
|
+
import json
|
|
4
|
+
import types
|
|
5
|
+
import logging
|
|
6
|
+
|
|
7
|
+
from opentelemetry import context as context_api
|
|
8
|
+
|
|
9
|
+
from opentelemetry.semconv.ai import SpanAttributes
|
|
10
|
+
from opentelemetry.instrumentation.openai.utils import is_openai_v1
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
OPENAI_API_VERSION = "openai.api_version"
|
|
14
|
+
OPENAI_API_BASE = "openai.api_base"
|
|
15
|
+
OPENAI_API_TYPE = "openai.api_type"
|
|
16
|
+
|
|
17
|
+
logger = logging.getLogger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
def should_send_prompts():
    """Whether prompt/completion content may be recorded on spans.

    Enabled unless TRACELOOP_TRACE_CONTENT is explicitly set to a value
    other than "true"; a context override can force it on.
    """
    env_value = os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
    enabled_by_env = env_value.lower() == "true"
    return enabled_by_env or context_api.get_value("override_enable_content_tracing")
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _set_span_attribute(span, name, value):
|
|
27
|
+
if value is not None:
|
|
28
|
+
if value != "":
|
|
29
|
+
span.set_attribute(name, value)
|
|
30
|
+
return
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def _set_api_attributes(span):
    """Record the OpenAI API endpoint configuration on *span*."""
    if not span.is_recording():
        return

    try:
        # openai>=1.0 exposes `base_url`; older clients expose `api_base`.
        base_url = openai.base_url if hasattr(openai, "base_url") else openai.api_base

        for attr_name, attr_value in (
            (OPENAI_API_BASE, base_url),
            (OPENAI_API_TYPE, openai.api_type),
            (OPENAI_API_VERSION, openai.api_version),
        ):
            _set_span_attribute(span, attr_name, attr_value)
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning(
            "Failed to set api attributes for openai span, error: %s", str(ex)
        )

    return
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _set_functions_attributes(span, functions):
    """Record the request's function definitions as indexed span attributes."""
    if not functions:
        return

    for index, function in enumerate(functions):
        prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{index}"
        for suffix, value in (
            ("name", function.get("name")),
            ("description", function.get("description")),
            # Parameters are a JSON-schema dict; serialize for the span.
            ("parameters", json.dumps(function.get("parameters"))),
        ):
            _set_span_attribute(span, f"{prefix}.{suffix}", value)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def _set_request_attributes(span, llm_request_type, kwargs):
    """Record request parameters (model, sampling settings, etc.) on *span*."""
    if not span.is_recording():
        return

    try:
        _set_api_attributes(span)
        _set_span_attribute(span, SpanAttributes.LLM_VENDOR, "OpenAI")
        _set_span_attribute(
            span, SpanAttributes.LLM_REQUEST_TYPE, llm_request_type.value
        )

        # Same attributes, same order as before — just table-driven.
        for attr_name, attr_value in (
            (SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model")),
            (SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")),
            (SpanAttributes.LLM_TEMPERATURE, kwargs.get("temperature")),
            (SpanAttributes.LLM_TOP_P, kwargs.get("top_p")),
            (SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")),
            (SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")),
            (SpanAttributes.LLM_USER, kwargs.get("user")),
            (SpanAttributes.LLM_HEADERS, str(kwargs.get("headers"))),
        ):
            _set_span_attribute(span, attr_name, attr_value)
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning(
            "Failed to set input attributes for openai span, error: %s", str(ex)
        )
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def _set_response_attributes(span, response):
    """Record the response model name and token-usage counters on *span*."""
    if not span.is_recording():
        return

    try:
        _set_span_attribute(
            span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model")
        )

        usage = response.get("usage")
        if not usage:
            return

        # openai>=1.0 returns a pydantic usage object rather than a dict.
        if is_openai_v1() and not isinstance(usage, dict):
            usage = usage.__dict__

        for attr_name, usage_key in (
            (SpanAttributes.LLM_USAGE_TOTAL_TOKENS, "total_tokens"),
            (SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, "completion_tokens"),
            (SpanAttributes.LLM_USAGE_PROMPT_TOKENS, "prompt_tokens"),
        ):
            _set_span_attribute(span, attr_name, usage.get(usage_key))

        return
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning(
            "Failed to set response attributes for openai span, error: %s", str(ex)
        )
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def is_streaming_response(response):
    """Return True when *response* is a streamed (chunked) OpenAI result."""
    if isinstance(response, types.GeneratorType):
        return True
    # openai>=1.0 wraps streamed results in an openai.Stream object.
    return is_openai_v1() and isinstance(response, openai.Stream)
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
|
|
4
|
+
from opentelemetry import context as context_api
|
|
5
|
+
|
|
6
|
+
from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
|
|
7
|
+
|
|
8
|
+
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
|
|
9
|
+
from opentelemetry.instrumentation.openai.utils import (
|
|
10
|
+
_with_tracer_wrapper,
|
|
11
|
+
start_as_current_span_async,
|
|
12
|
+
)
|
|
13
|
+
from opentelemetry.instrumentation.openai.shared import (
|
|
14
|
+
_set_request_attributes,
|
|
15
|
+
_set_span_attribute,
|
|
16
|
+
_set_functions_attributes,
|
|
17
|
+
_set_response_attributes,
|
|
18
|
+
is_streaming_response,
|
|
19
|
+
should_send_prompts,
|
|
20
|
+
)
|
|
21
|
+
from opentelemetry.trace import SpanKind
|
|
22
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
23
|
+
|
|
24
|
+
from opentelemetry.instrumentation.openai.utils import is_openai_v1
|
|
25
|
+
|
|
26
|
+
SPAN_NAME = "openai.chat"
|
|
27
|
+
LLM_REQUEST_TYPE = LLMRequestTypeValues.CHAT
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@_with_tracer_wrapper
def chat_wrapper(tracer, wrapped, instance, args, kwargs):
    """Trace a synchronous chat-completion call."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    # The span is opened/closed manually: a streamed response outlives this
    # call and the span is ended from inside the accumulator generator.
    span = tracer.start_span(SPAN_NAME, kind=SpanKind.CLIENT)
    _handle_request(span, kwargs)
    response = wrapped(*args, **kwargs)

    if is_streaming_response(response):
        # Span is ended once the stream generator is exhausted.
        return _build_from_streaming_response(span, response)

    _handle_response(response, span)
    span.end()
    return response
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@_with_tracer_wrapper
async def achat_wrapper(tracer, wrapped, instance, args, kwargs):
    """Trace an asynchronous chat-completion call.

    Opens a client span around the awaited call, records request and
    response attributes, and returns the provider's response unchanged.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        # BUGFIX: must await here. Returning the bare coroutine from this
        # async function handed callers an unawaited coroutine instead of
        # the actual response whenever instrumentation was suppressed.
        return await wrapped(*args, **kwargs)

    async with start_as_current_span_async(
        tracer=tracer, name=SPAN_NAME, kind=SpanKind.CLIENT
    ) as span:
        _handle_request(span, kwargs)
        response = await wrapped(*args, **kwargs)
        _handle_response(response, span)

        return response
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def _handle_request(span, kwargs):
    """Record chat request attributes and, when allowed, message content."""
    _set_request_attributes(span, LLM_REQUEST_TYPE, kwargs)
    if not should_send_prompts():
        return
    _set_prompts(span, kwargs.get("messages"))
    _set_functions_attributes(span, kwargs.get("functions"))
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def _handle_response(response, span):
    """Record chat response attributes; returns the (unmodified) response."""
    # openai>=1.0 responses are pydantic models; normalize to a dict.
    response_dict = response.model_dump() if is_openai_v1() else response

    _set_response_attributes(span, response_dict)

    if should_send_prompts():
        _set_completions(span, response_dict.get("choices"))

    return response
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def _set_prompts(span, messages):
    """Record chat request messages as indexed span attributes.

    String content is stored verbatim; list content (multi-part messages)
    is JSON-serialized; anything else (including None) is skipped.
    """
    if not span.is_recording() or messages is None:
        return

    try:
        for i, msg in enumerate(messages):
            prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
            raw_content = msg.get("content")
            if isinstance(raw_content, str):
                content = raw_content
            elif isinstance(raw_content, list):
                content = json.dumps(raw_content)
            else:
                # BUGFIX: previously `content` was left unbound here (e.g.
                # assistant messages carrying only a function_call have
                # content=None), raising UnboundLocalError and aborting
                # recording of all remaining prompts.
                content = None

            _set_span_attribute(span, f"{prefix}.role", msg.get("role"))
            _set_span_attribute(span, f"{prefix}.content", content)
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning("Failed to set prompts for openai span, error: %s", str(ex))
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
def _set_completions(span, choices):
    """Record each response choice (message, role, function call) on *span*."""
    if choices is None:
        return

    for choice in choices:
        index = choice.get("index")
        prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
        _set_span_attribute(
            span, f"{prefix}.finish_reason", choice.get("finish_reason")
        )

        message = choice.get("message")
        if not message:
            # BUGFIX: was `return`, which silently skipped every remaining
            # choice as soon as one choice had no message.
            continue

        _set_span_attribute(span, f"{prefix}.role", message.get("role"))
        _set_span_attribute(span, f"{prefix}.content", message.get("content"))

        function_call = message.get("function_call")
        if not function_call:
            # BUGFIX: was `return` — same early-exit problem as above.
            continue

        _set_span_attribute(
            span, f"{prefix}.function_call.name", function_call.get("name")
        )
        _set_span_attribute(
            span, f"{prefix}.function_call.arguments", function_call.get("arguments")
        )
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
def _build_from_streaming_response(span, response):
    """Re-yield streamed chat chunks while accumulating the full response.

    Acts as a pass-through generator: every chunk is yielded unchanged,
    while message content/role and finish reasons are merged into
    ``complete_response``. When the stream is exhausted, the accumulated
    response is recorded on *span* and the span is ended.
    """
    complete_response = {"choices": [], "model": ""}
    for item in response:
        item_to_yield = item
        if is_openai_v1():
            # openai>=1.0 chunks are pydantic models; work on the dict form
            # but still yield the original object to the caller.
            item = item.model_dump()

        for choice in item.get("choices"):
            index = choice.get("index")
            # NOTE(review): assumes choice indices arrive in order starting
            # at 0 — an out-of-order first index would raise IndexError on
            # the lookup below. Confirm against the streaming API contract.
            if len(complete_response.get("choices")) <= index:
                complete_response["choices"].append(
                    {"index": index, "message": {"content": "", "role": ""}}
                )
            complete_choice = complete_response.get("choices")[index]
            if choice.get("finish_reason"):
                complete_choice["finish_reason"] = choice.get("finish_reason")

            # NOTE(review): assumes every chunk choice carries a "delta";
            # a missing delta here would raise AttributeError (caught by no
            # one in this generator) — verify against the chunk schema.
            delta = choice.get("delta")

            if delta.get("content"):
                complete_choice["message"]["content"] += delta.get("content")
            if delta.get("role"):
                complete_choice["message"]["role"] = delta.get("role")

        yield item_to_yield

    _set_response_attributes(span, complete_response)

    if should_send_prompts():
        _set_completions(span, complete_response.get("choices"))

    span.set_status(Status(StatusCode.OK))
    span.end()
|
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
|
|
3
|
+
from opentelemetry import context as context_api
|
|
4
|
+
|
|
5
|
+
from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
|
|
6
|
+
|
|
7
|
+
from opentelemetry.instrumentation.utils import _SUPPRESS_INSTRUMENTATION_KEY
|
|
8
|
+
from opentelemetry.instrumentation.openai.utils import (
|
|
9
|
+
_with_tracer_wrapper,
|
|
10
|
+
start_as_current_span_async,
|
|
11
|
+
)
|
|
12
|
+
from opentelemetry.instrumentation.openai.shared import (
|
|
13
|
+
_set_request_attributes,
|
|
14
|
+
_set_span_attribute,
|
|
15
|
+
_set_functions_attributes,
|
|
16
|
+
_set_response_attributes,
|
|
17
|
+
is_streaming_response,
|
|
18
|
+
should_send_prompts,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
from opentelemetry.instrumentation.openai.utils import is_openai_v1
|
|
22
|
+
|
|
23
|
+
from opentelemetry.trace import SpanKind
|
|
24
|
+
from opentelemetry.trace.status import Status, StatusCode
|
|
25
|
+
|
|
26
|
+
SPAN_NAME = "openai.completion"
|
|
27
|
+
LLM_REQUEST_TYPE = LLMRequestTypeValues.COMPLETION
|
|
28
|
+
|
|
29
|
+
logger = logging.getLogger(__name__)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
@_with_tracer_wrapper
def completion_wrapper(tracer, wrapped, instance, args, kwargs):
    """Trace a synchronous text-completion call."""
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    # The span is opened/closed manually: a streamed response outlives this
    # call and the span is ended from inside the accumulator generator.
    span = tracer.start_span(SPAN_NAME, kind=SpanKind.CLIENT)
    _handle_request(span, kwargs)
    response = wrapped(*args, **kwargs)

    if is_streaming_response(response):
        # Span is ended once the stream generator is exhausted.
        return _build_from_streaming_response(span, response)

    _handle_response(response, span)
    span.end()
    return response
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
@_with_tracer_wrapper
async def acompletion_wrapper(tracer, wrapped, instance, args, kwargs):
    """Trace an asynchronous text-completion call.

    Opens a client span around the awaited call, records request and
    response attributes, and returns the provider's response unchanged.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        # BUGFIX: must await here. Returning the bare coroutine from this
        # async function handed callers an unawaited coroutine instead of
        # the actual response whenever instrumentation was suppressed.
        return await wrapped(*args, **kwargs)

    async with start_as_current_span_async(
        tracer=tracer, name=SPAN_NAME, kind=SpanKind.CLIENT
    ) as span:
        _handle_request(span, kwargs)
        response = await wrapped(*args, **kwargs)
        _handle_response(response, span)

        return response
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def _handle_request(span, kwargs):
    """Record completion request attributes and, when allowed, the prompt."""
    _set_request_attributes(span, LLM_REQUEST_TYPE, kwargs)
    if not should_send_prompts():
        return
    _set_prompts(span, kwargs.get("prompt"))
    _set_functions_attributes(span, kwargs.get("functions"))
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _handle_response(response, span):
    """Record completion response attributes on *span*."""
    # openai>=1.0 responses are pydantic models; normalize to a dict.
    response_dict = response.model_dump() if is_openai_v1() else response

    _set_response_attributes(span, response_dict)

    if should_send_prompts():
        _set_completions(span, response_dict.get("choices"))
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def _set_prompts(span, prompt):
    """Record the request prompt (first element when given a list)."""
    if not span.is_recording() or not prompt:
        return

    try:
        first_prompt = prompt[0] if isinstance(prompt, list) else prompt
        _set_span_attribute(
            span, f"{SpanAttributes.LLM_PROMPTS}.0.user", first_prompt
        )
    except Exception as ex:  # pylint: disable=broad-except
        logger.warning("Failed to set prompts for openai span, error: %s", str(ex))
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _set_completions(span, choices):
    """Record completion text and finish reason for each returned choice."""
    if not span.is_recording() or not choices:
        return

    try:
        for choice in choices:
            prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{choice.get('index')}"
            _set_span_attribute(
                span, f"{prefix}.finish_reason", choice.get("finish_reason")
            )
            _set_span_attribute(span, f"{prefix}.content", choice.get("text"))
    except Exception as e:
        logger.warning("Failed to set completion attributes, error: %s", str(e))
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def _build_from_streaming_response(span, response):
    """Re-yield streamed completion chunks while accumulating the full text.

    Acts as a pass-through generator: every chunk is yielded unchanged,
    while text and finish reasons are merged into ``complete_response``.
    When the stream is exhausted, the accumulated response is recorded on
    *span* and the span is ended.
    """
    complete_response = {"choices": [], "model": ""}
    for item in response:
        item_to_yield = item
        if is_openai_v1():
            # openai>=1.0 chunks are pydantic models; work on the dict form
            # but still yield the original object to the caller.
            item = item.model_dump()

        for choice in item.get("choices"):
            index = choice.get("index")
            # NOTE(review): assumes choice indices arrive in order starting
            # at 0 — an out-of-order first index would raise IndexError on
            # the lookup below. Confirm against the streaming API contract.
            if len(complete_response.get("choices")) <= index:
                complete_response["choices"].append({"index": index, "text": ""})
            complete_choice = complete_response.get("choices")[index]
            if choice.get("finish_reason"):
                complete_choice["finish_reason"] = choice.get("finish_reason")

            # NOTE(review): assumes every chunk carries a string "text";
            # None here would raise TypeError — confirm against the schema.
            complete_choice["text"] += choice.get("text")

        yield item_to_yield

    _set_response_attributes(span, complete_response)

    if should_send_prompts():
        _set_completions(span, complete_response.get("choices"))

    span.set_status(Status(StatusCode.OK))
    span.end()
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
from importlib.metadata import version
|
|
2
|
+
from contextlib import asynccontextmanager
|
|
3
|
+
|
|
4
|
+
|
|
5
|
+
def is_openai_v1():
    """Return True when the installed ``openai`` package is version 1.x or later.

    Compares the major version component numerically. The previous
    implementation compared full version strings lexicographically, which
    is fragile for pre-release or unusual version formats.
    """
    installed = version("openai")
    try:
        return int(installed.split(".")[0]) >= 1
    except ValueError:
        # Unparsable leading component (e.g. a local dev build) — fall back
        # to the old string comparison rather than fail at import time.
        return installed >= "1.0.0"
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def _with_tracer_wrapper(func):
|
|
10
|
+
def _with_tracer(tracer):
|
|
11
|
+
def wrapper(wrapped, instance, args, kwargs):
|
|
12
|
+
return func(tracer, wrapped, instance, args, kwargs)
|
|
13
|
+
|
|
14
|
+
return wrapper
|
|
15
|
+
|
|
16
|
+
return _with_tracer
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@asynccontextmanager
async def start_as_current_span_async(tracer, *args, **kwargs):
    """Async context manager bridging the tracer's synchronous span manager."""
    with tracer.start_as_current_span(*args, **kwargs) as span:
        yield span
|
opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/v0/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from typing import Collection
|
|
2
|
+
|
|
3
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
4
|
+
from opentelemetry.trace import get_tracer
|
|
5
|
+
from wrapt import wrap_function_wrapper
|
|
6
|
+
|
|
7
|
+
from opentelemetry.instrumentation.openai.shared.chat_wrappers import chat_wrapper, achat_wrapper
|
|
8
|
+
from opentelemetry.instrumentation.openai.shared.completion_wrappers import completion_wrapper, acompletion_wrapper
|
|
9
|
+
from opentelemetry.instrumentation.openai.version import __version__
|
|
10
|
+
|
|
11
|
+
_instruments = ("openai >= 0.27.0", "openai < 1.0.0")
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class OpenAIV0Instrumentor(BaseInstrumentor):
    """Instrumentor for the legacy (pre-1.0) openai client."""

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # Wrap the four legacy entry points (sync + async, chat + text).
        for method_path, wrapper in (
            ("Completion.create", completion_wrapper(tracer)),
            ("Completion.acreate", acompletion_wrapper(tracer)),
            ("ChatCompletion.create", chat_wrapper(tracer)),
            ("ChatCompletion.acreate", achat_wrapper(tracer)),
        ):
            wrap_function_wrapper("openai", method_path, wrapper)

    def _uninstrument(self, **kwargs):
        pass
|
opentelemetry_instrumentation_openai-0.5.2/opentelemetry/instrumentation/openai/v1/__init__.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
from typing import Collection
|
|
2
|
+
|
|
3
|
+
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
4
|
+
from opentelemetry.trace import get_tracer
|
|
5
|
+
from wrapt import wrap_function_wrapper
|
|
6
|
+
|
|
7
|
+
from opentelemetry.instrumentation.openai.shared.chat_wrappers import chat_wrapper, achat_wrapper
|
|
8
|
+
from opentelemetry.instrumentation.openai.shared.completion_wrappers import completion_wrapper, acompletion_wrapper
|
|
9
|
+
from opentelemetry.instrumentation.openai.version import __version__
|
|
10
|
+
|
|
11
|
+
_instruments = ("openai >= 1.0.0",)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class OpenAIV1Instrumentor(BaseInstrumentor):
    """Instrumentor for the openai>=1.0 resource-based client."""

    def instrumentation_dependencies(self) -> Collection[str]:
        return _instruments

    def _instrument(self, **kwargs):
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        # Wrap the four v1 entry points (sync + async, chat + text).
        for module, method, wrapper in (
            ("openai.resources.chat.completions", "Completions.create", chat_wrapper(tracer)),
            ("openai.resources.completions", "Completions.create", completion_wrapper(tracer)),
            ("openai.resources.chat.completions", "AsyncCompletions.create", achat_wrapper(tracer)),
            ("openai.resources.completions", "AsyncCompletions.create", acompletion_wrapper(tracer)),
        ):
            wrap_function_wrapper(module, method, wrapper)

    def _uninstrument(self, **kwargs):
        pass
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.5.2"
|
|
@@ -11,7 +11,7 @@ addopts = "--cov --cov-report html:'../../coverage/packages/opentelemetry-instru
|
|
|
11
11
|
|
|
12
12
|
[tool.poetry]
|
|
13
13
|
name = "opentelemetry-instrumentation-openai"
|
|
14
|
-
version = "0.5.
|
|
14
|
+
version = "0.5.2"
|
|
15
15
|
description = "OpenTelemetry OpenAI instrumentation"
|
|
16
16
|
authors = [
|
|
17
17
|
"Gal Kleinman <gal@traceloop.com>",
|
|
@@ -1,383 +0,0 @@
|
|
|
1
|
-
import logging
|
|
2
|
-
import os
|
|
3
|
-
import json
|
|
4
|
-
import types
|
|
5
|
-
import pkg_resources
|
|
6
|
-
from typing import Collection
|
|
7
|
-
from wrapt import wrap_function_wrapper
|
|
8
|
-
import openai
|
|
9
|
-
|
|
10
|
-
from opentelemetry import context as context_api
|
|
11
|
-
from opentelemetry.trace import get_tracer, SpanKind
|
|
12
|
-
from opentelemetry.trace.status import Status, StatusCode
|
|
13
|
-
|
|
14
|
-
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
|
|
15
|
-
from opentelemetry.instrumentation.utils import (
|
|
16
|
-
_SUPPRESS_INSTRUMENTATION_KEY,
|
|
17
|
-
unwrap,
|
|
18
|
-
)
|
|
19
|
-
|
|
20
|
-
from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
|
|
21
|
-
from opentelemetry.instrumentation.openai.version import __version__
|
|
22
|
-
|
|
23
|
-
logger = logging.getLogger(__name__)
|
|
24
|
-
|
|
25
|
-
_instruments = ("openai >= 0.27.0",)
|
|
26
|
-
|
|
27
|
-
WRAPPED_METHODS_VERSION_0 = [
|
|
28
|
-
{
|
|
29
|
-
"module": "openai",
|
|
30
|
-
"object": "ChatCompletion",
|
|
31
|
-
"method": "create",
|
|
32
|
-
"span_name": "openai.chat",
|
|
33
|
-
},
|
|
34
|
-
{
|
|
35
|
-
"module": "openai",
|
|
36
|
-
"object": "Completion",
|
|
37
|
-
"method": "create",
|
|
38
|
-
"span_name": "openai.completion",
|
|
39
|
-
},
|
|
40
|
-
]
|
|
41
|
-
|
|
42
|
-
WRAPPED_METHODS_VERSION_1 = [
|
|
43
|
-
{
|
|
44
|
-
"module": "openai.resources.chat.completions",
|
|
45
|
-
"object": "Completions",
|
|
46
|
-
"method": "create",
|
|
47
|
-
"span_name": "openai.chat",
|
|
48
|
-
},
|
|
49
|
-
{
|
|
50
|
-
"module": "openai.resources.completions",
|
|
51
|
-
"object": "Completions",
|
|
52
|
-
"method": "create",
|
|
53
|
-
"span_name": "openai.completion",
|
|
54
|
-
},
|
|
55
|
-
]
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
def should_send_prompts():
|
|
59
|
-
return (
|
|
60
|
-
os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
|
|
61
|
-
).lower() == "true" or context_api.get_value("override_enable_content_tracing")
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
def is_openai_v1():
|
|
65
|
-
return pkg_resources.get_distribution("openai").version >= "1.0.0"
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
def _set_span_attribute(span, name, value):
|
|
69
|
-
if value is not None:
|
|
70
|
-
if value != "":
|
|
71
|
-
span.set_attribute(name, value)
|
|
72
|
-
return
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
def _set_api_attributes(span):
|
|
76
|
-
_set_span_attribute(
|
|
77
|
-
span,
|
|
78
|
-
OpenAISpanAttributes.OPENAI_API_BASE,
|
|
79
|
-
openai.base_url if hasattr(openai, "base_url") else openai.api_base,
|
|
80
|
-
)
|
|
81
|
-
_set_span_attribute(span, OpenAISpanAttributes.OPENAI_API_TYPE, openai.api_type)
|
|
82
|
-
_set_span_attribute(
|
|
83
|
-
span, OpenAISpanAttributes.OPENAI_API_VERSION, openai.api_version
|
|
84
|
-
)
|
|
85
|
-
|
|
86
|
-
return
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
def _set_span_prompts(span, messages):
|
|
90
|
-
if messages is None:
|
|
91
|
-
return
|
|
92
|
-
|
|
93
|
-
for i, msg in enumerate(messages):
|
|
94
|
-
prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
|
|
95
|
-
if isinstance(msg.get("content"), str):
|
|
96
|
-
content = msg.get("content")
|
|
97
|
-
elif isinstance(msg.get("content"), list):
|
|
98
|
-
content = json.dumps(msg.get("content"))
|
|
99
|
-
|
|
100
|
-
_set_span_attribute(span, f"{prefix}.role", msg.get("role"))
|
|
101
|
-
_set_span_attribute(span, f"{prefix}.content", content)
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
def _set_input_attributes(span, llm_request_type, kwargs):
|
|
105
|
-
_set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("model"))
|
|
106
|
-
_set_span_attribute(
|
|
107
|
-
span, SpanAttributes.LLM_REQUEST_MAX_TOKENS, kwargs.get("max_tokens")
|
|
108
|
-
)
|
|
109
|
-
_set_span_attribute(span, SpanAttributes.LLM_TEMPERATURE, kwargs.get("temperature"))
|
|
110
|
-
_set_span_attribute(span, SpanAttributes.LLM_TOP_P, kwargs.get("top_p"))
|
|
111
|
-
_set_span_attribute(
|
|
112
|
-
span, SpanAttributes.LLM_FREQUENCY_PENALTY, kwargs.get("frequency_penalty")
|
|
113
|
-
)
|
|
114
|
-
_set_span_attribute(
|
|
115
|
-
span, SpanAttributes.LLM_PRESENCE_PENALTY, kwargs.get("presence_penalty")
|
|
116
|
-
)
|
|
117
|
-
_set_span_attribute(span, SpanAttributes.LLM_USER, kwargs.get("user"))
|
|
118
|
-
_set_span_attribute(span, SpanAttributes.LLM_HEADERS, str(kwargs.get("headers")))
|
|
119
|
-
|
|
120
|
-
if should_send_prompts():
|
|
121
|
-
if llm_request_type == LLMRequestTypeValues.CHAT:
|
|
122
|
-
_set_span_prompts(span, kwargs.get("messages"))
|
|
123
|
-
elif llm_request_type == LLMRequestTypeValues.COMPLETION:
|
|
124
|
-
prompt = kwargs.get("prompt")
|
|
125
|
-
_set_span_attribute(
|
|
126
|
-
span,
|
|
127
|
-
f"{SpanAttributes.LLM_PROMPTS}.0.user",
|
|
128
|
-
prompt[0] if isinstance(prompt, list) else prompt,
|
|
129
|
-
)
|
|
130
|
-
|
|
131
|
-
functions = kwargs.get("functions")
|
|
132
|
-
if functions:
|
|
133
|
-
for i, function in enumerate(functions):
|
|
134
|
-
prefix = f"{SpanAttributes.LLM_REQUEST_FUNCTIONS}.{i}"
|
|
135
|
-
_set_span_attribute(span, f"{prefix}.name", function.get("name"))
|
|
136
|
-
_set_span_attribute(
|
|
137
|
-
span, f"{prefix}.description", function.get("description")
|
|
138
|
-
)
|
|
139
|
-
_set_span_attribute(
|
|
140
|
-
span, f"{prefix}.parameters", json.dumps(function.get("parameters"))
|
|
141
|
-
)
|
|
142
|
-
|
|
143
|
-
return
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
def _set_span_completions(span, llm_request_type, choices):
|
|
147
|
-
if choices is None:
|
|
148
|
-
return
|
|
149
|
-
|
|
150
|
-
for choice in choices:
|
|
151
|
-
if is_openai_v1() and not isinstance(choice, dict):
|
|
152
|
-
choice = choice.__dict__
|
|
153
|
-
|
|
154
|
-
index = choice.get("index")
|
|
155
|
-
prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
|
|
156
|
-
_set_span_attribute(
|
|
157
|
-
span, f"{prefix}.finish_reason", choice.get("finish_reason")
|
|
158
|
-
)
|
|
159
|
-
|
|
160
|
-
if llm_request_type == LLMRequestTypeValues.CHAT:
|
|
161
|
-
message = choice.get("message")
|
|
162
|
-
if message is not None:
|
|
163
|
-
if is_openai_v1() and not isinstance(message, dict):
|
|
164
|
-
message = message.__dict__
|
|
165
|
-
|
|
166
|
-
_set_span_attribute(span, f"{prefix}.role", message.get("role"))
|
|
167
|
-
_set_span_attribute(span, f"{prefix}.content", message.get("content"))
|
|
168
|
-
function_call = message.get("function_call")
|
|
169
|
-
if function_call:
|
|
170
|
-
if is_openai_v1() and not isinstance(function_call, dict):
|
|
171
|
-
function_call = function_call.__dict__
|
|
172
|
-
|
|
173
|
-
_set_span_attribute(
|
|
174
|
-
span, f"{prefix}.function_call.name", function_call.get("name")
|
|
175
|
-
)
|
|
176
|
-
_set_span_attribute(
|
|
177
|
-
span,
|
|
178
|
-
f"{prefix}.function_call.arguments",
|
|
179
|
-
function_call.get("arguments"),
|
|
180
|
-
)
|
|
181
|
-
elif llm_request_type == LLMRequestTypeValues.COMPLETION:
|
|
182
|
-
_set_span_attribute(span, f"{prefix}.content", choice.get("text"))
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
def _set_response_attributes(span, llm_request_type, response):
    """Copy model name, completions, and token usage from *response* to *span*.

    *response* is a dict-like mapping (v1 objects are converted by the caller
    via ``__dict__``). Completions are recorded only when prompt capture is
    enabled; usage attributes are skipped entirely when absent.
    """
    _set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.get("model"))

    if should_send_prompts():
        _set_span_completions(span, llm_request_type, response.get("choices"))

    usage = response.get("usage")
    if usage is None:
        return
    # v1 usage is an object rather than a dict; unify access.
    if is_openai_v1() and not isinstance(usage, dict):
        usage = usage.__dict__

    token_attributes = (
        (SpanAttributes.LLM_USAGE_TOTAL_TOKENS, "total_tokens"),
        (SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, "completion_tokens"),
        (SpanAttributes.LLM_USAGE_PROMPT_TOKENS, "prompt_tokens"),
    )
    for attribute, usage_key in token_attributes:
        _set_span_attribute(span, attribute, usage.get(usage_key))
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
def _build_from_streaming_response(span, llm_request_type, response):
    # Generator that wraps a streaming OpenAI response: it yields every chunk
    # unchanged to the caller while accumulating the pieces into one complete
    # response dict, then records the aggregate on the span, marks it OK, and
    # ends it once the stream is exhausted. NOTE(review): if the consumer
    # abandons the generator early, the span is never ended — confirm callers
    # always drain the stream.
    complete_response = {"choices": [], "model": ""}
    for item in response:
        item_to_yield = item  # yield the original object, not the dict view below
        if is_openai_v1():
            item = item.__dict__

        for choice in item.get("choices"):
            if is_openai_v1():
                choice = choice.__dict__

            index = choice.get("index")
            # Grow the accumulator until it has a slot for this choice index.
            # Assumes indices appear in non-decreasing order without gaps —
            # a gap would append the new slot at the wrong position; TODO confirm.
            if len(complete_response.get("choices")) <= index:
                complete_response["choices"].append(
                    {"index": index, "message": {"content": "", "role": ""}}
                    if llm_request_type == LLMRequestTypeValues.CHAT
                    else {"index": index, "text": ""}
                )
            complete_choice = complete_response.get("choices")[index]
            if choice.get("finish_reason"):
                complete_choice["finish_reason"] = choice.get("finish_reason")
            if llm_request_type == LLMRequestTypeValues.CHAT:
                # Chat streams deliver incremental "delta" fragments.
                delta = choice.get("delta")
                if is_openai_v1():
                    delta = delta.__dict__

                if delta.get("content"):
                    complete_choice["message"]["content"] += delta.get("content")
                if delta.get("role"):
                    complete_choice["message"]["role"] = delta.get("role")
            else:
                # Completion streams deliver text directly on the choice.
                complete_choice["text"] += choice.get("text")

        yield item_to_yield

    # Stream drained: publish the assembled response and close the span.
    _set_response_attributes(
        span,
        llm_request_type,
        complete_response,
    )
    span.set_status(Status(StatusCode.OK))
    span.end()
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
def _with_tracer_wrapper(func):
|
|
255
|
-
"""Helper for providing tracer for wrapper functions."""
|
|
256
|
-
|
|
257
|
-
def _with_tracer(tracer, to_wrap):
|
|
258
|
-
def wrapper(wrapped, instance, args, kwargs):
|
|
259
|
-
return func(tracer, to_wrap, wrapped, instance, args, kwargs)
|
|
260
|
-
|
|
261
|
-
return wrapper
|
|
262
|
-
|
|
263
|
-
return _with_tracer
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
def _llm_request_type_by_module_object(module_name, object_name):
    """Map the patched module/object pair to an ``LLMRequestTypeValues`` member.

    v1 clients are identified by the resource module that was wrapped; pre-v1
    clients by the top-level API object name. Anything else is ``UNKNOWN``.
    """
    if is_openai_v1():
        by_module = {
            "openai.resources.chat.completions": LLMRequestTypeValues.CHAT,
            "openai.resources.completions": LLMRequestTypeValues.COMPLETION,
        }
        return by_module.get(module_name, LLMRequestTypeValues.UNKNOWN)

    by_object = {
        "Completion": LLMRequestTypeValues.COMPLETION,
        "ChatCompletion": LLMRequestTypeValues.CHAT,
    }
    return by_object.get(object_name, LLMRequestTypeValues.UNKNOWN)
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
def is_streaming_response(response):
    """Return True when *response* is delivered incrementally (a stream)."""
    if isinstance(response, types.GeneratorType):
        return True
    # openai.Stream only exists in v1 clients, so gate on the version first.
    return is_openai_v1() and isinstance(response, openai.Stream)
|
|
287
|
-
|
|
288
|
-
|
|
289
|
-
@_with_tracer_wrapper
def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
    """Instruments and calls every function defined in TO_WRAP.

    Starts a CLIENT span named per the method config, records request
    attributes, invokes the wrapped OpenAI call, then records response
    attributes. Streaming responses are handed to a generator that ends the
    span after the stream drains; non-streaming responses end the span here.
    """
    if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
        return wrapped(*args, **kwargs)

    name = to_wrap.get("span_name")
    llm_request_type = _llm_request_type_by_module_object(
        to_wrap.get("module"), to_wrap.get("object")
    )

    span = tracer.start_span(
        name,
        kind=SpanKind.CLIENT,
        attributes={
            SpanAttributes.LLM_VENDOR: "OpenAI",
            SpanAttributes.LLM_REQUEST_TYPE: llm_request_type.value,
        },
    )

    if span.is_recording():
        _set_api_attributes(span)
        try:
            _set_input_attributes(span, llm_request_type, kwargs)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning(
                "Failed to set input attributes for openai span, error: %s", str(ex)
            )

    # BUG FIX: previously, an exception raised by the wrapped call leaked the
    # span — it was never ended and carried no status. Record the error, end
    # the span, and re-raise so callers see the original exception.
    try:
        response = wrapped(*args, **kwargs)
    except Exception as ex:
        span.set_status(Status(StatusCode.ERROR, str(ex)))
        span.end()
        raise

    if response:
        try:
            if span.is_recording():
                if is_streaming_response(response):
                    # The generator ends the span itself once the stream drains.
                    return _build_from_streaming_response(
                        span, llm_request_type, response
                    )
                _set_response_attributes(
                    span,
                    llm_request_type,
                    response.__dict__ if is_openai_v1() else response,
                )
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning(
                "Failed to set response attributes for openai span, error: %s",
                str(ex),
            )
        if span.is_recording():
            span.set_status(Status(StatusCode.OK))

    span.end()
    return response
|
|
346
|
-
|
|
347
|
-
|
|
348
|
-
class OpenAISpanAttributes:
    """Span attribute keys describing the OpenAI client configuration."""

    # Base URL the client sends requests to.
    OPENAI_API_BASE = "openai.api_base"
    # Configured API type of the client.
    OPENAI_API_TYPE = "openai.api_type"
    # Configured API version string of the client.
    OPENAI_API_VERSION = "openai.api_version"
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
class OpenAIInstrumentor(BaseInstrumentor):
    """An instrumentor for OpenAI's client library."""

    def instrumentation_dependencies(self) -> Collection[str]:
        # Supported openai package range for this instrumentor.
        return _instruments

    def _instrument(self, **kwargs):
        """Wrap every method listed for the installed openai major version."""
        tracer_provider = kwargs.get("tracer_provider")
        tracer = get_tracer(__name__, __version__, tracer_provider)

        wrapped_methods = (
            WRAPPED_METHODS_VERSION_1 if is_openai_v1() else WRAPPED_METHODS_VERSION_0
        )
        for wrapped_method in wrapped_methods:
            wrap_module = wrapped_method.get("module")
            wrap_object = wrapped_method.get("object")
            wrap_method = wrapped_method.get("method")
            wrap_function_wrapper(
                wrap_module,
                f"{wrap_object}.{wrap_method}",
                _wrap(tracer, wrapped_method),
            )

    def _uninstrument(self, **kwargs):
        """Remove the wrappers installed by :meth:`_instrument`."""
        wrapped_methods = (
            WRAPPED_METHODS_VERSION_1 if is_openai_v1() else WRAPPED_METHODS_VERSION_0
        )
        for wrapped_method in wrapped_methods:
            # BUG FIX: unwrap from the module each method was actually wrapped
            # on. The previous hard-coded "openai.<object>" path only matched
            # the pre-v1 layout, so v1 wrappers (installed on
            # openai.resources.* modules) were never removed.
            # Assumes the v0 method table records module "openai", which keeps
            # pre-v1 behavior identical — TODO confirm against the table.
            unwrap(
                f"{wrapped_method.get('module')}.{wrapped_method.get('object')}",
                wrapped_method.get("method"),
            )
|
|
@@ -1 +0,0 @@
|
|
|
1
|
-
__version__ = "0.5.0"  # Package version string reported by the instrumentor.
|
{opentelemetry_instrumentation_openai-0.5.0 → opentelemetry_instrumentation_openai-0.5.2}/README.md
RENAMED
|
File without changes
|