openlit 1.34.19__py3-none-any.whl → 1.34.22__py3-none-any.whl
This diff shows the contents of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- openlit/__helpers.py +40 -0
- openlit/instrumentation/bedrock/__init__.py +19 -14
- openlit/instrumentation/bedrock/bedrock.py +169 -35
- openlit/instrumentation/bedrock/utils.py +143 -172
- openlit/instrumentation/litellm/async_litellm.py +2 -2
- openlit/instrumentation/openai/__init__.py +63 -68
- openlit/instrumentation/openai/async_openai.py +203 -1277
- openlit/instrumentation/openai/openai.py +200 -1274
- openlit/instrumentation/openai/utils.py +794 -0
- openlit/instrumentation/vertexai/__init__.py +18 -23
- openlit/instrumentation/vertexai/async_vertexai.py +46 -364
- openlit/instrumentation/vertexai/utils.py +204 -0
- openlit/instrumentation/vertexai/vertexai.py +46 -364
- {openlit-1.34.19.dist-info → openlit-1.34.22.dist-info}/METADATA +1 -1
- {openlit-1.34.19.dist-info → openlit-1.34.22.dist-info}/RECORD +17 -15
- {openlit-1.34.19.dist-info → openlit-1.34.22.dist-info}/LICENSE +0 -0
- {openlit-1.34.19.dist-info → openlit-1.34.22.dist-info}/WHEEL +0 -0
openlit/instrumentation/bedrock/utils.py
@@ -3,21 +3,55 @@ AWS Bedrock OpenTelemetry instrumentation utility functions
 """
 import time
 
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from opentelemetry.trace import Status, StatusCode
 
 from openlit.__helpers import (
     calculate_ttft,
     response_as_dict,
     calculate_tbt,
-    extract_and_format_input,
     get_chat_model_cost,
-    [… removed import names not preserved in this view …]
+    record_completion_metrics,
+    common_span_attributes,
+    handle_exception
 )
 from openlit.semcov import SemanticConvention
 
+def format_content(messages):
+    """
+    Format the messages into a string for span events.
+    """
+
+    if not messages:
+        return ""
+
+    formatted_messages = []
+    for message in messages:
+        if isinstance(message, dict):
+            role = message.get("role", "user")
+            content = message.get("content", "")
+        else:
+            # Handle Bedrock object format
+            role = getattr(message, "role", "user")
+            content = getattr(message, "content", "")
+
+        if isinstance(content, list):
+            # Handle structured content (e.g., text + images)
+            text_parts = []
+            for part in content:
+                if isinstance(part, dict):
+                    # Bedrock format: {"text": "content"} or generic format: {"type": "text", "text": "content"}
+                    if "text" in part:
+                        text_parts.append(part.get("text", ""))
+                    elif part.get("type") == "text":
+                        text_parts.append(part.get("text", ""))
+            content = " ".join(text_parts)
+        elif not isinstance(content, str):
+            content = str(content)
+
+        formatted_messages.append(f"{role}: {content}")
+
+    return "\n".join(formatted_messages)
+
 def process_chunk(self, chunk):
     """
     Process a chunk of response data and update state.
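The new `format_content` helper replaces the removed `extract_and_format_input` import: it flattens both dict-style and Bedrock object-style messages into a single string, joining structured content parts into plain text. A minimal sketch of its expected behavior (the sample messages below are hypothetical, not taken from the package):

```python
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": [
        {"text": "Describe this image."},            # Bedrock-style part
        {"type": "text", "text": "Keep it short."},  # generic-style part
    ]},
]

# format_content(messages) should return:
#   system: You are a helpful assistant.
#   user: Describe this image. Keep it short.
```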
@@ -33,33 +67,33 @@ def process_chunk(self, chunk):
 
     chunked = response_as_dict(chunk)
 
-    # …
-    if …
-    …
-    self.…
-    [… removed lines not preserved in this view …]
-    if …
-        self._llmresponse += …
-    [… removed lines not preserved in this view …]
-    self._output_tokens = …
-    self.…
+    # Handle Bedrock messageStart event
+    if "messageStart" in chunked:
+        message_start = chunked.get("messageStart", {})
+        self._response_role = message_start.get("role", "assistant")
+
+    # Handle Bedrock contentBlockDelta event
+    if "contentBlockDelta" in chunked:
+        content_delta = chunked.get("contentBlockDelta", {})
+        delta = content_delta.get("delta", {})
+        if "text" in delta:
+            self._llmresponse += delta.get("text", "")
+
+    # Handle Bedrock messageStop event
+    if "messageStop" in chunked:
+        message_stop = chunked.get("messageStop", {})
+        self._finish_reason = message_stop.get("stopReason", "")
+
+    # Handle Bedrock metadata event (final event with usage info)
+    if "metadata" in chunked:
+        metadata = chunked.get("metadata", {})
+        usage = metadata.get("usage", {})
+        self._input_tokens = usage.get("inputTokens", 0)
+        self._output_tokens = usage.get("outputTokens", 0)
+        self._end_time = end_time
 
 def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
-    …
+        capture_message_content, disable_metrics, version, llm_config, is_stream):
     """
     Process chat request and generate Telemetry
     """
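The rewritten `process_chunk` consumes Bedrock ConverseStream events instead of the old provider-specific shapes. A self-contained sketch that replays representative events through a bare state object (the event payloads are illustrative, and the real wrapper also tracks per-chunk timestamps for TBT/TTFT, which is omitted here):

```python
import time

class _State:
    """Hypothetical stand-in for the instrumented stream wrapper's state."""
    _llmresponse = ""
    _response_role = ""
    _finish_reason = ""
    _input_tokens = 0
    _output_tokens = 0
    _end_time = 0.0

# Representative ConverseStream events, shaped per the handlers above.
events = [
    {"messageStart": {"role": "assistant"}},
    {"contentBlockDelta": {"delta": {"text": "Hello"}}},
    {"contentBlockDelta": {"delta": {"text": ", world"}}},
    {"messageStop": {"stopReason": "end_turn"}},
    {"metadata": {"usage": {"inputTokens": 12, "outputTokens": 3}}},
]

state = _State()
for chunked in events:
    end_time = time.time()
    if "messageStart" in chunked:
        state._response_role = chunked["messageStart"].get("role", "assistant")
    if "contentBlockDelta" in chunked:
        delta = chunked["contentBlockDelta"].get("delta", {})
        if "text" in delta:
            state._llmresponse += delta.get("text", "")
    if "messageStop" in chunked:
        state._finish_reason = chunked["messageStop"].get("stopReason", "")
    if "metadata" in chunked:
        usage = chunked["metadata"].get("usage", {})
        state._input_tokens = usage.get("inputTokens", 0)
        state._output_tokens = usage.get("outputTokens", 0)
        state._end_time = end_time

assert state._llmresponse == "Hello, world"
assert (state._input_tokens, state._output_tokens) == (12, 3)
```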
@@ -68,62 +102,55 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
 
     if len(scope._timestamps) > 1:
         scope._tbt = calculate_tbt(scope._timestamps)
 
-    formatted_messages = …
-    …
-    request_model = scope._kwargs.get('model', 'claude-3-opus-20240229')
+    formatted_messages = format_content(scope._kwargs.get("messages", []))
+    request_model = scope._kwargs.get("modelId", "amazon.titan-text-express-v1")
 
     cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
 
-    # …
-    scope…
-    [… removed lines not preserved in this view …]
-        (SemanticConvention.…
-        (SemanticConvention.…
-        (SemanticConvention.…
-        (SemanticConvention.…
-        (SemanticConvention.…
-        (SemanticConvention.…
-        (SemanticConvention.GEN_AI_REQUEST_TOP_K, 'topK'),
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Bedrock-specific attributes from llm_config
+    bedrock_attributes = [
+        (SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, "frequencyPenalty"),
+        (SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, "maxTokens"),
+        (SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, "presencePenalty"),
+        (SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, "stopSequences"),
+        (SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, "temperature"),
+        (SemanticConvention.GEN_AI_REQUEST_TOP_P, "topP"),
+        (SemanticConvention.GEN_AI_REQUEST_TOP_K, "topK"),
     ]
 
-    # Set each attribute if the corresponding value exists and is not None
-    for attribute, key in …
+    # Set each bedrock-specific attribute if the corresponding value exists and is not None
+    for attribute, key in bedrock_attributes:
         value = llm_config.get(key)
         if value is not None:
             scope._span.set_attribute(attribute, value)
 
-    …
+    # Span Attributes for Response parameters
     scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
-    scope._span.set_attribute(SemanticConvention.…
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+    # Span Attributes for Cost and Tokens
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
-    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, scope._server_address)
-    …
-    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-        'text' if isinstance(scope._llmresponse, str) else 'json')
-    …
-    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
-    scope._span.set_attribute(SERVICE_NAME, application_name)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
     scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
 
-    # …
-    prompt = concatenate_all_contents(formatted_messages)
+    # Span Attributes for Content
     if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, formatted_messages)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+        # To be removed once the change to span_attributes (from span events) is complete
         scope._span.add_event(
             name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
             attributes={
-                SemanticConvention.GEN_AI_CONTENT_PROMPT: …
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: formatted_messages,
             },
         )
        scope._span.add_event(
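The `(attribute, key)` table plus the `llm_config.get(key)` loop is the entire request-parameter mapping: only keys the caller actually supplied become span attributes. A hedged sketch with a hypothetical `inferenceConfig`-style dict, writing the semantic-convention values as literal strings for illustration:

```python
llm_config = {"maxTokens": 512, "temperature": 0.7, "topP": 0.9}

bedrock_attributes = [
    ("gen_ai.request.max_tokens", "maxTokens"),
    ("gen_ai.request.temperature", "temperature"),
    ("gen_ai.request.top_p", "topP"),
    ("gen_ai.request.top_k", "topK"),  # not supplied -> skipped
]

span_attributes = {}
for attribute, key in bedrock_attributes:
    value = llm_config.get(key)
    if value is not None:  # unset parameters never become attributes
        span_attributes[attribute] = value

print(span_attributes)
# {'gen_ai.request.max_tokens': 512, 'gen_ai.request.temperature': 0.7,
#  'gen_ai.request.top_p': 0.9}
```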
@@ -133,120 +160,64 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
 
             },
         )
 
-    choice_event_body = {
-        'finish_reason': scope._finish_reason,
-        'index': 0,
-        'message': {
-            **({'content': scope._llmresponse} if capture_message_content else {}),
-            'role': scope._response_role
-        }
-    }
-
-    # Emit events
-    for role in ['user', 'system', 'assistant', 'tool']:
-        if formatted_messages.get(role, {}).get('content', ''):
-            event = otel_event(
-                name=getattr(SemanticConvention, f'GEN_AI_{role.upper()}_MESSAGE'),
-                attributes={
-                    SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK
-                },
-                body = {
-                    # pylint: disable=line-too-long
-                    **({'content': formatted_messages.get(role, {}).get('content', '')} if capture_message_content else {}),
-                    'role': formatted_messages.get(role, {}).get('role', []),
-                    **({
-                        'tool_calls': {
-                            'function': {
-                                # pylint: disable=line-too-long
-                                'name': (scope._tool_calls[0].get('function', {}).get('name', '') if scope._tool_calls else ''),
-                                'arguments': (scope._tool_calls[0].get('function', {}).get('arguments', '') if scope._tool_calls else '')
-                            },
-                            'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else ''),
-                            'type': 'function'
-                        }
-                    } if role == 'assistant' else {}),
-                    **({
-                        'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else '')
-                    } if role == 'tool' else {})
-                }
-            )
-            event_provider.emit(event)
-
-    choice_event = otel_event(
-        name=SemanticConvention.GEN_AI_CHOICE,
-        attributes={
-            SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK
-        },
-        body=choice_event_body
-    )
-    event_provider.emit(choice_event)
-
     scope._span.set_status(Status(StatusCode.OK))
 
+    # Record metrics
     if not disable_metrics:
-        [… removed lines not preserved in this view …]
-            system=SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK,
-            request_model=request_model,
-            server_address=scope._server_address,
-            server_port=scope._server_port,
-            response_model=scope._response_model,
-        )
-
-        metrics['genai_client_usage_tokens'].record(scope._input_tokens + scope._output_tokens, metrics_attributes)
-        metrics['genai_client_operation_duration'].record(scope._end_time - scope._start_time, metrics_attributes)
-        metrics['genai_server_tbt'].record(scope._tbt, metrics_attributes)
-        metrics['genai_server_ttft'].record(scope._ttft, metrics_attributes)
-        metrics['genai_requests'].add(1, metrics_attributes)
-        metrics['genai_completion_tokens'].add(scope._output_tokens, metrics_attributes)
-        metrics['genai_prompt_tokens'].add(scope._input_tokens, metrics_attributes)
-        metrics['genai_cost'].record(cost, metrics_attributes)
-
-def process_streaming_chat_response(self, pricing_info, environment, application_name, metrics,
-        event_provider, capture_message_content=False, disable_metrics=False, version='', llm_config=''):
+        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_AWS_BEDROCK,
+            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
+            application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
+            cost, scope._tbt, scope._ttft)
 
+def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content=False, disable_metrics=False, version="", llm_config=None):
     """
-    Process chat …
+    Process streaming chat response and generate telemetry.
     """
-    if self._tool_id != '':
-        self._tool_calls = {
-            'id': self._tool_id,
-            'name': self._tool_name,
-            'input': self._tool_arguments
-        }
 
-    [… removed lines not preserved in this view …]
+    try:
+        if llm_config is None:
+            llm_config = {}
 
-    [… removed lines not preserved in this view …]
+        common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+            capture_message_content, disable_metrics, version, llm_config, is_stream=True)
+    except Exception as e:
+        handle_exception(scope._span, e)
+        raise
 
+def process_chat_response(response, request_model, pricing_info, server_port, server_address, environment,
+    application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", llm_config=None, **kwargs):
     """
-    Process chat …
+    Process non-streaming chat response and generate telemetry.
     """
 
-    [… removed handler body not preserved in this view …]
+    try:
+        if llm_config is None:
+            llm_config = {}
+
+        scope = type("GenericScope", (), {})()
+        response_dict = response_as_dict(response)
+
+        scope._start_time = start_time
+        scope._end_time = time.time()
+        scope._span = span
+        scope._llmresponse = response_dict.get("output", {}).get("message", {}).get("content", [{}])[0].get("text", "")
+        scope._response_role = response_dict.get("output", {}).get("message", {}).get("role", "assistant")
+        scope._input_tokens = response_dict.get("usage", {}).get("inputTokens", 0)
+        scope._output_tokens = response_dict.get("usage", {}).get("outputTokens", 0)
+        scope._response_model = request_model
+        scope._finish_reason = response_dict.get("stopReason", "")
+        scope._response_id = response_dict.get("RequestId", "")
+        scope._timestamps = []
+        scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+        scope._server_address, scope._server_port = server_address, server_port
+        scope._kwargs = kwargs
+
+        common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+            capture_message_content, disable_metrics, version, llm_config, is_stream=False)
+
+        return response
+    except Exception as e:
+        handle_exception(span, e)
+        raise
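The non-streaming path now reads straight from the Converse API response shape. A small sketch of the same chained `.get()` lookups against an illustrative response dict (field values are made up):

```python
response_dict = {
    "output": {
        "message": {
            "role": "assistant",
            "content": [{"text": "Hello! How can I help?"}],
        }
    },
    "usage": {"inputTokens": 12, "outputTokens": 7},
    "stopReason": "end_turn",
}

# The same lookups used by process_chat_response above:
llmresponse = response_dict.get("output", {}).get("message", {}).get("content", [{}])[0].get("text", "")
finish_reason = response_dict.get("stopReason", "")
input_tokens = response_dict.get("usage", {}).get("inputTokens", 0)

assert llmresponse == "Hello! How can I help?"
assert finish_reason == "end_turn"
assert input_tokens == 12
```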
openlit/instrumentation/litellm/async_litellm.py
@@ -68,9 +68,9 @@ def acompletion(version, environment, application_name, tracer, pricing_info,
     def __aiter__(self):
         return self
 
-    def __getattr__(self, name):
+    async def __getattr__(self, name):
         """Delegate attribute access to the wrapped object."""
-        return getattr(self.__wrapped__, name)
+        return getattr(await self.__wrapped__, name)
 
     async def __anext__(self):
         try:
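This fix makes delegated attribute access await the wrapped object first, which matters when `__wrapped__` holds a not-yet-awaited coroutine (as with `litellm.acompletion`). A toy sketch of the pattern with hypothetical names; note a coroutine can only be awaited once, so this supports a single delegated access:

```python
import asyncio

class _Inner:
    status = "ok"

async def _make_inner():
    return _Inner()

class TracedStream:
    def __init__(self, wrapped_coro):
        self.__wrapped__ = wrapped_coro  # a coroutine, not the object yet

    async def __getattr__(self, name):
        # Await the coroutine to get the real object, then delegate.
        return getattr(await self.__wrapped__, name)

async def main():
    stream = TracedStream(_make_inner())
    # Attribute access now yields a coroutine that must be awaited:
    print(await stream.status)  # -> ok

asyncio.run(main())
```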
openlit/instrumentation/openai/__init__.py
@@ -1,147 +1,142 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of OpenAI Functions"""
 from typing import Collection
 import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
-from openlit.instrumentation.openai.openai import …
-    [… removed import lines not preserved in this view …]
-from openlit.instrumentation.openai.async_openai import …
-    …
+from openlit.instrumentation.openai.openai import (
+    chat_completions, embedding, responses, chat_completions_parse,
+    image_generate, image_variatons, audio_create
+)
+from openlit.instrumentation.openai.async_openai import (
+    async_chat_completions, async_embedding, async_chat_completions_parse,
+    async_image_generate, async_image_variations, async_audio_create, async_responses
+)
 
 _instruments = ("openai >= 1.92.0",)
 
 class OpenAIInstrumentor(BaseInstrumentor):
-    """…
+    """
+    An instrumentor for OpenAI client library.
+    """
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-        …
-        environment = kwargs.get("environment")
+        version = importlib.metadata.version("openai")
+        environment = kwargs.get("environment", "default")
+        application_name = kwargs.get("application_name", "default")
         tracer = kwargs.get("tracer")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
         metrics = kwargs.get("metrics_dict")
-        pricing_info = kwargs.get("pricing_info")
-        capture_message_content = kwargs.get("capture_message_content")
         disable_metrics = kwargs.get("disable_metrics")
-        version = importlib.metadata.version("openai")
 
+        # chat completions
         wrap_function_wrapper(
             "openai.resources.chat.completions",
             "Completions.create",
             chat_completions(version, environment, application_name,
-                …
-                metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
             "openai.resources.chat.completions",
             "AsyncCompletions.create",
             async_chat_completions(version, environment, application_name,
-                …
-                metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
+        # chat completions parse
         wrap_function_wrapper(
-            "openai.resources.…
-            "…
-            …
-            …
-                metrics, disable_metrics),
+            "openai.resources.chat.completions",
+            "Completions.parse",
+            chat_completions_parse(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
-            "openai.resources.…
-            "…
-            …
-            …
-                metrics, disable_metrics),
+            "openai.resources.chat.completions",
+            "AsyncCompletions.parse",
+            async_chat_completions_parse(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
+        # responses
         wrap_function_wrapper(
-            "openai.resources.…
-            "…
-            …
-            …
-                metrics, disable_metrics),
+            "openai.resources.responses.responses",
+            "Responses.create",
+            responses(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
        )
 
         wrap_function_wrapper(
-            "openai.resources.…
-            "…
-            …
-            …
-                metrics, disable_metrics),
+            "openai.resources.responses.responses",
+            "AsyncResponses.create",
+            async_responses(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
+        # embeddings
         wrap_function_wrapper(
             "openai.resources.embeddings",
             "Embeddings.create",
             embedding(version, environment, application_name,
-                …
-                metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
             "openai.resources.embeddings",
             "AsyncEmbeddings.create",
             async_embedding(version, environment, application_name,
-                …
-                …
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        # image generation
+        wrap_function_wrapper(
+            "openai.resources.images",
+            "Images.generate",
+            image_generate(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
+        wrap_function_wrapper(
+            "openai.resources.images",
+            "AsyncImages.generate",
+            async_image_generate(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        # image variations
         wrap_function_wrapper(
             "openai.resources.images",
             "Images.create_variation",
-            image_variatons(version, …
-                …
-                tracer, pricing_info, capture_message_content,
-                metrics, disable_metrics),
+            image_variatons(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
             "openai.resources.images",
             "AsyncImages.create_variation",
-            …
-            …
-                tracer, pricing_info, capture_message_content,
-                metrics, disable_metrics),
+            async_image_variations(version, environment, application_name,
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
+        # audio generation
         wrap_function_wrapper(
             "openai.resources.audio.speech",
             "Speech.create",
             audio_create(version, environment, application_name,
-                …
-                metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         wrap_function_wrapper(
             "openai.resources.audio.speech",
             "AsyncSpeech.create",
             async_audio_create(version, environment, application_name,
-                …
-                metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "openai.resources.chat.completions",
-            "Completions.parse",
-            chat_completions_parse(version, environment, application_name, tracer, pricing_info,
-                capture_message_content, metrics, disable_metrics),
-        )
-
-        wrap_function_wrapper(
-            "openai.resources.chat.completions",
-            "AsyncCompletions.parse",
-            async_chat_completions_parse(version, environment, application_name, tracer, pricing_info,
-                capture_message_content, metrics, disable_metrics),
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
-    @staticmethod
     def _uninstrument(self, **kwargs):
         pass