openlit 1.33.18__py3-none-any.whl → 1.33.19__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +4 -34
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +9 -9
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +9 -9
- openlit/instrumentation/azure_ai_inference/utils.py +2 -2
- openlit/instrumentation/bedrock/__init__.py +2 -1
- openlit/instrumentation/bedrock/bedrock.py +30 -212
- openlit/instrumentation/bedrock/utils.py +252 -0
- {openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/METADATA +1 -1
- {openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/RECORD +11 -10
- {openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/LICENSE +0 -0
- {openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/WHEEL +0 -0
openlit/__helpers.py
CHANGED
@@ -237,27 +237,21 @@ def extract_and_format_input(messages):
     them into fixed roles like 'user', 'assistant', 'system', 'tool'.
     """

-    fixed_roles = ['user', 'assistant', 'system', 'tool']
-    # Initialize the dictionary with fixed keys and empty structures
+    fixed_roles = ['user', 'assistant', 'system', 'tool', 'developer']
     formatted_messages = {role_key: {'role': '', 'content': ''} for role_key in fixed_roles}

     for message in messages:
-        # Normalize the message structure
         message = response_as_dict(message)

-        # Extract role and content
         role = message.get('role')
         if role not in fixed_roles:
-            continue
+            continue

         content = message.get('content', '')

-        # Prepare content as a string
+        # Prepare content as a string, handling both list and str
         if isinstance(content, list):
-            content_str = ", ".join(
-                f'{item.get("type", "text")}: {extract_text_from_item(item)}'
-                for item in content
-            )
+            content_str = ", ".join(str(item) for item in content)
         else:
             content_str = content

@@ -272,30 +266,6 @@ def extract_and_format_input(messages):

     return formatted_messages

-def extract_text_from_item(item):
-    """
-    Extract text from inpit message
-    """
-
-    #pylint: disable=no-else-return
-    if item.get('type') == 'text':
-        return item.get('text', '')
-    elif item.get('type') == 'image':
-        # Handle image content specifically checking for 'url' or 'base64'
-        source = item.get('source', {})
-        if isinstance(source, dict):
-            if source.get('type') == 'base64':
-                # Return the actual base64 data if present
-                return source.get('data', '[Missing base64 data]')
-            elif source.get('type') == 'url':
-                return source.get('url', '[Missing URL]')
-    elif item.get('type') == 'image_url':
-        # New format: Handle the 'image_url' type
-        image_url = item.get('image_url', {})
-        if isinstance(image_url, dict):
-            return image_url.get('url', '[Missing image URL]')
-    return ''
-
 # To be removed one the change to log events (from span events) is complete
 def concatenate_all_contents(formatted_messages):
     """
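The reworked extract_and_format_input adds 'developer' to the accepted roles and stringifies list-type content with a plain str() join, replacing the removed extract_text_from_item helper. A minimal, self-contained sketch of the new behaviour; the response_as_dict normalisation and the final per-role assignment sit in unchanged lines outside these hunks and are approximated here:

# Illustrative sketch only; mirrors the new logic in openlit/__helpers.py,
# with response_as_dict assumed to be an identity function for plain dicts.
def extract_and_format_input_sketch(messages):
    fixed_roles = ['user', 'assistant', 'system', 'tool', 'developer']
    formatted_messages = {role_key: {'role': '', 'content': ''} for role_key in fixed_roles}

    for message in messages:
        role = message.get('role')
        if role not in fixed_roles:
            continue

        content = message.get('content', '')
        # Lists (e.g. multimodal parts) are now stringified wholesale
        # instead of being unpacked per item type.
        if isinstance(content, list):
            content_str = ", ".join(str(item) for item in content)
        else:
            content_str = content

        formatted_messages[role] = {'role': role, 'content': content_str}

    return formatted_messages

print(extract_and_format_input_sketch(
    [{'role': 'developer', 'content': 'Answer tersely.'},
     {'role': 'user', 'content': [{'type': 'text', 'text': 'Hi'}]}]
)['user']['content'])
# -> {'type': 'text', 'text': 'Hi'}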
openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py
CHANGED
@@ -43,10 +43,10 @@ def async_complete(version, environment, application_name,
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
-            self._llmresponse =
-            self._response_id =
-            self._response_model =
-            self._finish_reason =
+            self._llmresponse = ''
+            self._response_id = ''
+            self._response_model = ''
+            self._finish_reason = ''
             self._input_tokens = 0
             self._output_tokens = 0

@@ -96,7 +96,7 @@ def async_complete(version, environment, application_name,

            except Exception as e:
                handle_exception(self._span, e)
-               logger.error(
+               logger.error('Error in trace creation: %s', e)
                raise

    async def wrapper(wrapped, instance, args, kwargs):
@@ -104,11 +104,11 @@ def async_complete(version, environment, application_name,
        Wraps the GenAI function call.
        """

-       streaming = kwargs.get(
-       server_address, server_port = set_server_address_and_port(instance,
-       request_model = kwargs.get(
+       streaming = kwargs.get('stream', False)
+       server_address, server_port = set_server_address_and_port(instance, 'models.github.ai', 443)
+       request_model = kwargs.get('model', 'gpt-4o')

-       span_name = f
+       span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'

        # pylint: disable=no-else-return
        if streaming:
openlit/instrumentation/azure_ai_inference/azure_ai_inference.py
CHANGED
@@ -43,10 +43,10 @@ def complete(version, environment, application_name,
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
-            self._llmresponse =
-            self._response_id =
-            self._response_model =
-            self._finish_reason =
+            self._llmresponse = ''
+            self._response_id = ''
+            self._response_model = ''
+            self._finish_reason = ''
             self._input_tokens = 0
             self._output_tokens = 0

@@ -96,7 +96,7 @@ def complete(version, environment, application_name,

            except Exception as e:
                handle_exception(self._span, e)
-               logger.error(
+               logger.error('Error in trace creation: %s', e)
                raise

    def wrapper(wrapped, instance, args, kwargs):
@@ -104,11 +104,11 @@ def complete(version, environment, application_name,
        Wraps the GenAI function call.
        """

-       streaming = kwargs.get(
-       server_address, server_port = set_server_address_and_port(instance,
-       request_model = kwargs.get(
+       streaming = kwargs.get('stream', False)
+       server_address, server_port = set_server_address_and_port(instance, 'models.github.ai', 443)
+       request_model = kwargs.get('model', 'gpt-4o')

-       span_name = f
+       span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'

        # pylint: disable=no-else-return
        if streaming:
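In both the synchronous and asynchronous wrappers the previously truncated calls are completed with explicit fallbacks: stream defaults to False, the server address/port to 'models.github.ai':443, the request model to 'gpt-4o', and the span name is rebuilt from the chat operation type plus the model. A small runnable sketch of that default resolution, assuming SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT resolves to the literal 'chat':

# Hedged sketch of the fallback handling visible in the hunks above.
def resolve_request_defaults(kwargs):
    streaming = kwargs.get('stream', False)
    request_model = kwargs.get('model', 'gpt-4o')
    span_name = f'chat {request_model}'  # GEN_AI_OPERATION_TYPE_CHAT assumed to be 'chat'
    return streaming, request_model, span_name

print(resolve_request_defaults({'model': 'gpt-4o-mini', 'stream': True}))
# -> (True, 'gpt-4o-mini', 'chat gpt-4o-mini')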
openlit/instrumentation/azure_ai_inference/utils.py
CHANGED
@@ -76,9 +76,9 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
     scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_K, scope._kwargs.get('top_k', 1.0))
     scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_TOP_P, scope._kwargs.get('top_p', 1.0))
     scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                              scope._kwargs.get(
+                              scope._kwargs.get('frequency_penalty', 0.0))
     scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                              scope._kwargs.get(
+                              scope._kwargs.get('presence_penalty', 0.0))
     scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
     scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID, scope._response_id)
     scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
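The two completed calls give the frequency and presence penalty request attributes explicit 0.0 defaults, so they are recorded even when the caller never sets them. A one-line illustration of the fallback, with the span call replaced by a print and hypothetical request kwargs:

kwargs = {'temperature': 0.2}  # neither penalty supplied by the caller
print(kwargs.get('frequency_penalty', 0.0), kwargs.get('presence_penalty', 0.0))  # -> 0.0 0.0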
openlit/instrumentation/bedrock/__init__.py
CHANGED
@@ -22,6 +22,7 @@ class BedrockInstrumentor(BaseInstrumentor):
         application_name = kwargs.get("application_name", "default_application")
         environment = kwargs.get("environment", "default_environment")
         tracer = kwargs.get("tracer")
+        event_provider = kwargs.get('event_provider')
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
         capture_message_content = kwargs.get("capture_message_content", False)
@@ -33,7 +34,7 @@ class BedrockInstrumentor(BaseInstrumentor):
             "botocore.client",
             "ClientCreator.create_client",
             converse(version, environment, application_name,
-                     tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+                     tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):
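_instrument now pulls an event_provider out of its kwargs and threads it into the converse wrapper factory; the patch itself is still applied through wrapt's wrap_function_wrapper against botocore's client factory. A reduced sketch of that wiring, with a no-op stand-in for the real converse() factory (requires botocore to be importable):

from wrapt import wrap_function_wrapper

def make_wrapper(event_provider):
    # Stand-in for openlit's converse() factory; the real wrapper additionally
    # patches the created client's converse method and emits events via event_provider.
    def wrapper(wrapped, instance, args, kwargs):
        client = wrapped(*args, **kwargs)  # original ClientCreator.create_client
        return client
    return wrapper

wrap_function_wrapper(
    'botocore.client',
    'ClientCreator.create_client',
    make_wrapper(event_provider=None),
)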
openlit/instrumentation/bedrock/bedrock.py
CHANGED
@@ -4,96 +4,33 @@ Module for monitoring Amazon Bedrock API calls.

 import logging
 import time
-from
-from botocore.exceptions import ReadTimeoutError, ResponseStreamingError
-from urllib3.exceptions import ProtocolError as URLLib3ProtocolError
-from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
-from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import SpanKind
 from openlit.__helpers import (
-    get_chat_model_cost,
-    handle_exception,
-    response_as_dict,
-    create_metrics_attributes,
     set_server_address_and_port
 )
+from openlit.instrumentation.bedrock.utils import (
+    process_chat_response,
+)
 from openlit.semcov import SemanticConvetion

 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)

-
-    """Handle streaming responses with the ability to read multiple times."""
-
-    def __init__(self, stream_source, length):
-        super().__init__(stream_source, length)
-        self._stream_data = None
-        self._read_position = 0
-
-    def read(self, amt=None):
-        if self._stream_data is None:
-            try:
-                self._stream_data = self._raw_stream.read()
-            except URLLib3ReadTimeoutError as error:
-                raise ReadTimeoutError(endpoint_url=error.url, error=error) from error
-            except URLLib3ProtocolError as error:
-                raise ResponseStreamingError(error=error) from error
-
-        self._amount_read += len(self._stream_data)
-        if amt is None or (not self._stream_data and amt > 0):
-            self._verify_content_length()
-
-        if amt is None:
-            data_chunk = self._stream_data[self._read_position:]
-        else:
-            data_start = self._read_position
-            self._read_position += amt
-            data_chunk = self._stream_data[data_start:self._read_position]
-
-        return data_chunk
-
-def converse(version, environment, application_name, tracer,
+def converse(version, environment, application_name, tracer, event_provider,
              pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for
-
-    Args:
-        gen_ai_endpoint: Endpoint identifier for logging and tracing.
-        version: The monitoring package version.
-        environment: Deployment environment (e.g. production, staging).
-        application_name: Name of the application using the Bedrock API.
-        tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information for calculating Bedrock usage cost.
-        capture_message_content: Whether to trace the actual content.
-        metrics: Metrics collector.
-        disable_metrics: Flag to toggle metrics collection.
-    Returns:
-        A function that wraps the chat method to add telemetry.
+    Generates a telemetry wrapper for GenAI function call
     """

     def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps
-
-        Args:
-            wrapped: Original method.
-            instance: Instance of the class.
-            args: Positional arguments of the 'messages' method.
-            kwargs: Keyword arguments of the 'messages' method.
-        Returns:
-            Response from the original method.
+        Wraps the GenAI function call.
         """

         def converse_wrapper(original_method, *method_args, **method_kwargs):
-            """
-            Adds instrumentation to the invoke model call.

-
-
-            *method_args: Positional arguments for the method.
-            **method_kwargs: Keyword arguments for the method.
-            Returns:
-                The modified response with telemetry.
+            """
+            Wraps the GenAI function call.
             """

             server_address, server_port = set_server_address_and_port(instance, 'aws.amazon.com', 443)
@@ -104,146 +41,27 @@ def converse(version, environment, application_name, tracer,
             with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
                 start_time = time.time()
                 response = original_method(*method_args, **method_kwargs)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model, pricing_info,
-                                               input_tokens, output_tokens)
-
-                    llm_response = response_dict.get('output').get('message').get('content')[0].get('text')
-
-                    # Set base span attribues (OTel Semconv)
-                    span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
-                    span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
-                                       SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
-                                       SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                                       request_model)
-                    span.set_attribute(SemanticConvetion.SERVER_PORT,
-                                       server_port)
-
-                    inference_config = method_kwargs.get('inferenceConfig', {})
-
-                    # List of attributes and their config keys
-                    attributes = [
-                        (SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'frequencyPenalty'),
-                        (SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, 'maxTokens'),
-                        (SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY, 'presencePenalty'),
-                        (SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, 'stopSequences'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TOP_P, 'topP'),
-                        (SemanticConvetion.GEN_AI_REQUEST_TOP_K, 'topK'),
-                    ]
-
-                    # Set each attribute if the corresponding value exists and is not None
-                    for attribute, key in attributes:
-                        value = inference_config.get(key)
-                        if value is not None:
-                            span.set_attribute(attribute, value)
-
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID,
-                                       response_dict.get('ResponseMetadata').get('RequestId'))
-                    span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
-                                       request_model)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
-                                       input_tokens)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
-                                       output_tokens)
-                    span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
-                                       server_address)
-                    if isinstance(llm_response, str):
-                        span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                           'text')
-                    else:
-                        span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
-                                           'json')
-
-                    # Set base span attribues (Extras)
-                    span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                                       environment)
-                    span.set_attribute(SERVICE_NAME,
-                                       application_name)
-                    span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
-                                       False)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                                       input_tokens + output_tokens)
-                    span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
-                                       cost)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
-                                       end_time - start_time)
-                    span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
-                                       version)
-
-                    if capture_message_content:
-                        span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-                        span.add_event(
-                            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
-                            attributes={
-                                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: llm_response,
-                            },
-                        )
-
-                    span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK,
-                            request_model=request_model,
-                            server_address=server_address,
-                            server_port=server_port,
-                            response_model=request_model,
-                        )
-
-                        metrics['genai_client_usage_tokens'].record(
-                            input_tokens + output_tokens, attributes
-                        )
-                        metrics['genai_client_operation_duration'].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics['genai_server_ttft'].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics['genai_requests'].add(1, attributes)
-                        metrics['genai_completion_tokens'].add(output_tokens, attributes)
-                        metrics['genai_prompt_tokens'].add(input_tokens, attributes)
-                        metrics['genai_cost'].record(cost, attributes)
-
-                    return response
-
-                except Exception as e:
-                    handle_exception(span, e)
-                    logger.error('Error in trace creation: %s', e)
-
-                    # Return original response
-                    return response
+                llm_config = method_kwargs.get('inferenceConfig', {})
+                response = process_chat_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    event_provider=event_provider,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    llm_config=llm_config,
+                    **method_kwargs
+                )
+
+                return response

         # Get the original client instance from the wrapper
         client = wrapped(*args, **kwargs)
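After this refactor converse() only times the call and hands the raw response, the request's inferenceConfig (as llm_config) and the original kwargs to utils.process_chat_response. The call being instrumented is botocore's Converse API; a typical invocation that the wrapper would intercept looks roughly like this (region and model ID are placeholders):

import boto3

client = boto3.client('bedrock-runtime', region_name='us-east-1')
response = client.converse(
    modelId='anthropic.claude-3-haiku-20240307-v1:0',
    messages=[{'role': 'user', 'content': [{'text': 'Say hello.'}]}],
    inferenceConfig={'maxTokens': 128, 'temperature': 0.5, 'topP': 0.9},
)
# The instrumentation reads usage.inputTokens / usage.outputTokens, stopReason
# and ResponseMetadata.RequestId from this response dict.
print(response['output']['message']['content'][0]['text'])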
openlit/instrumentation/bedrock/utils.py
ADDED
@@ -0,0 +1,252 @@
+"""
+AWS Bedrock OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    calculate_ttft,
+    response_as_dict,
+    calculate_tbt,
+    extract_and_format_input,
+    get_chat_model_cost,
+    create_metrics_attributes,
+    otel_event,
+    concatenate_all_contents
+)
+from openlit.semcov import SemanticConvetion
+
+def process_chunk(self, chunk):
+    """
+    Process a chunk of response data and update state.
+    """
+
+    end_time = time.time()
+    # Record the timestamp for the current chunk
+    self._timestamps.append(end_time)
+
+    if len(self._timestamps) == 1:
+        # Calculate time to first chunk
+        self._ttft = calculate_ttft(self._timestamps, self._start_time)
+
+    chunked = response_as_dict(chunk)
+
+    # Collect message IDs and input token from events
+    if chunked.get('type') == 'message_start':
+        self._response_id = chunked.get('message').get('id')
+        self._input_tokens = chunked.get('message').get('usage').get('input_tokens')
+        self._response_model = chunked.get('message').get('model')
+        self._response_role = chunked.get('message').get('role')
+
+    # Collect message IDs and aggregated response from events
+    if chunked.get('type') == 'content_block_delta':
+        if chunked.get('delta').get('text'):
+            self._llmresponse += chunked.get('delta').get('text')
+        elif chunked.get('delta').get('partial_json'):
+            self._tool_arguments += chunked.get('delta').get('partial_json')
+
+    if chunked.get('type') == 'content_block_start':
+        if chunked.get('content_block').get('id'):
+            self._tool_id = chunked.get('content_block').get('id')
+        if chunked.get('content_block').get('name'):
+            self._tool_name = chunked.get('content_block').get('name')
+
+    # Collect output tokens and stop reason from events
+    if chunked.get('type') == 'message_delta':
+        self._output_tokens = chunked.get('usage').get('output_tokens')
+        self._finish_reason = chunked.get('delta').get('stop_reason')
+
+def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+                      event_provider, capture_message_content, disable_metrics, version, llm_config, is_stream):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    scope._end_time = time.time()
+    if len(scope._timestamps) > 1:
+        scope._tbt = calculate_tbt(scope._timestamps)
+
+    formatted_messages = extract_and_format_input(scope._kwargs.get('messages', ''))
+    print(formatted_messages)
+    request_model = scope._kwargs.get('model', 'claude-3-opus-20240229')
+
+    cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
+
+    # Set Span attributes (OTel Semconv)
+    scope._span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvetion.SERVER_PORT, scope._server_port)
+
+    # List of attributes and their config keys
+    attributes = [
+        (SemanticConvetion.GEN_AI_REQUEST_FREQUENCY_PENALTY, 'frequencyPenalty'),
+        (SemanticConvetion.GEN_AI_REQUEST_MAX_TOKENS, 'maxTokens'),
+        (SemanticConvetion.GEN_AI_REQUEST_PRESENCE_PENALTY, 'presencePenalty'),
+        (SemanticConvetion.GEN_AI_REQUEST_STOP_SEQUENCES, 'stopSequences'),
+        (SemanticConvetion.GEN_AI_REQUEST_TEMPERATURE, 'temperature'),
+        (SemanticConvetion.GEN_AI_REQUEST_TOP_P, 'topP'),
+        (SemanticConvetion.GEN_AI_REQUEST_TOP_K, 'topK'),
+    ]
+
+    # Set each attribute if the corresponding value exists and is not None
+    for attribute, key in attributes:
+        value = llm_config.get(key)
+        if value is not None:
+            scope._span.set_attribute(attribute, value)
+
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_ID, scope._response_id)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, scope._response_model)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+    scope._span.set_attribute(SemanticConvetion.SERVER_ADDRESS, scope._server_address)
+
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+                              'text' if isinstance(scope._llmresponse, str) else 'json')
+
+    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    scope._span.set_attribute(SERVICE_NAME, application_name)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TBT, scope._tbt)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, scope._ttft)
+    scope._span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
+
+    # To be removed one the change to log events (from span events) is complete
+    prompt = concatenate_all_contents(formatted_messages)
+    if capture_message_content:
+        scope._span.add_event(
+            name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvetion.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+            },
+        )
+
+    choice_event_body = {
+        'finish_reason': scope._finish_reason,
+        'index': 0,
+        'message': {
+            **({'content': scope._llmresponse} if capture_message_content else {}),
+            'role': scope._response_role
+        }
+    }
+
+    # Emit events
+    for role in ['user', 'system', 'assistant', 'tool']:
+        if formatted_messages.get(role, {}).get('content', ''):
+            event = otel_event(
+                name=getattr(SemanticConvetion, f'GEN_AI_{role.upper()}_MESSAGE'),
+                attributes={
+                    SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK
+                },
+                body = {
+                    # pylint: disable=line-too-long
+                    **({'content': formatted_messages.get(role, {}).get('content', '')} if capture_message_content else {}),
+                    'role': formatted_messages.get(role, {}).get('role', []),
+                    **({
+                        'tool_calls': {
+                            'function': {
+                                # pylint: disable=line-too-long
+                                'name': (scope._tool_calls[0].get('function', {}).get('name', '') if scope._tool_calls else ''),
+                                'arguments': (scope._tool_calls[0].get('function', {}).get('arguments', '') if scope._tool_calls else '')
+                            },
+                            'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else ''),
+                            'type': 'function'
+                        }
+                    } if role == 'assistant' else {}),
+                    **({
+                        'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else '')
+                    } if role == 'tool' else {})
+                }
+            )
+            event_provider.emit(event)
+
+    choice_event = otel_event(
+        name=SemanticConvetion.GEN_AI_CHOICE,
+        attributes={
+            SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK
+        },
+        body=choice_event_body
+    )
+    event_provider.emit(choice_event)
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    if not disable_metrics:
+        metrics_attributes = create_metrics_attributes(
+            service_name=application_name,
+            deployment_environment=environment,
+            operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
+            system=SemanticConvetion.GEN_AI_SYSTEM_AWS_BEDROCK,
+            request_model=request_model,
+            server_address=scope._server_address,
+            server_port=scope._server_port,
+            response_model=scope._response_model,
+        )
+
+        metrics['genai_client_usage_tokens'].record(scope._input_tokens + scope._output_tokens, metrics_attributes)
+        metrics['genai_client_operation_duration'].record(scope._end_time - scope._start_time, metrics_attributes)
+        metrics['genai_server_tbt'].record(scope._tbt, metrics_attributes)
+        metrics['genai_server_ttft'].record(scope._ttft, metrics_attributes)
+        metrics['genai_requests'].add(1, metrics_attributes)
+        metrics['genai_completion_tokens'].add(scope._output_tokens, metrics_attributes)
+        metrics['genai_prompt_tokens'].add(scope._input_tokens, metrics_attributes)
+        metrics['genai_cost'].record(cost, metrics_attributes)
+
+def process_streaming_chat_response(self, pricing_info, environment, application_name, metrics,
+                                    event_provider, capture_message_content=False, disable_metrics=False, version='', llm_config=''):
+
+    """
+    Process chat request and generate Telemetry
+    """
+    if self._tool_id != '':
+        self._tool_calls = {
+            'id': self._tool_id,
+            'name': self._tool_name,
+            'input': self._tool_arguments
+        }
+
+    common_chat_logic(self, pricing_info, environment, application_name, metrics,
+                      event_provider, capture_message_content, disable_metrics, version, llm_config, is_stream=True)
+
+def process_chat_response(response, request_model, pricing_info, server_port, server_address, environment,
+                          application_name, metrics, event_provider, start_time, span, capture_message_content=False,
+                          disable_metrics=False, version='1.0.0', llm_config='', **kwargs):
+
+    """
+    Process chat request and generate Telemetry
+    """
+
+    self = type('GenericScope', (), {})()
+    response_dict = response_as_dict(response)
+
+    # pylint: disable = no-member
+    self._start_time = start_time
+    self._end_time = time.time()
+    self._span = span
+    self._llmresponse = response_dict.get('output').get('message').get('content')[0].get('text')
+    self._response_role = 'assistant'
+    self._input_tokens = response_dict.get('usage').get('inputTokens')
+    self._output_tokens = response_dict.get('usage').get('outputTokens')
+    self._response_model = request_model
+    self._finish_reason = response_dict.get('stopReason', '')
+    self._response_id = response_dict.get('ResponseMetadata').get('RequestId')
+    self._timestamps = []
+    self._ttft, self._tbt = self._end_time - self._start_time, 0
+    self._server_address, self._server_port = server_address, server_port
+    self._kwargs = kwargs
+    common_chat_logic(self, pricing_info, environment, application_name, metrics,
+                      event_provider, capture_message_content, disable_metrics, version, llm_config, is_stream=False)
+
+    return response
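process_chat_response builds its scope with type('GenericScope', (), {})(): an anonymous class instantiated on the spot so arbitrary attributes can be attached and handed to common_chat_logic, mirroring the attribute names a streaming handler accumulates on self. The pattern in isolation:

import time

# Ad-hoc attribute container, the same trick used in bedrock/utils.py.
scope = type('GenericScope', (), {})()
scope._start_time = time.time()
scope._input_tokens = 10
scope._output_tokens = 25
scope._ttft = time.time() - scope._start_time

# Any function receiving `scope` can read or add attributes freely, which is
# how one code path serves both streaming and non-streaming responses.
print(scope._input_tokens + scope._output_tokens)  # -> 35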
{openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.33.
+Version: 1.33.19
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-openlit/__helpers.py,sha256=
+openlit/__helpers.py,sha256=ob20aAkpzPLn2cjaPMCf-_fwt-8YNJUobdT3f6xEj58,9061
 openlit/__init__.py,sha256=iHGwg8XB2DhNVCktU3FLFqubAOiQVQCp1F7L7OHp6cg,23921
 openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
 openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
@@ -29,11 +29,12 @@ openlit/instrumentation/astra/astra.py,sha256=JH2-7RJBbk6nM9kBEVgbxCXXnzgTuGT0Ko
 openlit/instrumentation/astra/async_astra.py,sha256=mMG22exgduREIe-7s2TdqLM1Ub8wP_ttcIS8wJH5P1Y,1625
 openlit/instrumentation/astra/utils.py,sha256=-Af5R_g8-x9XeQiepLBW3Qa3Beji4EMxppDtiE_nmzM,4933
 openlit/instrumentation/azure_ai_inference/__init__.py,sha256=ZoMAX_MUNCNMJqLZgl0A_kQ_lsgoz3VddkHiDT3pVF8,2032
-openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py,sha256=
-openlit/instrumentation/azure_ai_inference/azure_ai_inference.py,sha256=
-openlit/instrumentation/azure_ai_inference/utils.py,sha256=
-openlit/instrumentation/bedrock/__init__.py,sha256=
-openlit/instrumentation/bedrock/bedrock.py,sha256=
+openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py,sha256=VPp0CF8GOtwFFJc_XggZVGq1wxjCs0VdsQIWK6cZ2AU,5056
+openlit/instrumentation/azure_ai_inference/azure_ai_inference.py,sha256=82Jv5KYaRSjIrG_6WldUhRVx1XqPhDqnisJ93uS08Kw,4970
+openlit/instrumentation/azure_ai_inference/utils.py,sha256=BLjWqRiwu1aYompMkxY5Jxvz-o4uL0UbCbWK_0PmncY,10997
+openlit/instrumentation/bedrock/__init__.py,sha256=Sfd0vm4Dfm1t-N7vBPRwU57GLTlZP2M4rVYRek_JHXY,1625
+openlit/instrumentation/bedrock/bedrock.py,sha256=e2wG9l7j5CaBQ7Uq074nGwWx5h1vhANoXzJOGSWvW7I,2815
+openlit/instrumentation/bedrock/utils.py,sha256=OFwMcH0F12XklRV4Vtq6agIPYI7xO4nXGbRml0EaVE8,11597
 openlit/instrumentation/chroma/__init__.py,sha256=4ZeHY1OInRKQbb4qg8BVvGJtWN1XdzW6mosqi7-6ruE,3353
 openlit/instrumentation/chroma/chroma.py,sha256=Ar0IYfNtCzFbtBl_irn6xpsKlyAPu5TZ_LYpttW1ixk,10583
 openlit/instrumentation/cohere/__init__.py,sha256=TIRq1obu-zqBji0HhMbFGfI2q5m-zw0nWbToKeZqpg4,2905
@@ -124,7 +125,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=Iwx6baEiCZPNqsFf92K5mDWU8are8DOF0uQAuNZsCKg,6826
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
 openlit/semcov/__init__.py,sha256=lM0Y3wMYYmCvfcNGD3k0xSn1XZUiGw-bKgCuwcGsOp8,13302
-openlit-1.33.
-openlit-1.33.
-openlit-1.33.
-openlit-1.33.
+openlit-1.33.19.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.33.19.dist-info/METADATA,sha256=VLxhVb9GGUeVPxv5V_ZjxNzfMMuJJqOF1ruVOEbNiAU,23471
+openlit-1.33.19.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+openlit-1.33.19.dist-info/RECORD,,
{openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/LICENSE
File without changes
{openlit-1.33.18.dist-info → openlit-1.33.19.dist-info}/WHEEL
File without changes
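For reference, each RECORD entry has the form path,sha256=<digest>,<size>, where the digest is the URL-safe base64 encoding of the file's SHA-256 hash with the trailing '=' padding stripped. A short way to recompute an entry for verification (the path is a placeholder):

import base64
import hashlib

def record_hash(path):
    # URL-safe base64 of the SHA-256 digest, padding removed, as used in wheel RECORD files.
    digest = hashlib.sha256(open(path, 'rb').read()).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode()

print(record_hash('openlit/__helpers.py'))  # expected: ob20aAkpzPLn2cjaPMCf-_fwt-8YNJUobdT3f6xEj58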