openlit 1.34.10__py3-none-any.whl → 1.34.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +23 -0
- openlit/instrumentation/groq/__init__.py +7 -9
- openlit/instrumentation/groq/async_groq.py +50 -374
- openlit/instrumentation/groq/groq.py +49 -373
- openlit/instrumentation/groq/utils.py +199 -0
- openlit/instrumentation/ollama/async_ollama.py +3 -2
- openlit/instrumentation/ollama/ollama.py +3 -2
- openlit/instrumentation/ollama/utils.py +10 -6
- openlit/instrumentation/premai/utils.py +3 -73
- openlit/instrumentation/reka/utils.py +3 -51
- {openlit-1.34.10.dist-info → openlit-1.34.11.dist-info}/METADATA +1 -1
- {openlit-1.34.10.dist-info → openlit-1.34.11.dist-info}/RECORD +14 -13
- {openlit-1.34.10.dist-info → openlit-1.34.11.dist-info}/LICENSE +0 -0
- {openlit-1.34.10.dist-info → openlit-1.34.11.dist-info}/WHEEL +0 -0
openlit/__helpers.py
CHANGED

```diff
@@ -379,3 +379,26 @@ def record_completion_metrics(metrics, gen_ai_operation, gen_ai_system, server_a
     metrics["genai_completion_tokens"].add(output_tokens, attributes)
     metrics["genai_prompt_tokens"].add(input_tokens, attributes)
     metrics["genai_cost"].record(cost, attributes)
+
+def record_embedding_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time,
+    input_tokens, cost):
+    """
+    Record embedding-specific metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_usage_tokens"].record(input_tokens, attributes)
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
```
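The new `record_embedding_metrics` mirrors the existing `record_completion_metrics` but records only input tokens, since embedding calls produce no completion output; the slimmed-down `premai` and `reka` utils in this release are the likely consumers. A minimal sketch of a call site, assuming `metrics` is openlit's dict of OTel instruments; the attribute values below are hypothetical:

```python
import time

start_time = time.time()
# ... the embeddings request itself would run and be timed here ...
end_time = time.time()

# Hypothetical values; real callers pass SemanticConvention constants
# for the operation and system names.
record_embedding_metrics(
    metrics,
    gen_ai_operation="embedding",
    gen_ai_system="groq",
    server_address="api.groq.com",
    server_port=443,
    request_model="example-embedding-model",
    response_model="example-embedding-model",
    environment="production",
    application_name="my-app",
    start_time=start_time,
    end_time=end_time,
    input_tokens=128,
    cost=0.0001,
)
```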
openlit/instrumentation/groq/__init__.py
CHANGED

```diff
@@ -1,4 +1,3 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of Groq Functions"""
 
 from typing import Collection
@@ -13,15 +12,15 @@ _instruments = ("groq >= 0.5.0",)
 
 class GroqInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for Groq
+    An instrumentor for Groq client library.
     """
 
     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments
 
     def _instrument(self, **kwargs):
-        application_name = kwargs.get("application_name", "
-        environment = kwargs.get("environment", "
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
@@ -29,22 +28,21 @@ class GroqInstrumentor(BaseInstrumentor):
 
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("groq")
 
-        #
+        # Chat completions
         wrap_function_wrapper(
             "groq.resources.chat.completions",
             "Completions.create",
             chat(version, environment, application_name,
-
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
-        #
+        # Chat completions
         wrap_function_wrapper(
             "groq.resources.chat.completions",
             "AsyncCompletions.create",
             async_chat(version, environment, application_name,
-
+                tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
         pass
```
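Both patches use wrapt's module-level monkey-patching: a factory closes over the telemetry configuration and returns a wrapper with wrapt's `(wrapped, instance, args, kwargs)` signature. A minimal sketch of that pattern, assuming the `groq` package is importable; `telemetry_chat` is a hypothetical stand-in for openlit's `chat`/`async_chat` factories:

```python
from wrapt import wrap_function_wrapper

def telemetry_chat(version, environment, application_name):
    """Factory that closes over config and returns a wrapt-style wrapper."""
    def wrapper(wrapped, instance, args, kwargs):
        # A real wrapper opens a span and records metrics around this call;
        # this sketch only delegates to the original method.
        return wrapped(*args, **kwargs)
    return wrapper

# Patch Completions.create in place, as the instrumentor above does.
wrap_function_wrapper(
    "groq.resources.chat.completions",
    "Completions.create",
    telemetry_chat("1.34.11", "default", "default"),
)
```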
openlit/instrumentation/groq/async_groq.py
CHANGED

```diff
@@ -1,57 +1,36 @@
 """
-Module for monitoring Groq API calls.
+Module for monitoring Groq API calls (async version).
 """
 
-import logging
 import time
-from opentelemetry.trace import SpanKind
-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import SpanKind
 from openlit.__helpers import (
-    get_chat_model_cost,
     handle_exception,
-    response_as_dict,
-    calculate_ttft,
-    calculate_tbt,
-    create_metrics_attributes,
     set_server_address_and_port
 )
+from openlit.instrumentation.groq.utils import (
+    process_chunk,
+    process_streaming_chat_response,
+    process_chat_response
+)
 from openlit.semcov import SemanticConvention
 
-
-
-
-def async_chat(version, environment, application_name,
-               tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+def async_chat(version, environment, application_name, tracer, pricing_info,
+    capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for
-
-    Args:
-        version: Version of the monitoring package.
-        environment: Deployment environment (e.g., production, staging).
-        application_name: Name of the application using the Groq API.
-        tracer: OpenTelemetry tracer for creating spans.
-        pricing_info: Information used for calculating the cost of Groq usage.
-        capture_message_content: Flag indicating whether to trace the actual content.
-
-    Returns:
-        A function that wraps the chat completions method to add telemetry.
+    Generates a telemetry wrapper for GenAI function call
     """
 
     class TracedAsyncStream:
         """
-        Wrapper for streaming responses to collect
-        Wraps the response to collect message IDs and aggregated response.
-
-        This class implements the '__aiter__' and '__anext__' methods that
-        handle asynchronous streaming responses.
-
-        This class also implements '__aenter__' and '__aexit__' methods that
-        handle asynchronous context management protocol.
+        Wrapper for async streaming responses to collect telemetry.
         """
+
         def __init__(
             self,
             wrapped,
             span,
+            span_name,
             kwargs,
             server_address,
             server_port,
```
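The inlined chunk parsing and span/metric assembly move into the new `openlit/instrumentation/groq/utils.py` (+199 lines, not shown in this view). Since that file's contents are not visible here, the following is only an inferred sketch of what a `process_chunk` helper along these lines could look like, reconstructed from the state fields the stream wrapper keeps; it is not the actual utils.py implementation:

```python
def process_chunk(scope, chunk):
    """Accumulate streaming state on the wrapper object (inferred sketch)."""
    # groq SDK chunks are pydantic models; fall back to dict() for plain mappings.
    chunked = chunk.model_dump() if hasattr(chunk, "model_dump") else dict(chunk)

    choices = chunked.get("choices") or []
    if choices and "content" in (choices[0].get("delta") or {}):
        content = choices[0]["delta"]["content"]
        if content:
            scope._llmresponse += content

    # Usage arrives near the end of the stream; capture identifiers alongside it.
    if chunked.get("usage"):
        scope._input_tokens = chunked["usage"].get("prompt_tokens", 0)
        scope._output_tokens = chunked["usage"].get("completion_tokens", 0)
    scope._response_id = chunked.get("id", "")
    scope._response_model = chunked.get("model", "")
    if choices:
        scope._finish_reason = choices[0].get("finish_reason")
    scope._system_fingerprint = chunked.get("system_fingerprint", "")
```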
```diff
@@ -59,15 +38,15 @@ def async_chat(version, environment, application_name,
         ):
             self.__wrapped__ = wrapped
             self._span = span
-
+            self._span_name = span_name
             self._llmresponse = ""
             self._response_id = ""
             self._response_model = ""
             self._finish_reason = ""
+            self._tools = None
             self._system_fingerprint = ""
             self._input_tokens = 0
             self._output_tokens = 0
-
             self._args = args
             self._kwargs = kwargs
             self._start_time = time.time()
```
```diff
@@ -95,373 +74,70 @@ def async_chat(version, environment, application_name,
         async def __anext__(self):
             try:
                 chunk = await self.__wrapped__.__anext__()
-
-                # Record the timestamp for the current chunk
-                self._timestamps.append(end_time)
-
-                if len(self._timestamps) == 1:
-                    # Calculate time to first chunk
-                    self._ttft = calculate_ttft(self._timestamps, self._start_time)
-
-                chunked = response_as_dict(chunk)
-                # Collect message IDs and aggregated response from events
-                if (len(chunked.get('choices')) > 0 and ('delta' in chunked.get('choices')[0] and
-                    'content' in chunked.get('choices')[0].get('delta'))):
-
-                    content = chunked.get('choices')[0].get('delta').get('content')
-                    if content:
-                        self._llmresponse += content
-
-                if chunked.get('usage'):
-                    self._input_tokens = chunked.get('usage').get('prompt_tokens')
-                    self._output_tokens = chunked.get('usage').get('completion_tokens')
-                self._response_id = chunked.get('id')
-                self._response_model = chunked.get('model')
-                self._finish_reason = chunked.get('choices')[0].get('finish_reason')
-                self._system_fingerprint = chunked.get('system_fingerprint')
+                process_chunk(self, chunk)
                 return chunk
             except StopAsyncIteration:
-                # Handling exception ensure observability without disrupting operation
                 try:
-                    self.
-
-
-
-
-
-
-
-
-
-
-                        if isinstance(content, list):
-                            content_str_list = []
-                            for item in content:
-                                if item["type"] == "text":
-                                    content_str_list.append(f'text: {item["text"]}')
-                                elif (item["type"] == "image_url" and
-                                    not item["image_url"]["url"].startswith("data:")):
-                                    content_str_list.append(f'image_url: {item["image_url"]["url"]}')
-                            content_str = ", ".join(content_str_list)
-                            formatted_messages.append(f"{role}: {content_str}")
-                        else:
-                            formatted_messages.append(f"{role}: {content}")
-                    prompt = "\n".join(formatted_messages)
-
-                    request_model = self._kwargs.get("model", "gpt-4o")
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model,
-                        pricing_info, self._input_tokens,
-                        self._output_tokens)
-
-                    # Set Span attributes (OTel Semconv)
-                    self._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                    self._span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
-                        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM,
-                        SemanticConvention.GEN_AI_SYSTEM_GROQ)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                        request_model)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                        self._kwargs.get("seed", ""))
-                    self._span.set_attribute(SemanticConvention.SERVER_PORT,
-                        self._server_port)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                        self._kwargs.get("frequency_penalty", 0.0))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
-                        self._kwargs.get("max_completion_tokens", -1))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                        self._kwargs.get("presence_penalty", 0.0))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                        self._kwargs.get("stop", []))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                        self._kwargs.get("temperature", 1.0))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                        self._kwargs.get("top_p", 1.0))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
-                        [self._finish_reason])
-                    self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,
-                        self._response_id)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,
-                        self._response_model)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS,
-                        self._input_tokens)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS,
-                        self._output_tokens)
-                    self._span.set_attribute(SemanticConvention.SERVER_ADDRESS,
-                        self._server_address)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
-                        self._kwargs.get("service_tier", "on_demand"))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
-                        self._system_fingerprint)
-                    if isinstance(self._llmresponse, str):
-                        self._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-                            "text")
-                    else:
-                        self._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-                            "json")
-
-                    # Set Span attributes (Extra)
-                    self._span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                        environment)
-                    self._span.set_attribute(SERVICE_NAME,
-                        application_name)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER,
-                        self._kwargs.get("user", ""))
-                    self._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM,
-                        True)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS,
-                        self._input_tokens + self._output_tokens)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST,
-                        cost)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT,
-                        self._tbt)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT,
-                        self._ttft)
-                    self._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION,
-                        version)
-                    if capture_message_content:
-                        self._span.add_event(
-                            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-                        self._span.add_event(
-                            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
-                            attributes={
-                                SemanticConvention.GEN_AI_CONTENT_COMPLETION: self._llmresponse,
-                            },
-                        )
-                    self._span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvention.GEN_AI_SYSTEM_GROQ,
-                            request_model=request_model,
-                            server_address=self._server_address,
-                            server_port=self._server_port,
-                            response_model=self._response_model,
-                        )
-
-                        metrics["genai_client_usage_tokens"].record(
-                            self._input_tokens + self._output_tokens, attributes
-                        )
-                        metrics["genai_client_operation_duration"].record(
-                            self._end_time - self._start_time, attributes
-                        )
-                        metrics["genai_server_tbt"].record(
-                            self._tbt, attributes
+                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
                         )
-                        metrics["genai_server_ttft"].record(
-                            self._ttft, attributes
-                        )
-                        metrics["genai_requests"].add(1, attributes)
-                        metrics["genai_completion_tokens"].add(self._output_tokens, attributes)
-                        metrics["genai_prompt_tokens"].add(self._input_tokens, attributes)
-                        metrics["genai_cost"].record(cost, attributes)

                 except Exception as e:
                     handle_exception(self._span, e)
-
-                finally:
-                    self._span.end()
+
                 raise

     async def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps the
-
-        This collects metrics such as execution time, cost, and token usage, and handles errors
-        gracefully, adding details to the trace for observability.
-
-        Args:
-            wrapped: The original 'chat.completions' method to be wrapped.
-            instance: The instance of the class where the original method is defined.
-            args: Positional arguments for the 'chat.completions' method.
-            kwargs: Keyword arguments for the 'chat.completions' method.
-
-        Returns:
-            The response from the original 'chat.completions' method.
+        Wraps the GenAI function call.
         """
-
         # Check if streaming is enabled for the API call
         streaming = kwargs.get("stream", False)
         server_address, server_port = set_server_address_and_port(instance, "api.groq.com", 443)
-        request_model = kwargs.get("model", "
+        request_model = kwargs.get("model", "mixtral-8x7b-32768")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"

-        # pylint: disable=no-else-return
         if streaming:
-            # Special handling for streaming response
+            # Special handling for streaming response
             awaited_wrapped = await wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
-
-            return TracedAsyncStream(awaited_wrapped, span, kwargs, server_address, server_port)
-
-            # Handling for non-streaming responses
+            return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
         else:
-
+            # Handling for non-streaming responses
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
                 start_time = time.time()
                 response = await wrapped(*args, **kwargs)
-                end_time = time.time()
-
-                response_dict = response_as_dict(response)

                 try:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    prompt = "\n".join(formatted_messages)
-
-                    input_tokens = response_dict.get('usage').get('prompt_tokens')
-                    output_tokens = response_dict.get('usage').get('completion_tokens')
-
-                    # Calculate cost of the operation
-                    cost = get_chat_model_cost(request_model,
-                        pricing_info, input_tokens,
-                        output_tokens)
-
-                    # Set base span attribues (OTel Semconv)
-                    span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
-                    span.set_attribute(SemanticConvention.GEN_AI_OPERATION,
-                        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT)
-                    span.set_attribute(SemanticConvention.GEN_AI_SYSTEM,
-                        SemanticConvention.GEN_AI_SYSTEM_GROQ)
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL,
-                        request_model)
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED,
-                        kwargs.get("seed", ""))
-                    span.set_attribute(SemanticConvention.SERVER_PORT,
-                        server_port)
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY,
-                        kwargs.get("frequency_penalty", 0.0))
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS,
-                        kwargs.get("max_completion_tokens", -1))
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY,
-                        kwargs.get("presence_penalty", 0.0))
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES,
-                        kwargs.get("stop", []))
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE,
-                        kwargs.get("temperature", 1.0))
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P,
-                        kwargs.get("top_p", 1.0))
-                    span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID,
-                        response_dict.get("id"))
-                    span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL,
-                        response_dict.get('model'))
-                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS,
-                        input_tokens)
-                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS,
-                        output_tokens)
-                    span.set_attribute(SemanticConvention.SERVER_ADDRESS,
-                        server_address)
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER,
-                        kwargs.get("service_tier", "on_demand"))
-                    span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
-                        response_dict.get('system_fingerprint'))
-
-                    # Set base span attribues (Extras)
-                    span.set_attribute(DEPLOYMENT_ENVIRONMENT,
-                        environment)
-                    span.set_attribute(SERVICE_NAME,
-                        application_name)
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER,
-                        kwargs.get("user", ""))
-                    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM,
-                        False)
-                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_TOTAL_TOKENS,
-                        input_tokens + output_tokens)
-                    span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST,
-                        cost)
-                    span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT,
-                        end_time - start_time)
-                    span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION,
-                        version)
-                    if capture_message_content:
-                        span.add_event(
-                            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
-                            attributes={
-                                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
-                            },
-                        )
-
-                    for i in range(kwargs.get('n',1)):
-                        span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON,
-                            [response_dict.get('choices')[i].get('finish_reason')])
-                        if capture_message_content:
-                            span.add_event(
-                                name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
-                                attributes={
-                                    # pylint: disable=line-too-long
-                                    SemanticConvention.GEN_AI_CONTENT_COMPLETION: str(response_dict.get('choices')[i].get('message').get('content')),
-                                },
-                            )
-                        if kwargs.get('tools'):
-                            span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALLS,
-                                str(response_dict.get('choices')[i].get('message').get('tool_calls')))
-
-                        if isinstance(response_dict.get('choices')[i].get('message').get('content'), str):
-                            span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-                                "text")
-                        elif response_dict.get('choices')[i].get('message').get('content') is not None:
-                            span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-                                "json")
-
-                    span.set_status(Status(StatusCode.OK))
-
-                    if disable_metrics is False:
-                        attributes = create_metrics_attributes(
-                            service_name=application_name,
-                            deployment_environment=environment,
-                            operation=SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
-                            system=SemanticConvention.GEN_AI_SYSTEM_GROQ,
-                            request_model=request_model,
-                            server_address=server_address,
-                            server_port=server_port,
-                            response_model=response_dict.get('model'),
-                        )
-
-                        metrics["genai_client_usage_tokens"].record(
-                            input_tokens + output_tokens, attributes
-                        )
-                        metrics["genai_client_operation_duration"].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics["genai_server_ttft"].record(
-                            end_time - start_time, attributes
-                        )
-                        metrics["genai_requests"].add(1, attributes)
-                        metrics["genai_completion_tokens"].add(output_tokens, attributes)
-                        metrics["genai_prompt_tokens"].add(input_tokens, attributes)
-                        metrics["genai_cost"].record(cost, attributes)
-
-                    # Return original response
-                    return response
+                    response = process_chat_response(
+                        response=response,
+                        request_model=request_model,
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )

                 except Exception as e:
                     handle_exception(span, e)
-                    logger.error("Error in trace creation: %s", e)

-
-            return response
+                return response

     return wrapper
```