openlit 1.34.15__py3-none-any.whl → 1.34.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/azure_ai_inference/__init__.py +39 -23
- openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +70 -40
- openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +69 -39
- openlit/instrumentation/azure_ai_inference/utils.py +258 -146
- openlit/semcov/__init__.py +1 -0
- {openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/METADATA +1 -1
- {openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/RECORD +9 -9
- {openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/LICENSE +0 -0
- {openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/WHEEL +0 -0
openlit/instrumentation/azure_ai_inference/__init__.py CHANGED

@@ -5,49 +5,65 @@ import importlib.metadata
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 from openlit.instrumentation.azure_ai_inference.azure_ai_inference import (
-    complete
+    complete,
+    embed
 )
 from openlit.instrumentation.azure_ai_inference.async_azure_ai_inference import (
-    async_complete
+    async_complete,
+    async_embed
 )

-_instruments = (
+_instruments = ("azure-ai-inference >= 1.0.0b4",)

 class AzureAIInferenceInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for azure-ai-inference
+    An instrumentor for azure-ai-inference client library.
     """

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments

     def _instrument(self, **kwargs):
-        application_name = kwargs.get(
-        environment = kwargs.get(
-        tracer = kwargs.get(
-        … (removed lines truncated in source)
-        # sync generate
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        capture_message_content = kwargs.get("capture_message_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("azure-ai-inference")
+
+        # sync chat completions
         wrap_function_wrapper(
-            … (removed lines truncated in source)
+            "azure.ai.inference",
+            "ChatCompletionsClient.complete",
             complete(version, environment, application_name,
-                 tracer,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

-        # async
+        # async chat completions
         wrap_function_wrapper(
-            … (removed lines truncated in source)
+            "azure.ai.inference.aio",
+            "ChatCompletionsClient.complete",
             async_complete(version, environment, application_name,
-                 tracer,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        # sync embeddings
+        wrap_function_wrapper(
+            "azure.ai.inference",
+            "EmbeddingsClient.embed",
+            embed(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+        )
+
+        # async embeddings
+        wrap_function_wrapper(
+            "azure.ai.inference.aio",
+            "EmbeddingsClient.embed",
+            async_embed(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
         pass
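The registration above pairs wrapt's `wrap_function_wrapper` with wrapper factories (`complete`, `async_complete`, `embed`, `async_embed`) that close over the tracer and configuration. A minimal, self-contained sketch of that factory-plus-wrapt pattern follows; `Client`, `make_wrapper`, and the "demo" label are invented for illustration and are not openlit code.

```python
# Minimal sketch of the factory-plus-wrapt pattern used in _instrument above.
# `Client` and `make_wrapper` are hypothetical stand-ins, not openlit code.
from wrapt import wrap_function_wrapper

def make_wrapper(label):
    """Factory: closes over configuration and returns a wrapt-style wrapper."""
    def wrapper(wrapped, instance, args, kwargs):
        print(f"[{label}] calling {wrapped.__name__}")
        return wrapped(*args, **kwargs)
    return wrapper

class Client:
    def complete(self, prompt):
        return prompt.upper()

# Patch Client.complete in this module, mirroring how openlit patches
# "azure.ai.inference" / "ChatCompletionsClient.complete".
wrap_function_wrapper(__name__, "Client.complete", make_wrapper("demo"))

print(Client().complete("hello"))  # prints "[demo] calling complete", then "HELLO"
```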
openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py CHANGED

@@ -13,6 +13,7 @@ from openlit.instrumentation.azure_ai_inference.utils import (
     process_chunk,
     process_chat_response,
     process_streaming_chat_response,
+    process_embedding_response,
 )
 from openlit.semcov import SemanticConvention

@@ -20,7 +21,7 @@ from openlit.semcov import SemanticConvention
 logger = logging.getLogger(__name__)

 def async_complete(version, environment, application_name,
-    tracer,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -43,12 +44,15 @@ def async_complete(version, environment, application_name,
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
-            self._llmresponse =
-            self._response_id =
-            self._response_model =
-            self._finish_reason =
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._finish_reason = ""
+            self._response_service_tier = ""
+            self._tools = None
             self._input_tokens = 0
             self._output_tokens = 0
+            self._reasoning_tokens = 0

             self._args = args
             self._kwargs = kwargs
@@ -64,53 +68,35 @@ def async_complete(version, environment, application_name,
             await self.__wrapped__.__aenter__()
             return self

-        async def __aexit__(self, exc_type,
-            await self.__wrapped__.__aexit__(exc_type,
+        async def __aexit__(self, exc_type, exc_val, exc_tb):
+            await self.__wrapped__.__aexit__(exc_type, exc_val, exc_tb)
+            process_streaming_chat_response(
+                self, pricing_info, environment, application_name, metrics,
+                capture_message_content, disable_metrics, version
+            )

         def __aiter__(self):
             return self

-        async def __getattr__(self, name):
-            """Delegate attribute access to the wrapped object."""
-            return getattr(await self.__wrapped__, name)
-
         async def __anext__(self):
-            … (removed lines truncated in source)
-                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
-                        process_streaming_chat_response(
-                            self,
-                            pricing_info=pricing_info,
-                            environment=environment,
-                            application_name=application_name,
-                            metrics=metrics,
-                            event_provider=event_provider,
-                            capture_message_content=capture_message_content,
-                            disable_metrics=disable_metrics,
-                            version=version
-                        )
-
-                except Exception as e:
-                    handle_exception(self._span, e)
-                    logger.error('Error in trace creation: %s', e)
-                raise
+            chunk = await self.__wrapped__.__anext__()
+            process_chunk(self, chunk)
+            return chunk
+
+        def __getattr__(self, name):
+            return getattr(self.__wrapped__, name)

     async def wrapper(wrapped, instance, args, kwargs):
         """
         Wraps the GenAI function call.
         """

-        streaming = kwargs.get(
-        server_address, server_port = set_server_address_and_port(instance,
-        request_model = kwargs.get(
+        streaming = kwargs.get("stream", False)
+        server_address, server_port = set_server_address_and_port(instance, "models.github.ai", 443)
+        request_model = kwargs.get("model", "gpt-4o")

-        span_name = f
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"

-        # pylint: disable=no-else-return
         if streaming:
             awaited_wrapped = await wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
@@ -130,7 +116,6 @@ def async_complete(version, environment, application_name,
                     environment=environment,
                     application_name=application_name,
                     metrics=metrics,
-                    event_provider=event_provider,
                     start_time=start_time,
                     span=span,
                     capture_message_content=capture_message_content,
@@ -142,3 +127,48 @@ def async_complete(version, environment, application_name,
         return response

     return wrapper
+
+def async_embed(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI embedding function call
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI embedding function call.
+        """
+
+        server_address, server_port = set_server_address_and_port(instance, "models.github.ai", 443)
+        request_model = kwargs.get("model", "text-embedding-3-small")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            response = await wrapped(*args, **kwargs)
+
+            try:
+                response = process_embedding_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
+
+    return wrapper
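The restructured streaming path above delegates `__anext__` to the wrapped stream, records every chunk via `process_chunk`, and finalizes telemetry in `__aexit__` rather than catching `StopAsyncIteration`. A toy, self-contained sketch of that shape (all names invented; no openlit internals):

```python
# Illustrative sketch of the streaming-wrapper pattern used above (toy names,
# not the openlit class): delegate iteration, inspect each chunk, and run a
# finalization hook when the async context manager exits.
import asyncio

class TracedAsyncStream:
    def __init__(self, wrapped):
        self.__wrapped__ = wrapped
        self._chunks = 0

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # In the real wrapper this is where process_streaming_chat_response() runs.
        print(f"stream finished after {self._chunks} chunks")

    def __aiter__(self):
        return self

    async def __anext__(self):
        chunk = await self.__wrapped__.__anext__()  # StopAsyncIteration propagates
        self._chunks += 1                           # per-chunk bookkeeping (process_chunk)
        return chunk

async def fake_stream():
    for part in ("Hel", "lo"):
        yield part

async def main():
    async with TracedAsyncStream(fake_stream()) as stream:
        async for chunk in stream:
            print(chunk)

asyncio.run(main())
```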
openlit/instrumentation/azure_ai_inference/azure_ai_inference.py CHANGED

@@ -13,6 +13,7 @@ from openlit.instrumentation.azure_ai_inference.utils import (
     process_chunk,
     process_chat_response,
     process_streaming_chat_response,
+    process_embedding_response,
 )
 from openlit.semcov import SemanticConvention

@@ -20,7 +21,7 @@ from openlit.semcov import SemanticConvention
 logger = logging.getLogger(__name__)

 def complete(version, environment, application_name,
-    tracer,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for GenAI function call
     """
@@ -43,12 +44,15 @@ def complete(version, environment, application_name,
             self.__wrapped__ = wrapped
             self._span = span
             self._span_name = span_name
-            self._llmresponse =
-            self._response_id =
-            self._response_model =
-            self._finish_reason =
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._finish_reason = ""
+            self._response_service_tier = ""
+            self._tools = None
             self._input_tokens = 0
             self._output_tokens = 0
+            self._reasoning_tokens = 0

             self._args = args
             self._kwargs = kwargs
@@ -64,53 +68,35 @@ def complete(version, environment, application_name,
             self.__wrapped__.__enter__()
             return self

-        def __exit__(self, exc_type,
-            self.__wrapped__.__exit__(exc_type,
+        def __exit__(self, exc_type, exc_val, exc_tb):
+            self.__wrapped__.__exit__(exc_type, exc_val, exc_tb)
+            process_streaming_chat_response(
+                self, pricing_info, environment, application_name, metrics,
+                capture_message_content, disable_metrics, version
+            )

         def __iter__(self):
             return self

+        def __next__(self):
+            chunk = next(self.__wrapped__)
+            process_chunk(self, chunk)
+            return chunk
+
         def __getattr__(self, name):
-            """Delegate attribute access to the wrapped object."""
             return getattr(self.__wrapped__, name)

-        def __next__(self):
-            try:
-                chunk = self.__wrapped__.__next__()
-                process_chunk(self, chunk)
-                return chunk
-            except StopIteration:
-                try:
-                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
-                        process_streaming_chat_response(
-                            self,
-                            pricing_info=pricing_info,
-                            environment=environment,
-                            application_name=application_name,
-                            metrics=metrics,
-                            event_provider=event_provider,
-                            capture_message_content=capture_message_content,
-                            disable_metrics=disable_metrics,
-                            version=version
-                        )
-
-                except Exception as e:
-                    handle_exception(self._span, e)
-                    logger.error('Error in trace creation: %s', e)
-                raise
-
     def wrapper(wrapped, instance, args, kwargs):
         """
         Wraps the GenAI function call.
         """

-        streaming = kwargs.get(
-        server_address, server_port = set_server_address_and_port(instance,
-        request_model = kwargs.get(
+        streaming = kwargs.get("stream", False)
+        server_address, server_port = set_server_address_and_port(instance, "models.github.ai", 443)
+        request_model = kwargs.get("model", "gpt-4o")

-        span_name = f
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"

-        # pylint: disable=no-else-return
         if streaming:
             awaited_wrapped = wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
@@ -130,7 +116,6 @@ def complete(version, environment, application_name,
                     environment=environment,
                     application_name=application_name,
                     metrics=metrics,
-                    event_provider=event_provider,
                     start_time=start_time,
                     span=span,
                     capture_message_content=capture_message_content,
@@ -142,3 +127,48 @@ def complete(version, environment, application_name,
         return response

     return wrapper
+
+def embed(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI embedding function call
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI embedding function call.
+        """
+
+        server_address, server_port = set_server_address_and_port(instance, "models.github.ai", 443)
+        request_model = kwargs.get("model", "text-embedding-3-small")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.time()
+            response = wrapped(*args, **kwargs)
+
+            try:
+                response = process_embedding_response(
+                    response=response,
+                    request_model=request_model,
+                    pricing_info=pricing_info,
+                    server_port=server_port,
+                    server_address=server_address,
+                    environment=environment,
+                    application_name=application_name,
+                    metrics=metrics,
+                    start_time=start_time,
+                    span=span,
+                    capture_message_content=capture_message_content,
+                    disable_metrics=disable_metrics,
+                    version=version,
+                    **kwargs
+                )
+
+            except Exception as e:
+                handle_exception(span, e)
+
+            return response
+
+    return wrapper
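For context, the new sync `embed` wrapper fires when an instrumented application calls `EmbeddingsClient.embed`. Below is a rough usage sketch, not taken from the diff: the endpoint, key, and model name are placeholders, and it assumes openlit and azure-ai-inference are installed with some exporter destination configured.

```python
# Hedged usage sketch (placeholder endpoint/key/model): after openlit.init(),
# EmbeddingsClient.embed is wrapped by the `embed` wrapper shown above and
# should emit an "embedding <model>" span with token and cost attributes.
import openlit
from azure.ai.inference import EmbeddingsClient
from azure.core.credentials import AzureKeyCredential

openlit.init(application_name="demo-app", environment="dev")

client = EmbeddingsClient(
    endpoint="https://<your-inference-endpoint>",  # placeholder
    credential=AzureKeyCredential("<your-key>"),   # placeholder
)

response = client.embed(
    input=["OpenTelemetry for GenAI"],
    model="text-embedding-3-small",                # placeholder model name
)
print(len(response.data[0].embedding))
```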
openlit/instrumentation/azure_ai_inference/utils.py CHANGED

@@ -3,104 +3,185 @@ Azure AI Inference OpenTelemetry instrumentation utility functions
 """
 import time

-from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from opentelemetry.trace import Status, StatusCode

 from openlit.__helpers import (
     calculate_ttft,
     response_as_dict,
     calculate_tbt,
-    extract_and_format_input,
     get_chat_model_cost,
-    … (removed lines truncated in source)
+    get_embed_model_cost,
+    common_span_attributes,
+    record_completion_metrics,
+    record_embedding_metrics,
 )
 from openlit.semcov import SemanticConvention

-def … (removed line truncated in source)
+def format_content(messages):
+    """
+    Process a list of messages to extract content.
+    """
+
+    formatted_messages = []
+    for message in messages:
+        role = message.get("role", "user")
+        content = message.get("content", "")
+
+        if isinstance(content, list):
+            content_str = ", ".join(
+                f'{item["type"]}: {item["text"] if "text" in item else item.get("image_url", "")}'
+                if "type" in item else f'text: {item.get("text", "")}'
+                for item in content
+            )
+            formatted_messages.append(f'{role}: {content_str}')
+        else:
+            formatted_messages.append(f'{role}: {content}')
+
+    return '\n'.join(formatted_messages)
+
+def process_chunk(scope, chunk):
     """
     Process a chunk of response data and update state.
     """

     end_time = time.time()
     # Record the timestamp for the current chunk
-    … (removed line truncated in source)
+    scope._timestamps.append(end_time)

-    if len(
+    if len(scope._timestamps) == 1:
         # Calculate time to first chunk
-        … (removed line truncated in source)
+        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)

     chunked = response_as_dict(chunk)

     # Collect message IDs and aggregated response from events
-    … (removed lines truncated in source)
+    choices = chunked.get("choices", [])
+    if choices and "delta" in choices[0]:
+        delta = choices[0]["delta"]
+
+        # Handle content
+        content = delta.get("content")
+        if content:
+            scope._llmresponse += content
+
+        # Handle reasoning content (if present)
+        reasoning_content = delta.get("reasoning_content")
+        if reasoning_content:
+            if not hasattr(scope, "_reasoning_content"):
+                scope._reasoning_content = ""
+            scope._reasoning_content += reasoning_content

-        … (removed lines truncated in source)
+        # Handle finish_reason (appears in final chunk)
+        finish_reason = chunked.get("choices")[0].get("finish_reason")
+        if finish_reason:
+            scope._finish_reason = finish_reason
+            scope._end_time = time.time()

-        … (removed lines truncated in source)
+        # Handle tool calls in streaming - optimized
+        delta_tools = delta.get("tool_calls")
+        if delta_tools:
+            scope._tools = scope._tools or []

-        … (removed lines truncated in source)
+            for tool in delta_tools:
+                idx = tool.get("index", 0)
+
+                # Extend list if needed
+                scope._tools.extend([{}] * (idx + 1 - len(scope._tools)))
+
+                if tool.get("id"):  # New tool (id exists)
+                    func = tool.get("function", {})
+                    scope._tools[idx] = {
+                        "id": tool["id"],
+                        "function": {"name": func.get("name", ""), "arguments": func.get("arguments", "")},
+                        "type": tool.get("type", "function")
+                    }
+                elif scope._tools[idx] and "function" in tool:  # Append args (id is None)
+                    scope._tools[idx]["function"]["arguments"] += tool["function"].get("arguments", "")
+
+        # Handle usage information (typically only in final chunk)
+        if chunked.get("usage"):
+            scope._input_tokens = chunked.get("usage").get("prompt_tokens", 0)
+            scope._output_tokens = chunked.get("usage").get("completion_tokens", 0)
+            # Handle reasoning tokens if present (optional) - check nested structure
+            completion_details = chunked.get("usage", {}).get("completion_tokens_details", {})
+            if "reasoning_tokens" in completion_details:
+                scope._reasoning_tokens = completion_details.get("reasoning_tokens", 0)
+            elif "reasoning_tokens" in chunked.get("usage", {}):
+                scope._reasoning_tokens = chunked.get("usage").get("reasoning_tokens", 0)

 def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
-    … (removed line truncated in source)
+    capture_message_content, disable_metrics, version, is_stream):
     """
     Process chat request and generate Telemetry
     """

-    scope._end_time = time.time()
     if len(scope._timestamps) > 1:
         scope._tbt = calculate_tbt(scope._timestamps)

-    … (removed line truncated in source)
-    request_model = scope._kwargs.get(
+    prompt = format_content(scope._kwargs.get("messages", []))
+    request_model = scope._kwargs.get("model", "gpt-4o")

     cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)

-    # … (removed lines truncated in source)
-    scope._span.set_attribute(SemanticConvention.
-    scope._span.set_attribute(SemanticConvention.
-    scope._span.set_attribute(SemanticConvention.
-    scope._span.set_attribute(SemanticConvention.
-    scope._span.set_attribute(SemanticConvention.
-    … (removed lines truncated in source)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Span Attributes for Request parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, scope._kwargs.get("frequency_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_tokens", -1))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, scope._kwargs.get("presence_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get("stop", []))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 1.0))
+
+    # Span Attributes for Response parameters
     scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
-    scope._span.set_attribute(SemanticConvention.
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SERVICE_TIER, scope._response_service_tier)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT, scope._response_service_tier)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+    # Span Attributes for Cost and Tokens
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
-    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, scope._server_address)
-    … (removed line truncated in source)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE,
-        'text' if isinstance(scope._llmresponse, str) else 'json')
-
-    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
-    scope._span.set_attribute(SERVICE_NAME, application_name)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
     scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
-    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)

-    # … (removed lines truncated in source)
+    # Span Attributes for Reasoning (if present)
+    if hasattr(scope, "_reasoning_tokens") and scope._reasoning_tokens > 0:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_REASONING_TOKENS, scope._reasoning_tokens)
+        # Update total token usage to include reasoning tokens
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE,
+            scope._input_tokens + scope._output_tokens + scope._reasoning_tokens)
+
+    # Span Attributes for Tools - optimized
+    if scope._tools:
+        tools = scope._tools if isinstance(scope._tools, list) else [scope._tools]
+
+        names, ids, args = zip(*[
+            (t.get("function", {}).get("name", ""),
+             str(t.get("id", "")),
+             str(t.get("function", {}).get("arguments", "")))
+            for t in tools if isinstance(t, dict) and t
+        ]) if tools else ([], [], [])
+
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, ", ".join(filter(None, names)))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, ", ".join(filter(None, ids)))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, ", ".join(filter(None, args)))
+
+    # Span Attributes for Content
     if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+        # Add reasoning content if available
+        if hasattr(scope, "_reasoning_content") and scope._reasoning_content:
+            scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_REASONING, scope._reasoning_content)
+
+        # To be removed once the change to span_attributes (from span events) is complete
         scope._span.add_event(
             name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
             attributes={
@@ -114,112 +195,143 @@ def common_chat_logic(scope, pricing_info, environment, application_name, metric
             },
         )

-    choice_event_body = {
-        'finish_reason': scope._finish_reason,
-        'index': 0,
-        'message': {
-            **({'content': scope._llmresponse} if capture_message_content else {}),
-            'role': 'assistant'
-        }
-    }
-
-    # Emit events
-    for role in ['user', 'system', 'assistant', 'tool']:
-        if formatted_messages.get(role, {}).get('content', ''):
-            event = otel_event(
-                name=getattr(SemanticConvention, f'GEN_AI_{role.upper()}_MESSAGE'),
-                attributes={
-                    SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE
-                },
-                body = {
-                    # pylint: disable=line-too-long
-                    **({'content': formatted_messages.get(role, {}).get('content', '')} if capture_message_content else {}),
-                    'role': formatted_messages.get(role, {}).get('role', []),
-                    **({
-                        'tool_calls': {
-                            'function': {
-                                # pylint: disable=line-too-long
-                                'name': (scope._tool_calls[0].get('function', {}).get('name', '') if scope._tool_calls else ''),
-                                'arguments': (scope._tool_calls[0].get('function', {}).get('arguments', '') if scope._tool_calls else '')
-                            },
-                            'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else ''),
-                            'type': 'function'
-                        }
-                    } if role == 'assistant' else {}),
-                    **({
-                        'id': (scope._tool_calls[0].get('id', '') if scope._tool_calls else '')
-                    } if role == 'tool' else {})
-                }
-            )
-            event_provider.emit(event)
-
-    choice_event = otel_event(
-        name=SemanticConvention.GEN_AI_CHOICE,
-        attributes={
-            SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE
-        },
-        body=choice_event_body
-    )
-    event_provider.emit(choice_event)
-
     scope._span.set_status(Status(StatusCode.OK))

+    # Metrics
     if not disable_metrics:
-        … (removed lines truncated in source)
-            system=SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE,
-            request_model=request_model,
-            server_address=scope._server_address,
-            server_port=scope._server_port,
-            response_model=scope._response_model,
-        )
+        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT,
+            SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE, scope._server_address, scope._server_port,
+            request_model, scope._response_model, environment, application_name, scope._start_time, scope._end_time,
+            scope._input_tokens, scope._output_tokens, cost, scope._tbt, scope._ttft)

-        … (removed lines truncated in source)
-        metrics['genai_server_tbt'].record(scope._tbt, metrics_attributes)
-        metrics['genai_server_ttft'].record(scope._ttft, metrics_attributes)
-        metrics['genai_requests'].add(1, metrics_attributes)
-        metrics['genai_completion_tokens'].add(scope._output_tokens, metrics_attributes)
-        metrics['genai_prompt_tokens'].add(scope._input_tokens, metrics_attributes)
-        metrics['genai_cost'].record(cost, metrics_attributes)
-
-def process_streaming_chat_response(self, pricing_info, environment, application_name, metrics,
-    event_provider, capture_message_content=False, disable_metrics=False, version=''):
+def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content=False, disable_metrics=False, version=""):
     """
-    Process chat request and generate Telemetry
+    Process streaming chat request and generate Telemetry
     """

-    common_chat_logic(
-        … (removed line truncated in source)
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=True)

 def process_chat_response(response, request_model, pricing_info, server_port, server_address,
-    … (removed lines truncated in source)
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
     """
     Process chat request and generate Telemetry
     """

-    … (removed line truncated in source)
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+    response_dict = response_as_dict(response)
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._llmresponse = " ".join(
+        (choice.get("message", {}).get("content") or "")
+        for choice in response_dict.get("choices", [])
+    )
+    # Handle reasoning content from non-streaming response
+    reasoning_content = response_dict.get("choices", [{}])[0].get("message", {}).get("reasoning_content")
+    if reasoning_content:
+        scope._reasoning_content = reasoning_content
+
+    scope._input_tokens = response_dict.get("usage", {}).get("prompt_tokens", 0)
+    scope._output_tokens = response_dict.get("usage", {}).get("completion_tokens", 0)
+    # Handle reasoning tokens if present (optional) - check nested structure
+    completion_details = response_dict.get("usage", {}).get("completion_tokens_details", {})
+    if "reasoning_tokens" in completion_details:
+        scope._reasoning_tokens = completion_details.get("reasoning_tokens", 0)
+    elif "reasoning_tokens" in response_dict.get("usage", {}):
+        scope._reasoning_tokens = response_dict.get("usage").get("reasoning_tokens", 0)
+    else:
+        scope._reasoning_tokens = 0
+    scope._response_id = response_dict.get("id")
+    scope._response_model = response_dict.get("model")
+    scope._finish_reason = str(response_dict.get("choices", [])[0].get("finish_reason", ""))
+    scope._response_service_tier = str(response_dict.get("system_fingerprint", ""))
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+
+    # Handle tool calls
+    if scope._kwargs.get("tools"):
+        scope._tools = response_dict.get("choices", [{}])[0].get("message", {}).get("tool_calls")
+    else:
+        scope._tools = None
+
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=False)
+
+    return response
+
+def common_embedding_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version):
+    """
+    Process embedding request and generate Telemetry
+    """
+
+    request_model = scope._kwargs.get("model", "text-embedding-3-small")
+
+    cost = get_embed_model_cost(request_model, pricing_info, scope._input_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING, SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, False, 0, scope._end_time - scope._start_time, version)
+
+    # Span Attributes for Request parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_ENCODING_FORMATS, [scope._kwargs.get("encoding_format", "float")])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, scope._kwargs.get("user", ""))
+
+    # Span Attributes for Cost and Tokens
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, str(scope._kwargs.get("input", "")))
+
+        # To be removed once the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: str(scope._kwargs.get("input", "")),
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_embedding_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING,
+            SemanticConvention.GEN_AI_SYSTEM_AZURE_AI_INFERENCE, scope._server_address, scope._server_port,
+            request_model, scope._response_model, environment, application_name, scope._start_time, scope._end_time,
+            scope._input_tokens, cost)
+
+def process_embedding_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process embedding request and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
     response_dict = response_as_dict(response)

-    … (removed lines truncated in source)
-    self._timestamps = []
-    self._ttft, self._tbt = self._end_time - self._start_time, 0
-    self._server_address, self._server_port = server_address, server_port
-    self._kwargs = kwargs
-
-    common_chat_logic(self, pricing_info, environment, application_name, metrics,
-        event_provider, capture_message_content, disable_metrics, version, is_stream=False)
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._input_tokens = response_dict.get("usage", {}).get("prompt_tokens", 0)
+    scope._response_model = response_dict.get("model")
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+
+    common_embedding_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version)

     return response
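The reasoning-token handling added above checks the nested usage.completion_tokens_details structure first and falls back to a flat reasoning_tokens key. The same lookup, pulled out as a standalone sketch with a hypothetical usage payload:

```python
# Standalone sketch (hypothetical payload) of the reasoning-token lookup that
# process_chunk and process_chat_response perform above: prefer the nested
# usage.completion_tokens_details.reasoning_tokens, fall back to a flat key.
def extract_reasoning_tokens(usage: dict) -> int:
    details = usage.get("completion_tokens_details", {})
    if "reasoning_tokens" in details:
        return details.get("reasoning_tokens", 0)
    return usage.get("reasoning_tokens", 0)

usage = {
    "prompt_tokens": 20,
    "completion_tokens": 180,
    "completion_tokens_details": {"reasoning_tokens": 96},
}
assert extract_reasoning_tokens(usage) == 96
```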
openlit/semcov/__init__.py CHANGED

@@ -166,6 +166,7 @@ class SemanticConvention:
     GEN_AI_CONTENT_COMPLETION_EVENT = "gen_ai.content.completion"
     GEN_AI_CONTENT_COMPLETION = "gen_ai.completion"
     GEN_AI_CONTENT_REVISED_PROMPT = "gen_ai.content.revised_prompt"
+    GEN_AI_CONTENT_REASONING = "gen_ai.content.reasoning"

     # GenAI Rag
     GEN_AI_RAG_MAX_SEGMENTS = "gen_ai.rag.max_segments"
{openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.15
+Version: 1.34.16
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
{openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/RECORD CHANGED

@@ -29,10 +29,10 @@ openlit/instrumentation/astra/__init__.py,sha256=-JG3_YHQQaOQUr4XtFzqfaYiQKqviAA
 openlit/instrumentation/astra/astra.py,sha256=L_Yw980eEY0AzMqhNreKamlSplTlL8XiG5lx9Sj3D0c,1610
 openlit/instrumentation/astra/async_astra.py,sha256=87QFKnEQPHywuqMH0dOlnXZ2GqdYDZQgT4TfXB16fPI,1628
 openlit/instrumentation/astra/utils.py,sha256=qBNpugK0R6wQLFx39ZANY1fQvNAIc5PrpEazz-K69Yw,4976
-openlit/instrumentation/azure_ai_inference/__init__.py,sha256=
-openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py,sha256=
-openlit/instrumentation/azure_ai_inference/azure_ai_inference.py,sha256=
-openlit/instrumentation/azure_ai_inference/utils.py,sha256=
+openlit/instrumentation/azure_ai_inference/__init__.py,sha256=_GuYy4ypF6_HICpAC8dNQ5-FBjkcNzPTPF4q3fTM10Q,2512
+openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py,sha256=SFrniRWPqVBxFJVOpC8w1qNGSYZhCFXeKHVHu5pEdZI,5906
+openlit/instrumentation/azure_ai_inference/azure_ai_inference.py,sha256=hRFuuvaXflctNNbk7N2GOfKaC_eCHbrBWf9_1sZcaGY,5808
+openlit/instrumentation/azure_ai_inference/utils.py,sha256=JqKZgb6VppDbAQ2RdH_dQ2oWVnaqGIA1PCmMl1yrMtA,15491
 openlit/instrumentation/bedrock/__init__.py,sha256=Sfd0vm4Dfm1t-N7vBPRwU57GLTlZP2M4rVYRek_JHXY,1625
 openlit/instrumentation/bedrock/bedrock.py,sha256=kP9ESKzqhWu-dIWseyaeyendUyo6b7xJwjGo3LGi5Jc,2817
 openlit/instrumentation/bedrock/utils.py,sha256=_mTUIbioEg4jfoxocUbfc7RgGjhm9ACelbxIoFu4jbM,11636
@@ -138,8 +138,8 @@ openlit/instrumentation/vllm/vllm.py,sha256=VzazF2f4LLwjZDO_G8lIN_d622oSJM0fIO9w
 openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
-openlit/semcov/__init__.py,sha256=
-openlit-1.34.
-openlit-1.34.
-openlit-1.34.
-openlit-1.34.
+openlit/semcov/__init__.py,sha256=8oIh2VC667NDh8FA3M-ESusHmeus1sgDUD8binx_nAc,13519
+openlit-1.34.16.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.16.dist-info/METADATA,sha256=mdKEEE4FgRuUOe_Pl1Crh0S89-A8wCrBchSFJ7cqBRI,23470
+openlit-1.34.16.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.16.dist-info/RECORD,,
{openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/LICENSE: file without changes
{openlit-1.34.15.dist-info → openlit-1.34.16.dist-info}/WHEEL: file without changes