openlit 1.34.7__py3-none-any.whl → 1.34.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +46 -0
- openlit/instrumentation/assemblyai/__init__.py +14 -18
- openlit/instrumentation/assemblyai/assemblyai.py +29 -120
- openlit/instrumentation/assemblyai/utils.py +142 -0
- openlit/instrumentation/elevenlabs/__init__.py +5 -27
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +29 -119
- openlit/instrumentation/elevenlabs/elevenlabs.py +28 -118
- openlit/instrumentation/elevenlabs/utils.py +133 -0
- openlit/instrumentation/gpt4all/__init__.py +3 -6
- openlit/instrumentation/gpt4all/gpt4all.py +75 -383
- openlit/instrumentation/gpt4all/utils.py +281 -0
- openlit/instrumentation/ollama/__init__.py +5 -6
- openlit/instrumentation/ollama/async_ollama.py +65 -62
- openlit/instrumentation/ollama/ollama.py +65 -62
- openlit/instrumentation/ollama/utils.py +180 -239
- openlit/instrumentation/premai/__init__.py +2 -2
- openlit/instrumentation/premai/utils.py +4 -3
- openlit/instrumentation/reka/utils.py +3 -3
- openlit/instrumentation/together/utils.py +3 -3
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/METADATA +1 -1
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/RECORD +23 -20
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/LICENSE +0 -0
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/WHEEL +0 -0
openlit/instrumentation/gpt4all/utils.py (added)

@@ -0,0 +1,281 @@
+"""
+GPT4All OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    calculate_ttft,
+    calculate_tbt,
+    general_tokens,
+    create_metrics_attributes,
+    get_chat_model_cost,
+    get_embed_model_cost,
+)
+from openlit.semcov import SemanticConvention
+
+def format_content(prompt):
+    """
+    Process a prompt to extract content.
+    """
+    return str(prompt) if prompt else ""
+
+def process_chunk(scope, chunk):
+    """
+    Process a chunk of response data and update state.
+    """
+
+    end_time = time.time()
+    # Record the timestamp for the current chunk
+    scope._timestamps.append(end_time)
+
+    if len(scope._timestamps) == 1:
+        # Calculate time to first chunk
+        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)
+
+    scope._llmresponse += chunk
+    scope._end_time = time.time()
+
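
Note: `process_chunk` is the streaming accumulator — it appends one wall-clock timestamp per chunk so `calculate_ttft`/`calculate_tbt` can derive latency later. A minimal sketch of the same bookkeeping, with plausible stand-in latency math (the real definitions live in `openlit.__helpers`):

```python
import time

# Hypothetical attribute bag standing in for openlit's wrapper state.
scope = type("StreamScope", (), {})()
scope._start_time = time.time()
scope._timestamps = []
scope._llmresponse = ""
scope._ttft = 0

for chunk in ["Hello", ", ", "world"]:  # stand-in for a streamed response
    scope._timestamps.append(time.time())
    if len(scope._timestamps) == 1:
        # Time to first token: first chunk arrival minus request start.
        scope._ttft = scope._timestamps[0] - scope._start_time
    scope._llmresponse += chunk

# Time between tokens: mean gap between consecutive chunk arrivals.
gaps = [b - a for a, b in zip(scope._timestamps, scope._timestamps[1:])]
tbt = sum(gaps) / len(gaps) if gaps else 0
```
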
+def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, is_stream, tbt, ttft, version):
+    """
+    Set common span attributes for both generate and embed operations.
+    """
+
+    scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, gen_ai_operation)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, gen_ai_system)
+    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+    scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, response_model)
+    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+    scope._span.set_attribute(SERVICE_NAME, application_name)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, tbt)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, ttft)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
+
+def record_completion_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time,
+    input_tokens, output_tokens, cost, tbt=None, ttft=None):
+    """
+    Record completion-specific metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
+    if tbt is not None:
+        metrics["genai_server_tbt"].record(tbt, attributes)
+    if ttft is not None:
+        metrics["genai_server_ttft"].record(ttft, attributes)
+
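
Note: the `metrics` argument is the dict of OpenTelemetry instruments openlit builds at init time. A sketch of how a compatible dict could be assembled — the instrument names here are illustrative, not openlit's exact ones; the kinds follow the usage above (`.add()` implies a counter, `.record()` a histogram):

```python
from opentelemetry import metrics

meter = metrics.get_meter("openlit-example")

example_metrics = {
    "genai_requests": meter.create_counter("gen_ai.requests"),
    "genai_prompt_tokens": meter.create_counter("gen_ai.usage.input_tokens"),
    "genai_completion_tokens": meter.create_counter("gen_ai.usage.output_tokens"),
    "genai_client_usage_tokens": meter.create_histogram("gen_ai.client.token.usage"),
    "genai_client_operation_duration": meter.create_histogram(
        "gen_ai.client.operation.duration", unit="s"),
    "genai_cost": meter.create_histogram("gen_ai.usage.cost", unit="USD"),
    "genai_server_tbt": meter.create_histogram("gen_ai.server.tbt", unit="s"),
    "genai_server_ttft": meter.create_histogram("gen_ai.server.ttft", unit="s"),
}
```
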
+def record_embedding_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time,
+    input_tokens, cost):
+    """
+    Record embedding-specific metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_usage_tokens"].record(input_tokens, attributes)
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
+
+def common_t2s_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version, is_stream):
+    """
+    Process generate request and generate Telemetry
+    """
+
+    if len(scope._timestamps) > 1:
+        scope._tbt = calculate_tbt(scope._timestamps)
+
+    prompt = format_content(scope._kwargs.get("prompt") or (scope._args[0] if scope._args else "") or "")
+    request_model = scope._request_model
+
+    # Calculate tokens using input prompt and aggregated response
+    input_tokens = general_tokens(prompt)
+    output_tokens = general_tokens(scope._llmresponse)
+
+    cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_GPT4ALL,
+        scope._server_address, scope._server_port, request_model, request_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Span Attributes for Request parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, scope._kwargs.get("repeat_penalty", 1.18))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_tokens", 200))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, scope._kwargs.get("presence_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temp", 0.7))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 0.4))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_K, scope._kwargs.get("top_k", 40))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+    # Span Attributes for Cost and Tokens
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Span Attributes for Tools
+    if scope._tools:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, scope._tools.get("function", {}).get("name", ""))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(scope._tools.get("id", "")))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(scope._tools.get("function", {}).get("arguments", "")))
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+        # To be removed once the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_GPT4ALL,
+            scope._server_address, scope._server_port, request_model, request_model, environment,
+            application_name, scope._start_time, scope._end_time, input_tokens, output_tokens,
+            cost, scope._tbt, scope._ttft)
+
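
Note: GPT4All runs locally and returns no usage block, so both token counts are estimated client-side with `general_tokens` and priced through `get_chat_model_cost`. A rough stand-in for that flow — the heuristic and the pricing-table shape below are assumptions for illustration, not openlit's implementation:

```python
def approx_tokens(text: str) -> int:
    # Crude estimate: roughly four characters per token.
    return max(1, len(text) // 4)

def approx_chat_cost(model, pricing_info, input_tokens, output_tokens):
    # Hypothetical per-1k-token price table; local models typically cost 0.
    rates = pricing_info.get("chat", {}).get(model, {})
    return (input_tokens / 1000) * rates.get("promptPrice", 0) + \
           (output_tokens / 1000) * rates.get("completionPrice", 0)

cost = approx_chat_cost("orca-mini", {}, approx_tokens("Why is the sky blue?"),
                        approx_tokens("Because of Rayleigh scattering."))
```
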
+def common_embedding_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version):
+    """
+    Process embedding request and generate Telemetry
+    """
+
+    prompt = format_content(scope._kwargs.get("text") or "")
+    request_model = scope._request_model
+
+    input_tokens = general_tokens(prompt)
+
+    cost = get_embed_model_cost(request_model, pricing_info, input_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING, SemanticConvention.GEN_AI_SYSTEM_GPT4ALL,
+        scope._server_address, scope._server_port, request_model, request_model,
+        environment, application_name, False, scope._tbt, scope._ttft, version)
+
+    # Embedding-specific span attributes
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: str(scope._kwargs.get("input", "")),
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_embedding_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING, SemanticConvention.GEN_AI_SYSTEM_GPT4ALL,
+            scope._server_address, scope._server_port, request_model, request_model, environment,
+            application_name, scope._start_time, scope._end_time, input_tokens, cost)
+
+def process_streaming_generate_response(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content=False, disable_metrics=False, version=""):
+    """
+    Process generate request and generate Telemetry
+    """
+    common_t2s_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=True)
+
+def process_generate_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, args, kwargs, capture_message_content=False,
+    disable_metrics=False, version="1.0.0"):
+    """
+    Process generate request and generate Telemetry
+    """
+
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._llmresponse = str(response)
+    scope._request_model = request_model
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+    scope._args = args
+    scope._tools = None
+
+    common_t2s_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=False)
+
+    return response
+
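
Note: `type("GenericScope", (), {})()` builds a throwaway attribute bag so the streaming and non-streaming paths can share `common_t2s_logic`. The idiom in isolation:

```python
# Create an empty class on the fly and instantiate it; attributes can then
# be attached freely -- equivalent in spirit to types.SimpleNamespace().
scope = type("GenericScope", (), {})()
scope._llmresponse = "hello"
scope._timestamps = []
assert scope._llmresponse == "hello"
```
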
+def process_embedding_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process embedding request and generate Telemetry
+    """
+
+    scope = type("GenericScope", (), {})()
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._request_model = request_model
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+
+    common_embedding_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version)
+
+    return response
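
Note: together these utilities back the rewritten GPT4All wrappers. A hedged end-to-end sketch of what gets instrumented, assuming `openlit` and `gpt4all` are installed; the model file name is illustrative:

```python
import openlit
from gpt4all import GPT4All

openlit.init(application_name="gpt4all-demo", environment="dev")

model = GPT4All("orca-mini-3b-gguf2-q4_0.gguf")  # any local model file

# Non-streaming path: process_generate_response builds the span in one shot.
text = model.generate("Why is the sky blue?", max_tokens=100)

# Streaming path: process_chunk accumulates chunks, then
# process_streaming_generate_response finalizes TTFT/TBT and the span.
for token in model.generate("Summarize that.", max_tokens=100, streaming=True):
    pass
```
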
openlit/instrumentation/ollama/__init__.py

@@ -41,7 +41,7 @@ def _dispatch_async(async_chat_wrap, async_emb_wrap):
 
 class OllamaInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for Ollama
+    An instrumentor for Ollama client library.
     """
 
     def instrumentation_dependencies(self) -> Collection[str]:
@@ -51,7 +51,6 @@ class OllamaInstrumentor(BaseInstrumentor):
         application_name = kwargs.get("application_name", "default_application")
         environment = kwargs.get("environment", "default_environment")
         tracer = kwargs.get("tracer")
-        event_provider = kwargs.get("event_provider")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
         capture_message_content = kwargs.get("capture_message_content", False)
@@ -61,22 +60,22 @@ class OllamaInstrumentor(BaseInstrumentor):
         # Build wrapper factories for chat and embeddings
         sync_chat_wrap = chat(
             version, environment, application_name,
-            tracer, event_provider, pricing_info,
+            tracer, pricing_info,
             capture_message_content, metrics, disable_metrics
         )
         sync_emb_wrap = embeddings(
             version, environment, application_name,
-            tracer, event_provider, pricing_info,
+            tracer, pricing_info,
             capture_message_content, metrics, disable_metrics
         )
         async_chat_wrap = async_chat(
             version, environment, application_name,
-            tracer, event_provider, pricing_info,
+            tracer, pricing_info,
             capture_message_content, metrics, disable_metrics
        )
         async_emb_wrap = async_embeddings(
             version, environment, application_name,
-            tracer, event_provider, pricing_info,
+            tracer, pricing_info,
             capture_message_content, metrics, disable_metrics
         )
 
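
Note: the factory signatures now thread `pricing_info` into every wrapper and drop the unused `event_provider`. The factories are plain closures, so anything the wrapper needs at call time has to be captured at instrument time — the pattern reduced to a sketch:

```python
def chat(version, environment, application_name,
         tracer, pricing_info, capture_message_content, metrics, disable_metrics):
    # Arguments are captured once here, at instrumentation time...
    def wrapper(wrapped, instance, args, kwargs):
        # ...and remain in scope for every intercepted call, so cost can be
        # computed from pricing_info without re-reading global config.
        return wrapped(*args, **kwargs)
    return wrapper

wrap = chat("1.34.10", "dev", "demo", tracer=None, pricing_info={},
            capture_message_content=False, metrics=None, disable_metrics=True)
```
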
openlit/instrumentation/ollama/async_ollama.py

@@ -2,7 +2,6 @@
 Module for monitoring Ollama API calls.
 """
 
-import logging
 import time
 from opentelemetry.trace import SpanKind
 from openlit.__helpers import (
@@ -17,12 +16,10 @@ from openlit.instrumentation.ollama.utils import (
 )
 from openlit.semcov import SemanticConvention
 
-logger = logging.getLogger(__name__)
-
 def async_chat(version, environment, application_name,
-    tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for GenAI function call
+    Generates a telemetry wrapper for Ollama async chat function call
     """
 
     class TracedAsyncStream:
@@ -38,7 +35,7 @@ def async_chat(version, environment, application_name,
             kwargs,
             server_address,
             server_port,
-
+            args,
         ):
             self.__wrapped__ = wrapped
             self._span = span
@@ -48,11 +45,11 @@ def async_chat(version, environment, application_name,
             self._tool_calls = []
             self._input_tokens = 0
             self._output_tokens = 0
-            self._response_role = ''
+            self._response_role = ""
             self._span_name = span_name
             self._args = args
             self._kwargs = kwargs
-            self._start_time = time.time()
+            self._start_time = time.monotonic()
             self._end_time = None
             self._timestamps = []
             self._ttft = 0
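
Note: `_start_time` moves from `time.time()` to `time.monotonic()`. Monotonic timestamps are immune to wall-clock adjustments (NTP corrections, DST), so duration math can never go negative; they are only meaningful as differences:

```python
import time

start = time.monotonic()
time.sleep(0.01)
elapsed = time.monotonic() - start  # always >= 0, unaffected by clock changes
# time.time() can jump backwards if the system clock is corrected mid-request.
```
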
@@ -81,56 +78,101 @@ def async_chat(version, environment, application_name,
                 return chunk
             except StopAsyncIteration:
                 try:
-                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                    with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
                         process_streaming_chat_response(
                             self,
                             pricing_info=pricing_info,
                             environment=environment,
                             application_name=application_name,
                             metrics=metrics,
-                            event_provider=event_provider,
                             capture_message_content=capture_message_content,
                             disable_metrics=disable_metrics,
                             version=version
                         )
                 except Exception as e:
                     handle_exception(self._span, e)
-
+
                 raise
 
     async def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps the GenAI function call.
+        Wraps the Ollama async chat function call.
         """
 
         streaming = kwargs.get("stream", False)
 
         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
-
-        request_model = json_body.get("model") or kwargs.get("model")
+        request_model = kwargs.get("model")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
 
-        # pylint: disable=no-else-return
         if streaming:
             awaited_wrapped = await wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
-            return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+            return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port, args)
 
         else:
-            with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
-                start_time = time.time()
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.monotonic()
+
+                try:
+                    response = await wrapped(*args, **kwargs)
+
+                    response = process_chat_response(
+                        response=response,
+                        gen_ai_endpoint="ollama.chat",
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper
+
+def async_embeddings(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for Ollama async embeddings function call
+    """
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the Ollama async embeddings function call.
+        """
+
+        server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
+        request_model = kwargs.get("model")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.monotonic()
+
+            try:
                 response = await wrapped(*args, **kwargs)
-                response = process_chat_response(
+
+                response = process_embedding_response(
                     response=response,
-                    request_model=request_model,
+                    gen_ai_endpoint="ollama.embeddings",
                     pricing_info=pricing_info,
                     server_port=server_port,
                     server_address=server_address,
                     environment=environment,
                     application_name=application_name,
                     metrics=metrics,
-                    event_provider=event_provider,
                     start_time=start_time,
                     span=span,
                     capture_message_content=capture_message_content,
@@ -139,47 +181,8 @@ def async_chat(version, environment, application_name,
                     **kwargs
                 )
 
-
-
-        return wrapper
-
-def async_embeddings(version, environment, application_name,
-    tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
-    """
-    Generates a telemetry wrapper for GenAI function call
-    """
-
-    async def wrapper(wrapped, instance, args, kwargs):
-        """
-        Wraps the GenAI function call.
-        """
-
-        server_address, server_port = set_server_address_and_port(instance, '127.0.0.1', 11434)
-        json_body = kwargs.get('json', {}) or {}
-        request_model = json_body.get('model') or kwargs.get('model')
-
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}'
-
-        with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
-            start_time = time.time()
-            response = await wrapped(*args, **kwargs)
-            response = process_embedding_response(
-                response=response,
-                request_model=request_model,
-                pricing_info=pricing_info,
-                server_port=server_port,
-                server_address=server_address,
-                environment=environment,
-                application_name=application_name,
-                metrics=metrics,
-                event_provider=event_provider,
-                start_time=start_time,
-                span=span,
-                capture_message_content=capture_message_content,
-                disable_metrics=disable_metrics,
-                version=version,
-                **kwargs
-            )
+            except Exception as e:
+                handle_exception(span, e)
 
             return response
 
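
Note: after the rewrite, both async entry points share the try/`handle_exception` shape and take the same factory arguments as their sync counterparts. A hedged usage sketch against the `ollama` client these wrappers patch — the model name is illustrative, and it assumes a local Ollama server plus a prior `openlit.init()`:

```python
import asyncio
import openlit
from ollama import AsyncClient

openlit.init(application_name="ollama-demo", environment="dev")

async def main():
    client = AsyncClient()
    # Traced by async_chat: one CLIENT span per request, named after
    # the chat operation and the model.
    await client.chat(model="llama3", messages=[{"role": "user", "content": "Hi"}])
    # Traced by async_embeddings: one CLIENT span for the embeddings call.
    await client.embeddings(model="llama3", prompt="Hi")

asyncio.run(main())
```
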