openlit 1.34.7__py3-none-any.whl → 1.34.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/__helpers.py +46 -0
- openlit/instrumentation/assemblyai/__init__.py +14 -18
- openlit/instrumentation/assemblyai/assemblyai.py +29 -120
- openlit/instrumentation/assemblyai/utils.py +142 -0
- openlit/instrumentation/elevenlabs/__init__.py +5 -27
- openlit/instrumentation/elevenlabs/async_elevenlabs.py +29 -119
- openlit/instrumentation/elevenlabs/elevenlabs.py +28 -118
- openlit/instrumentation/elevenlabs/utils.py +133 -0
- openlit/instrumentation/gpt4all/__init__.py +3 -6
- openlit/instrumentation/gpt4all/gpt4all.py +75 -383
- openlit/instrumentation/gpt4all/utils.py +281 -0
- openlit/instrumentation/ollama/__init__.py +5 -6
- openlit/instrumentation/ollama/async_ollama.py +65 -62
- openlit/instrumentation/ollama/ollama.py +65 -62
- openlit/instrumentation/ollama/utils.py +180 -239
- openlit/instrumentation/premai/__init__.py +2 -2
- openlit/instrumentation/premai/utils.py +4 -3
- openlit/instrumentation/reka/utils.py +3 -3
- openlit/instrumentation/together/utils.py +3 -3
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/METADATA +1 -1
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/RECORD +23 -20
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/LICENSE +0 -0
- {openlit-1.34.7.dist-info → openlit-1.34.10.dist-info}/WHEEL +0 -0
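For context, a minimal usage sketch (not part of the diff): once OpenLIT is initialized, calls made through the `ollama` Python client pass through the refactored `chat()` and `embeddings()` wrapper factories shown in the diff below. The model name and message content are placeholders, and a local Ollama server on `127.0.0.1:11434` (the fallback address used by the instrumentation) is assumed.

```python
# Hedged usage sketch: exercises both code paths the diff below rewrites.
# Assumes the `ollama` client package and a local Ollama server on
# 127.0.0.1:11434 (the default address in the instrumentation).
import openlit
import ollama

# openlit.init() installs the instrumentors, including the Ollama wrappers.
openlit.init(application_name="demo-app", environment="dev")

# Non-streaming call: handled by the new process_chat_response() path.
reply = ollama.chat(
    model="llama3",  # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
)

# Streaming call: wrapped by TracedSyncStream, which finalizes the span
# once the stream is exhausted.
for chunk in ollama.chat(
    model="llama3",
    messages=[{"role": "user", "content": "Hello"}],
    stream=True,
):
    pass
```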
```diff
--- a/openlit/instrumentation/ollama/ollama.py
+++ b/openlit/instrumentation/ollama/ollama.py
@@ -2,7 +2,6 @@
 Module for monitoring Ollama API calls.
 """
 
-import logging
 import time
 from opentelemetry.trace import SpanKind
 from openlit.__helpers import (
@@ -17,12 +16,10 @@ from openlit.instrumentation.ollama.utils import (
 )
 from openlit.semcov import SemanticConvention
 
-logger = logging.getLogger(__name__)
-
 def chat(version, environment, application_name,
-        tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
-    Generates a telemetry wrapper for GenAI function call
+    Generates a telemetry wrapper for Ollama chat function call
     """
 
     class TracedSyncStream:
@@ -38,7 +35,7 @@ def chat(version, environment, application_name,
                 kwargs,
                 server_address,
                 server_port,
-                **args,
+                args,
             ):
             self.__wrapped__ = wrapped
             self._span = span
@@ -48,11 +45,11 @@ def chat(version, environment, application_name,
             self._tool_calls = []
             self._input_tokens = 0
             self._output_tokens = 0
-            self._response_role = ''
+            self._response_role = ""
             self._span_name = span_name
             self._args = args
             self._kwargs = kwargs
-            self._start_time = time.time()
+            self._start_time = time.monotonic()
             self._end_time = None
             self._timestamps = []
             self._ttft = 0
@@ -81,56 +78,101 @@ def chat(version, environment, application_name,
                 return chunk
             except StopIteration:
                 try:
-                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                    with tracer.start_as_current_span(self._span_name, kind=SpanKind.CLIENT) as self._span:
                         process_streaming_chat_response(
                             self,
                             pricing_info=pricing_info,
                             environment=environment,
                             application_name=application_name,
                             metrics=metrics,
-                            event_provider=event_provider,
                             capture_message_content=capture_message_content,
                             disable_metrics=disable_metrics,
                             version=version
                         )
                 except Exception as e:
                     handle_exception(self._span, e)
-                    logger.error("Error in trace creation: %s", e)
+
                 raise
 
     def wrapper(wrapped, instance, args, kwargs):
         """
-        Wraps the GenAI function call.
+        Wraps the Ollama chat function call.
         """
 
         streaming = kwargs.get("stream", False)
 
         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
-
-        request_model = json_body.get("model") or kwargs.get("model")
+        request_model = kwargs.get("model")
 
         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
 
-        # pylint: disable=no-else-return
        if streaming:
             awaited_wrapped = wrapped(*args, **kwargs)
             span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
-            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port, args)
 
         else:
-            with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
-                start_time = time.time()
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.monotonic()
+
+                try:
+                    response = wrapped(*args, **kwargs)
+
+                    response = process_chat_response(
+                        response=response,
+                        gen_ai_endpoint="ollama.chat",
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper
+
+def embeddings(version, environment, application_name,
+    tracer, pricing_info, capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for Ollama embeddings function call
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the Ollama embeddings function call.
+        """
+
+        server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
+        request_model = kwargs.get("model")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}"
+
+        with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+            start_time = time.monotonic()
+
+            try:
                 response = wrapped(*args, **kwargs)
-                response = process_chat_response(
+
+                response = process_embedding_response(
                     response=response,
-                    request_model=request_model,
+                    gen_ai_endpoint="ollama.embeddings",
                     pricing_info=pricing_info,
                     server_port=server_port,
                     server_address=server_address,
                     environment=environment,
                     application_name=application_name,
                     metrics=metrics,
-                    event_provider=event_provider,
                     start_time=start_time,
                     span=span,
                     capture_message_content=capture_message_content,
@@ -139,47 +181,8 @@ def chat(version, environment, application_name,
                     **kwargs
                 )
 
-            return response
-
-    return wrapper
-
-def embeddings(version, environment, application_name,
-        tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics):
-    """
-    Generates a telemetry wrapper for GenAI function call
-    """
-
-    def wrapper(wrapped, instance, args, kwargs):
-        """
-        Wraps the GenAI function call.
-        """
-
-        server_address, server_port = set_server_address_and_port(instance, '127.0.0.1', 11434)
-        json_body = kwargs.get('json', {}) or {}
-        request_model = json_body.get('model') or kwargs.get('model')
-
-        span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_EMBEDDING} {request_model}'
-
-        with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
-            start_time = time.time()
-            response = wrapped(*args, **kwargs)
-            response = process_embedding_response(
-                response=response,
-                request_model=request_model,
-                pricing_info=pricing_info,
-                server_port=server_port,
-                server_address=server_address,
-                environment=environment,
-                application_name=application_name,
-                metrics=metrics,
-                event_provider=event_provider,
-                start_time=start_time,
-                span=span,
-                capture_message_content=capture_message_content,
-                disable_metrics=disable_metrics,
-                version=version,
-                **kwargs
-            )
+            except Exception as e:
+                handle_exception(span, e)
 
             return response
 
```
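The companion `ollama/__init__.py` change (+5 −6 in the file list, not shown above) presumably drops `event_provider` from the factory invocations to match the new signatures. Below is a minimal sketch of that wiring using `wrapt.wrap_function_wrapper`, the mechanism openlit instrumentors use; the `instrument_ollama` helper name is hypothetical.

```python
# Hypothetical registration sketch; mirrors the new factory signatures above.
from wrapt import wrap_function_wrapper

from openlit.instrumentation.ollama.ollama import chat, embeddings

def instrument_ollama(version, environment, application_name, tracer,
                      pricing_info, capture_message_content, metrics,
                      disable_metrics):
    # Hypothetical helper; in openlit this wiring lives in the
    # OllamaInstrumentor inside ollama/__init__.py.
    wrap_function_wrapper(
        "ollama", "chat",
        chat(version, environment, application_name, tracer,
             pricing_info, capture_message_content, metrics, disable_metrics),
    )
    wrap_function_wrapper(
        "ollama", "embeddings",
        embeddings(version, environment, application_name, tracer,
                   pricing_info, capture_message_content, metrics, disable_metrics),
    )
```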