openlit 1.34.4__py3-none-any.whl → 1.34.7__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- openlit/instrumentation/google_ai_studio/__init__.py +2 -4
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -6
- openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -6
- openlit/instrumentation/google_ai_studio/utils.py +1 -2
- openlit/instrumentation/openai/async_openai.py +1 -1
- openlit/instrumentation/premai/__init__.py +0 -1
- openlit/instrumentation/premai/premai.py +84 -454
- openlit/instrumentation/premai/utils.py +325 -0
- openlit/instrumentation/reka/__init__.py +5 -7
- openlit/instrumentation/reka/async_reka.py +25 -163
- openlit/instrumentation/reka/reka.py +24 -162
- openlit/instrumentation/reka/utils.py +193 -0
- openlit/instrumentation/together/__init__.py +3 -5
- openlit/instrumentation/together/async_together.py +70 -476
- openlit/instrumentation/together/together.py +69 -475
- openlit/instrumentation/together/utils.py +320 -0
- {openlit-1.34.4.dist-info → openlit-1.34.7.dist-info}/METADATA +1 -1
- {openlit-1.34.4.dist-info → openlit-1.34.7.dist-info}/RECORD +20 -17
- {openlit-1.34.4.dist-info → openlit-1.34.7.dist-info}/LICENSE +0 -0
- {openlit-1.34.4.dist-info → openlit-1.34.7.dist-info}/WHEEL +0 -0
openlit/instrumentation/together/utils.py (new file, +320 lines):

```python
"""
Together AI OpenTelemetry instrumentation utility functions
"""

import time

from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
from opentelemetry.trace import Status, StatusCode

from openlit.__helpers import (
    calculate_ttft,
    response_as_dict,
    calculate_tbt,
    get_chat_model_cost,
    get_image_model_cost,
    create_metrics_attributes,
)
from openlit.semcov import SemanticConvention

def format_content(messages):
    """
    Process a list of messages to extract content.
    """

    formatted_messages = []
    for message in messages:
        role = message["role"]
        content = message["content"]

        if isinstance(content, list):
            content_str = ", ".join(
                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                if "type" in item else f'text: {item["text"]}'
                for item in content
            )
            formatted_messages.append(f"{role}: {content_str}")
        else:
            formatted_messages.append(f"{role}: {content}")

    return "\n".join(formatted_messages)
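
# Illustrative only (not part of the released file): given a mixed message list,
#   format_content([
#       {"role": "system", "content": "You are concise."},
#       {"role": "user", "content": [
#           {"type": "text", "text": "Describe this"},
#           {"type": "image_url", "image_url": "https://example.com/cat.png"},
#       ]},
#   ])
# format_content above returns:
#   "system: You are concise.\nuser: text: Describe this, image_url: https://example.com/cat.png"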

def process_chunk(scope, chunk):
    """
    Process a chunk of response data and update state.
    """

    end_time = time.time()
    # Record the timestamp for the current chunk
    scope._timestamps.append(end_time)

    if len(scope._timestamps) == 1:
        # Calculate time to first chunk
        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)

    chunked = response_as_dict(chunk)
    # Collect message IDs and aggregated response from events
    if (len(chunked.get("choices")) > 0 and ("delta" in chunked.get("choices")[0] and
            "content" in chunked.get("choices")[0].get("delta"))):

        content = chunked.get("choices")[0].get("delta").get("content")
        if content:
            scope._llmresponse += content

    if chunked.get("usage"):
        scope._response_id = chunked.get("id")
        scope._response_model = chunked.get("model")
        scope._input_tokens = chunked.get("usage").get("prompt_tokens")
        scope._output_tokens = chunked.get("usage").get("completion_tokens")
        scope._finish_reason = str(chunked.get("finish_reason"))
        scope._end_time = time.time()
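
# Illustrative only: process_chunk above consumes OpenAI-compatible streaming
# chunks. A text delta looks roughly like
#   {"choices": [{"delta": {"content": "Hel"}}]}
# and the final chunk additionally carries top-level "id", "model",
# "finish_reason", and a "usage" block (prompt_tokens / completion_tokens),
# which is where the response metadata gets captured.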

def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_address, server_port,
        request_model, response_model, environment, application_name, is_stream, tbt, ttft, version):
    """
    Set common span attributes for both chat and RAG operations.
    """

    scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, gen_ai_operation)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, gen_ai_system)
    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
    scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, scope._response_model)
    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
    scope._span.set_attribute(SERVICE_NAME, application_name)
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, scope._tbt)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, scope._ttft)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)

def record_common_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
        request_model, response_model, environment, application_name, start_time, end_time,
        input_tokens, output_tokens, cost, tbt=None, ttft=None):
    """
    Record common metrics for the operation.
    """

    attributes = create_metrics_attributes(
        operation=gen_ai_operation,
        system=gen_ai_system,
        server_address=server_address,
        server_port=server_port,
        request_model=request_model,
        response_model=response_model,
        service_name=application_name,
        deployment_environment=environment,
    )
    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
    metrics["genai_requests"].add(1, attributes)
    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
    metrics["genai_completion_tokens"].add(output_tokens, attributes)
    metrics["genai_client_usage_tokens"].record(input_tokens + output_tokens, attributes)
    metrics["genai_cost"].record(cost, attributes)
    if tbt is not None:
        metrics["genai_server_tbt"].record(tbt, attributes)
    if ttft is not None:
        metrics["genai_server_ttft"].record(ttft, attributes)

def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version, is_stream):
    """
    Process chat request and generate Telemetry
    """

    if len(scope._timestamps) > 1:
        scope._tbt = calculate_tbt(scope._timestamps)

    prompt = format_content(scope._kwargs.get("messages", ""))
    request_model = scope._kwargs.get("model", "jamba-1.5-mini")

    cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)

    # Common Span Attributes
    common_span_attributes(scope,
        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_TOGETHER,
        scope._server_address, scope._server_port, request_model, scope._response_model,
        environment, application_name, is_stream, scope._tbt, scope._ttft, version)

    # Span Attributes for Response parameters
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, scope._kwargs.get("seed", ""))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, scope._kwargs.get("frequency_penalty", 0.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_tokens", -1))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, scope._kwargs.get("presence_penalty", 0.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get("stop", []))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 1.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")

    # Span Attributes for Cost and Tokens
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)

    # Span Attributes for Tools
    if scope._tools:
        # tool_calls may arrive as a list; report the first call
        tool_call = scope._tools[0] if isinstance(scope._tools, list) else scope._tools
        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, tool_call.get("function", {}).get("name", ""))
        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(tool_call.get("id", "")))
        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(tool_call.get("function", {}).get("arguments", "")))

    # Span Attributes for Content
    if capture_message_content:
        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)

        # To be removed once the change to span_attributes (from span events) is complete
        scope._span.add_event(
            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
            attributes={
                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
            },
        )
        scope._span.add_event(
            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
            attributes={
                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
            },
        )

    scope._span.set_status(Status(StatusCode.OK))

    # Metrics
    if not disable_metrics:
        record_common_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_TOGETHER,
            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
            application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
            cost, scope._tbt, scope._ttft)

def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
        capture_message_content=False, disable_metrics=False, version=""):
    """
    Process chat request and generate Telemetry
    """
    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version, is_stream=True)

def process_chat_response(response, request_model, pricing_info, server_port, server_address,
        environment, application_name, metrics, start_time, span, capture_message_content=False,
        disable_metrics=False, version="1.0.0", **kwargs):
    """
    Process chat request and generate Telemetry
    """

    scope = type("GenericScope", (), {})()
    response_dict = response_as_dict(response)

    scope._start_time = start_time
    scope._end_time = time.time()
    scope._span = span
    scope._llmresponse = " ".join(
        (choice.get("message", {}).get("content") or "")
        for choice in response_dict.get("choices", [])
    )
    scope._response_id = response_dict.get("id")
    scope._response_model = response_dict.get("model")
    scope._input_tokens = response_dict.get("usage").get("prompt_tokens")
    scope._output_tokens = response_dict.get("usage").get("completion_tokens")
    scope._timestamps = []
    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
    scope._server_address, scope._server_port = server_address, server_port
    scope._kwargs = kwargs
    scope._finish_reason = str(response_dict.get("choices")[0].get("finish_reason"))

    if scope._kwargs.get("tools"):
        scope._tools = response_dict.get("choices")[0].get("message").get("tool_calls")
    else:
        scope._tools = None

    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version, is_stream=False)

    return response
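
# Illustrative only: process_chat_response above expects an OpenAI-compatible
# completion shape, roughly
#   {"id": "...", "model": "...",
#    "usage": {"prompt_tokens": 12, "completion_tokens": 34},
#    "choices": [{"finish_reason": "stop",
#                 "message": {"content": "...", "tool_calls": [...]}}]}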

def common_image_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version):
    """
    Process image generation request and generate Telemetry
    """

    # Find Image format
    if "response_format" in scope._kwargs and scope._kwargs["response_format"] == "b64_json":
        image_format = "b64_json"
    else:
        image_format = "url"

    image_size = str(scope._kwargs.get("width", "1024")) + "x" + str(scope._kwargs.get("height", "1024"))
    request_model = scope._kwargs.get("model", "dall-e-2")

    # Calculate cost of the operation
    cost = get_image_model_cost(request_model, pricing_info, image_size,
        scope._kwargs.get("quality", "standard"))

    # Common Span Attributes
    common_span_attributes(scope,
        SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE, SemanticConvention.GEN_AI_SYSTEM_TOGETHER,
        scope._server_address, scope._server_port, request_model, scope._response_model,
        environment, application_name, False, scope._tbt, scope._ttft, version)

    # Image-specific span attributes
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "image")
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IMAGE_SIZE, image_size)
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, len(scope._response_data) * cost)

    # Content attributes
    if capture_message_content:
        scope._span.add_event(
            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
            attributes={
                SemanticConvention.GEN_AI_CONTENT_PROMPT: scope._kwargs.get("prompt", ""),
            },
        )

        for images_count, item in enumerate(scope._response_data):
            attribute_name = f"{SemanticConvention.GEN_AI_RESPONSE_IMAGE}.{images_count}"
            scope._span.add_event(
                name=attribute_name,
                attributes={
                    SemanticConvention.GEN_AI_CONTENT_COMPLETION: getattr(item, image_format),
                },
            )

    scope._span.set_status(Status(StatusCode.OK))

    # Metrics
    if not disable_metrics:
        record_common_metrics(
            metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE,
            SemanticConvention.GEN_AI_SYSTEM_TOGETHER, scope._server_address, scope._server_port,
            request_model, scope._response_model, environment, application_name,
            scope._start_time, scope._end_time, 0, 0, len(scope._response_data) * cost
        )

def process_image_response(response, request_model, pricing_info, server_address, server_port,
        environment, application_name, metrics, start_time, end_time, span, capture_message_content,
        disable_metrics, version, **kwargs):
    """
    Process image generation request and generate Telemetry
    """

    scope = type("GenericScope", (), {})()

    scope._start_time = start_time
    scope._end_time = end_time
    scope._span = span
    scope._response_id = response.id
    scope._response_model = response.model
    scope._response_data = response.data
    scope._server_address = server_address
    scope._server_port = server_port
    scope._kwargs = kwargs
    scope._tbt = 0
    scope._ttft = end_time - start_time

    common_image_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version)

    return response
```
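
For orientation, here is a minimal sketch of how the slimmed-down wrappers in `together.py` / `async_together.py` (summarized in the file list above, not shown in this diff) presumably drive these helpers. The generator, host, and literal values below are illustrative assumptions, not code from the wheel:

```python
# Hypothetical wiring, inferred only from the helper signatures above.
import time

def traced_stream(span, raw_stream, **kwargs):
    """Yield chunks unchanged while collecting telemetry state on `scope`."""
    scope = type("GenericScope", (), {})()
    scope._span = span
    scope._start_time = time.time()
    scope._timestamps = []
    scope._llmresponse = ""
    scope._tbt = 0
    scope._ttft = 0
    scope._tools = None
    scope._kwargs = kwargs  # the original chat.completions.create(...) kwargs
    scope._server_address, scope._server_port = "api.together.xyz", 443

    for chunk in raw_stream:
        process_chunk(scope, chunk)  # aggregates text, TTFT, and usage
        yield chunk

    # Assumes the final chunk carried a "usage" block, which is what populates
    # scope._response_model, the token counts, and the finish reason.
    process_streaming_chat_response(scope, pricing_info={}, environment="dev",
        application_name="demo-app", metrics=None,
        capture_message_content=True, disable_metrics=True, version="1.34.7")
```

The contract the module exposes is just this call order: `process_chunk` per streamed event, then a single `process_streaming_chat_response` (or `process_chat_response` / `process_image_response` for non-streaming calls) to emit the span attributes and metrics.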
openlit-1.34.7.dist-info/METADATA:

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.4
+Version: 1.34.7
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
```
openlit-1.34.7.dist-info/RECORD (hashes on removed lines were truncated in the source and are elided as "…"):

```diff
@@ -56,10 +56,10 @@ openlit/instrumentation/embedchain/__init__.py,sha256=x2_qvJTwWog_mH6IY987Bp9mWx
 openlit/instrumentation/embedchain/embedchain.py,sha256=f4hyOr1Xr0RC4PNHRu46aV-jmEh-lIeKN8XLjgY7aWM,7929
 openlit/instrumentation/firecrawl/__init__.py,sha256=kyVsAiDBC2djifqT2w1cPRAotiEyEabNvnBeSQxi9N8,1876
 openlit/instrumentation/firecrawl/firecrawl.py,sha256=4X38UrLYeGm3uez-edYA6qEc0nKC3p77yfKgKBBud0A,3826
-openlit/instrumentation/google_ai_studio/__init__.py,sha256=…
-openlit/instrumentation/google_ai_studio/async_google_ai_studio.py,sha256=…
-openlit/instrumentation/google_ai_studio/google_ai_studio.py,sha256=…
-openlit/instrumentation/google_ai_studio/utils.py,sha256=…
+openlit/instrumentation/google_ai_studio/__init__.py,sha256=VLNOlaTFzjOpuUzloynvADewiTmaEu1wx8FerEbmsvg,2510
+openlit/instrumentation/google_ai_studio/async_google_ai_studio.py,sha256=UL5AdTwkzdTKUomTfETMgYjUl00qL7BB8U0izuXfKFo,5527
+openlit/instrumentation/google_ai_studio/google_ai_studio.py,sha256=nanOoXz-1uJtdh39aD438_yMk0no3AM7VVNKzDganHo,5429
+openlit/instrumentation/google_ai_studio/utils.py,sha256=-X5sHk216ajJrl4cP35f5vT8YAZaIE4yLKI7nWEKHkQ,11140
 openlit/instrumentation/gpt4all/__init__.py,sha256=cO8mi3hhPDXcNwb9AwQ3-wQ_ydnOeBRwb0cptlQmAM4,1805
 openlit/instrumentation/gpt4all/gpt4all.py,sha256=EYp0njZ1kF56rTAjYZVtufA5W4xTWGzSIntjJ4MEfl4,24185
 openlit/instrumentation/gpu/__init__.py,sha256=QQCFVEbRfdeTjmdFe-UeEiy19vEEWSIBpj2B1wYGhUs,11036
@@ -96,7 +96,7 @@ openlit/instrumentation/ollama/async_ollama.py,sha256=zJPDr2ROh1nvFGoxgdTbe04Zr1
 openlit/instrumentation/ollama/ollama.py,sha256=MNUltiP9XVT4azmO_-E2vjhFaoHQyJ0Z6c-HnB0_jCE,6563
 openlit/instrumentation/ollama/utils.py,sha256=41uvYaYkGwWfRyHYqhOOwrFy6cMzBlG1urJYUat9Q24,14819
 openlit/instrumentation/openai/__init__.py,sha256=y9Ox5aYWTb2nAa_d0ic3Mkv4wEKmUGqslW9nHKg6NnY,6320
-openlit/instrumentation/openai/async_openai.py,sha256=…
+openlit/instrumentation/openai/async_openai.py,sha256=JyA8MDxWCM38Te6mJzBdfonRgIIlo2ziLn7HOmzqxxo,81398
 openlit/instrumentation/openai/openai.py,sha256=5fgRyK5dUN2zUdrN0vBSZFnSEAXf2dKS0qnq_85-mQE,81175
 openlit/instrumentation/openai_agents/__init__.py,sha256=tRTSIrUtkXc_lfQnVanXmQLd2Sy9RqBNTHF5FhhZx7o,1530
 openlit/instrumentation/openai_agents/openai_agents.py,sha256=kRWPgjofcOviMi3w7CsRvJO3SCjqPmuq-PM800vIM7g,2678
@@ -104,20 +104,23 @@ openlit/instrumentation/phidata/__init__.py,sha256=tqls5-UI6FzbjxYgq_qqAfALhWJm8
 openlit/instrumentation/phidata/phidata.py,sha256=ohrxs6i0Oik75P2BrjNGbK71tdZg94ZMmaXixrXwV5M,4834
 openlit/instrumentation/pinecone/__init__.py,sha256=0guSEPmObaZiOF8yHExpOGY-qW_egHXfZGog3rKGi8M,2596
 openlit/instrumentation/pinecone/pinecone.py,sha256=7hVUlC0HOj0yQyvLasfdb6kS46hRJQdoSRzZQ4ixIkk,8850
-openlit/instrumentation/premai/__init__.py,sha256=…
-openlit/instrumentation/premai/premai.py,sha256=…
+openlit/instrumentation/premai/__init__.py,sha256=S3Q-Pa58jkJ1dKzPqKC8Q_phwFcGR0OuEP29vUmQ7TI,1752
+openlit/instrumentation/premai/premai.py,sha256=rWRqfoIZUbTz-M7zgC2Z92gTVv9fCj1Z4iJcsG86YeI,6438
+openlit/instrumentation/premai/utils.py,sha256=ENZby0YET1U7kgNQeyHUTgK0rKIuhOcwau9R4Bbn-l0,14925
 openlit/instrumentation/pydantic_ai/__init__.py,sha256=mq52QanFI4xDx6JK-qW5yzhFPXwznJqIYsuxRoBA2Xg,2023
 openlit/instrumentation/pydantic_ai/pydantic_ai.py,sha256=2F2hrowGqcPjTDLG9IeLY8OO-lXZKhLSU93XtZ3tt5A,1868
 openlit/instrumentation/pydantic_ai/utils.py,sha256=b0TqhSDnRqkPdM_qsOgMuXT3lwTvHzMYpaBv2qibiVo,4307
 openlit/instrumentation/qdrant/__init__.py,sha256=5prYH46yQt2hSA5zgg7kKM6P_F_7s9OQtfRE_lqsaVc,8970
 openlit/instrumentation/qdrant/async_qdrant.py,sha256=dwMQx8bI4Lp8Tgze87esIdVMOffbQcK80lKNLjxsNOU,15263
 openlit/instrumentation/qdrant/qdrant.py,sha256=pafjlAzMPzYLRYFfTtWXsLKYVQls-grkHVO3YmFuNPg,15689
-openlit/instrumentation/reka/__init__.py,sha256=…
-openlit/instrumentation/reka/async_reka.py,sha256=…
-openlit/instrumentation/reka/reka.py,sha256=…
-openlit/instrumentation/together/__init__.py,sha256=…
-openlit/instrumentation/together/async_together.py,sha256=…
-openlit/instrumentation/together/together.py,sha256=…
+openlit/instrumentation/reka/__init__.py,sha256=wI5KUYyTAD8ni4E98uziy9WPqoQqlzybDXanFOqDan0,1720
+openlit/instrumentation/reka/async_reka.py,sha256=CZk5rr7njThDkmrauRAJmNtMBgsLarTbQ54raPQb92A,1909
+openlit/instrumentation/reka/reka.py,sha256=wou7vVdN_1Y5UZd4tpkLpTPAtgmAl6gmh_onLn4k4GE,1908
+openlit/instrumentation/reka/utils.py,sha256=_2o4TcVgVhQNkI8_M9FCLMsEx5KCXxNw0wnUpGFTtiY,9232
+openlit/instrumentation/together/__init__.py,sha256=0UmUqQtppyK3oopb4lTjX2LITgVCR8VtH46IAV1rpA8,2484
+openlit/instrumentation/together/async_together.py,sha256=0-h5fKw6rIwN_fvWVpGuvVqizIuM9xFCzz8Z4oGgOj0,6822
+openlit/instrumentation/together/together.py,sha256=nY6mzHmHgoMbbnB_9eL0EBQjP0ltJVdkQj4pbamHAj0,6723
+openlit/instrumentation/together/utils.py,sha256=nIAxxbY5eTSB4yb90NdXwqim0W4ybfipQaerW_cyQEM,14389
 openlit/instrumentation/transformers/__init__.py,sha256=9Ubss5nlumcypxprxff8Fv3sst7II27SsvCzqkBX9Kg,1457
 openlit/instrumentation/transformers/transformers.py,sha256=y--t7PXhUfPC81w-aEE7qowMah3os9gnKBQ5bN4QLGc,1980
 openlit/instrumentation/transformers/utils.py,sha256=3f-ewpUpduaBrTVIFJKaabACjz-6Vf8K7NEU0EzQ4Nk,8042
@@ -131,7 +134,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
 openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
 openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
 openlit/semcov/__init__.py,sha256=ptyo37PY-FHDx_PShEvbdns71cD4YvvXw15bCRXKCKM,13461
-openlit-1.34.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-openlit-1.34.4.dist-info/METADATA,sha256=…
-openlit-1.34.4.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-openlit-1.34.4.dist-info/RECORD,,
+openlit-1.34.7.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.34.7.dist-info/METADATA,sha256=Hhv4r0ePRYKY1nHeIImBPWZ6sN4o_CeT4CySPB8kw6g,23469
+openlit-1.34.7.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+openlit-1.34.7.dist-info/RECORD,,
```
openlit-1.34.7.dist-info/LICENSE and openlit-1.34.7.dist-info/WHEEL: file contents unchanged; only the dist-info directory name changed between versions.