openlit 1.34.3__py3-none-any.whl → 1.34.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,320 @@
1
+ """
2
+ Together AI OpenTelemetry instrumentation utility functions
3
+ """
4
+ import time
5
+
6
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
7
+ from opentelemetry.trace import Status, StatusCode
8
+
9
+ from openlit.__helpers import (
10
+ calculate_ttft,
11
+ response_as_dict,
12
+ calculate_tbt,
13
+ get_chat_model_cost,
14
+ get_image_model_cost,
15
+ create_metrics_attributes,
16
+ )
17
+ from openlit.semcov import SemanticConvention
18
+
19
def format_content(messages):
    """
    Process a list of messages to extract content.

    Each message is rendered as "role: content"; multimodal list content is
    flattened to a comma-separated "type: value" string. Missing keys now
    fall back to empty strings instead of raising KeyError.
    """

    if not messages:
        return ""

    formatted_messages = []
    for message in messages:
        # Defensive lookups: upstream callers pass raw kwargs dicts.
        role = message.get("role", "user")
        content = message.get("content", "")

        if isinstance(content, list):
            parts = []
            for item in content:
                if "type" in item:
                    # Prefer the text payload; fall back to the image URL.
                    value = item["text"] if "text" in item else item.get("image_url", "")
                    parts.append(f'{item["type"]}: {value}')
                else:
                    parts.append(f'text: {item.get("text", "")}')
            formatted_messages.append(f"{role}: {', '.join(parts)}")
        else:
            formatted_messages.append(f"{role}: {content}")

    return "\n".join(formatted_messages)
40
+
41
def process_chunk(scope, chunk):
    """
    Process a chunk of streaming response data and update scope state.

    Records a per-chunk timestamp (for TTFT/TBT), accumulates delta
    content into scope._llmresponse, and captures id/model/token usage
    from the final usage-bearing chunk.
    """

    end_time = time.time()
    # Record the timestamp for the current chunk
    scope._timestamps.append(end_time)

    if len(scope._timestamps) == 1:
        # First chunk: calculate time to first token
        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)

    chunked = response_as_dict(chunk)

    # Bug fix: guard against usage-only / keep-alive chunks where "choices"
    # is absent or None — len(None) previously raised TypeError.
    choices = chunked.get("choices") or []
    if choices and "content" in choices[0].get("delta", {}):
        content = choices[0]["delta"]["content"]
        if content:
            scope._llmresponse += content

    if chunked.get("usage"):
        scope._response_id = chunked.get("id")
        scope._response_model = chunked.get("model")
        scope._input_tokens = chunked.get("usage").get("prompt_tokens")
        scope._output_tokens = chunked.get("usage").get("completion_tokens")
        # NOTE(review): finish_reason is read from the top level of the chunk;
        # many OpenAI-compatible APIs put it under choices[0] — confirm for Together.
        scope._finish_reason = str(chunked.get("finish_reason"))
        scope._end_time = time.time()
70
+
71
def common_span_attributes(scope, gen_ai_operation, gen_ai_system, server_address, server_port,
    request_model, response_model, environment, application_name, is_stream, tbt, ttft, version):
    """
    Set common span attributes for both chat and RAG operations.

    Applies the shared OpenTelemetry GenAI attributes (system, operation,
    server, models, environment, streaming flag, timings, SDK version) to
    scope._span.
    """

    scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
    scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, gen_ai_operation)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, gen_ai_system)
    scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
    scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
    # Bug fix: honor the explicit parameters instead of reaching back into
    # scope — response_model, tbt and ttft were previously accepted but
    # silently ignored in favor of scope._response_model/_tbt/_ttft.
    # (All visible callers pass those same scope values, so this is
    # behavior-identical for them.)
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, response_model)
    scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
    scope._span.set_attribute(SERVICE_NAME, application_name)
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IS_STREAM, is_stream)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TBT, tbt)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, ttft)
    scope._span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
90
+
91
def record_common_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
    request_model, response_model, environment, application_name, start_time, end_time,
    input_tokens, output_tokens, cost, tbt=None, ttft=None):
    """
    Record the common set of client-side metrics for one GenAI operation.

    Emits duration, request count, token counters, usage histogram and cost
    against a shared attribute set; streaming timings are optional.
    """

    shared_attrs = create_metrics_attributes(
        operation=gen_ai_operation,
        system=gen_ai_system,
        server_address=server_address,
        server_port=server_port,
        request_model=request_model,
        response_model=response_model,
        service_name=application_name,
        deployment_environment=environment,
    )

    duration = end_time - start_time
    total_tokens = input_tokens + output_tokens

    metrics["genai_client_operation_duration"].record(duration, shared_attrs)
    metrics["genai_requests"].add(1, shared_attrs)
    metrics["genai_prompt_tokens"].add(input_tokens, shared_attrs)
    metrics["genai_completion_tokens"].add(output_tokens, shared_attrs)
    metrics["genai_client_usage_tokens"].record(total_tokens, shared_attrs)
    metrics["genai_cost"].record(cost, shared_attrs)

    # Streaming-only timings; skipped when falsy (None or zero).
    if tbt:
        metrics["genai_server_tbt"].record(tbt, shared_attrs)
    if ttft:
        metrics["genai_server_ttft"].record(ttft, shared_attrs)
118
+
119
def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
    capture_message_content, disable_metrics, version, is_stream):
    """
    Process chat request and generate Telemetry.

    Computes cost and timings, sets common + chat-specific span attributes,
    optionally records prompt/completion content, and emits metrics.
    """

    if len(scope._timestamps) > 1:
        scope._tbt = calculate_tbt(scope._timestamps)

    prompt = format_content(scope._kwargs.get("messages", ""))
    request_model = scope._kwargs.get("model", "jamba-1.5-mini")

    cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)

    # Common Span Attributes
    common_span_attributes(scope,
        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_TOGETHER,
        scope._server_address, scope._server_port, request_model, scope._response_model,
        environment, application_name, is_stream, scope._tbt, scope._ttft, version)

    # Span Attributes for Response parameters
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, scope._kwargs.get("seed", ""))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, scope._kwargs.get("frequency_penalty", 0.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_tokens", -1))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, scope._kwargs.get("presence_penalty", 0.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get("stop", []))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 1.0))
    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")

    # Span Attributes for Cost and Tokens
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)

    # Span Attributes for Tools
    if scope._tools:
        # tool_calls arrives as a list of calls (see process_chat_response);
        # report the first entry. The original code called .get() directly on
        # the list, which raised AttributeError.
        tool_call = scope._tools[0] if isinstance(scope._tools, list) else scope._tools
        function = tool_call.get("function", {}) or {}
        # Bug fix: previously .get("name", "") was chained onto
        # set_attribute()'s None return value, raising AttributeError
        # whenever a tool call was present.
        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, function.get("name", ""))
        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(tool_call.get("id", "")))
        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(function.get("arguments", "")))

    # Span Attributes for Content
    if capture_message_content:
        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)

        # To be removed once the change to span_attributes (from span events) is complete
        scope._span.add_event(
            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
            attributes={
                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
            },
        )
        scope._span.add_event(
            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
            attributes={
                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
            },
        )

    scope._span.set_status(Status(StatusCode.OK))

    # Metrics
    if not disable_metrics:
        record_common_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_TOGETHER,
            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
            application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
            cost, scope._tbt, scope._ttft)
190
+
191
def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
    capture_message_content=False, disable_metrics=False, version=""):
    """
    Finalize telemetry for a streamed chat response.

    Thin wrapper over common_chat_logic with streaming enabled.
    """
    common_chat_logic(
        scope,
        pricing_info,
        environment,
        application_name,
        metrics,
        capture_message_content,
        disable_metrics,
        version,
        is_stream=True,
    )
198
+
199
def process_chat_response(response, request_model, pricing_info, server_port, server_address,
    environment, application_name, metrics, start_time, span, capture_message_content=False,
    disable_metrics=False, version="1.0.0", **kwargs):
    """
    Process a non-streaming chat response and generate Telemetry.

    Builds a lightweight scope object from the response payload and
    delegates to common_chat_logic; returns the original response so the
    wrapped call is transparent to the caller.
    """

    scope = type("GenericScope", (), {})()
    response_dict = response_as_dict(response)

    # Robustness: tolerate responses that omit "usage" or "choices"
    # (previously .get(...).get(...) raised AttributeError / IndexError).
    usage = response_dict.get("usage") or {}
    choices = response_dict.get("choices") or []

    scope._start_time = start_time
    scope._end_time = time.time()
    scope._span = span
    scope._llmresponse = " ".join(
        (choice.get("message", {}).get("content") or "")
        for choice in choices
    )
    scope._response_id = response_dict.get("id")
    scope._response_model = response_dict.get("model")
    scope._input_tokens = usage.get("prompt_tokens", 0)
    scope._output_tokens = usage.get("completion_tokens", 0)
    scope._timestamps = []
    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
    scope._server_address, scope._server_port = server_address, server_port
    scope._kwargs = kwargs
    scope._finish_reason = str(choices[0].get("finish_reason")) if choices else ""

    if scope._kwargs.get("tools") and choices:
        scope._tools = choices[0].get("message", {}).get("tool_calls")
    else:
        scope._tools = None

    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version, is_stream=False)

    return response
235
+
236
def common_image_logic(scope, pricing_info, environment, application_name, metrics,
    capture_message_content, disable_metrics, version):
    """
    Process image generation request and generate Telemetry.

    Sets common + image-specific span attributes, optionally records the
    prompt and per-image payloads as events, and emits metrics.
    """

    # Determine how the generated image is returned (base64 payload or URL).
    image_format = "b64_json" if scope._kwargs.get("response_format") == "b64_json" else "url"

    width = scope._kwargs.get("width", "1024")
    height = scope._kwargs.get("height", "1024")
    image_size = f"{width}x{height}"
    request_model = scope._kwargs.get("model", "dall-e-2")

    # Per-image cost, scaled by the number of images returned.
    cost = get_image_model_cost(request_model, pricing_info, image_size,
        scope._kwargs.get("quality", "standard"))
    total_cost = len(scope._response_data) * cost

    # Common Span Attributes
    common_span_attributes(scope,
        SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE, SemanticConvention.GEN_AI_SYSTEM_TOGETHER,
        scope._server_address, scope._server_port, request_model, scope._response_model,
        environment, application_name, False, scope._tbt, scope._ttft, version)

    # Image-specific span attributes
    span = scope._span
    span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
    span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "image")
    span.set_attribute(SemanticConvention.GEN_AI_REQUEST_IMAGE_SIZE, image_size)
    span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, total_cost)

    # Content attributes
    if capture_message_content:
        span.add_event(
            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
            attributes={
                SemanticConvention.GEN_AI_CONTENT_PROMPT: scope._kwargs.get("prompt", ""),
            },
        )

        # One event per generated image, indexed by position.
        for index, image in enumerate(scope._response_data):
            span.add_event(
                name=f"{SemanticConvention.GEN_AI_RESPONSE_IMAGE}.{index}",
                attributes={
                    SemanticConvention.GEN_AI_CONTENT_COMPLETION: getattr(image, image_format),
                },
            )

    span.set_status(Status(StatusCode.OK))

    # Metrics
    if not disable_metrics:
        record_common_metrics(
            metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_IMAGE,
            SemanticConvention.GEN_AI_SYSTEM_TOGETHER, scope._server_address, scope._server_port,
            request_model, scope._response_model, environment, application_name,
            scope._start_time, scope._end_time, 0, 0, total_cost
        )
295
+
296
def process_image_response(response, request_model, pricing_info, server_address, server_port,
    environment, application_name, metrics, start_time, end_time, span, capture_message_content,
    disable_metrics, version, **kwargs):
    """
    Process an image generation response and generate Telemetry.

    Populates a lightweight scope object from the response and delegates
    to common_image_logic; returns the original response unchanged.
    """

    scope = type("GenericScope", (), {})()

    # Timing (no streaming for images: tbt is zero, ttft is total latency).
    scope._start_time = start_time
    scope._end_time = end_time
    scope._tbt = 0
    scope._ttft = end_time - start_time

    # Span and response payload
    scope._span = span
    scope._response_id = response.id
    scope._response_model = response.model
    scope._response_data = response.data

    # Connection and request context
    scope._server_address, scope._server_port = server_address, server_port
    scope._kwargs = kwargs

    common_image_logic(scope, pricing_info, environment, application_name, metrics,
        capture_message_content, disable_metrics, version)

    return response
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: openlit
3
- Version: 1.34.3
3
+ Version: 1.34.5
4
4
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
5
5
  License: Apache-2.0
6
6
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -14,10 +14,10 @@ openlit/guard/sensitive_topic.py,sha256=RgVw_laFERv0nNdzBsAd2_3yLomMOK-gVq-P7oj1
14
14
  openlit/guard/utils.py,sha256=6hE3rCRjFXYjKRQYUo8YsqUSlvod48nOWp8MwoQEYdw,7670
15
15
  openlit/instrumentation/ag2/__init__.py,sha256=KgyLJBmwAxRWu7Z0S8FDDK4TZ13EFoAAIalvG5Oq4wc,1839
16
16
  openlit/instrumentation/ag2/ag2.py,sha256=eNQziyeZl4396GsIp5qI1Dne2KcnQMmhftW7joKQvNU,6934
17
- openlit/instrumentation/ai21/__init__.py,sha256=QXMByKCUhFITUIwUR01m0Fjpr20txV_GWcRJ66dTu_Q,2703
18
- openlit/instrumentation/ai21/ai21.py,sha256=J1QlBg56LWhkiD9IDmu0tJMOGnqKz0AiHtrMnACjU0Y,6814
19
- openlit/instrumentation/ai21/async_ai21.py,sha256=fIIGrAjF2xfGlAJiW6itqL88PSaA9vxy7F8nFpyVjZg,6918
20
- openlit/instrumentation/ai21/utils.py,sha256=7PcxzAwrLNTF5df5SR0Mp6UAkLGTATWxYrKS98c9iig,19770
17
+ openlit/instrumentation/ai21/__init__.py,sha256=U24XlK1aHX0zubyUyBY6PBCa59fwp5sU5f-VD1EkCjc,2583
18
+ openlit/instrumentation/ai21/ai21.py,sha256=1fJ1MvVIRQG-gh5YXkDycuTriT7_VB77vjXDKb7GZY8,6965
19
+ openlit/instrumentation/ai21/async_ai21.py,sha256=uUJUXCKJcokYi6fPfcgBABSMVLj9CQsWJySakiZcSiU,7003
20
+ openlit/instrumentation/ai21/utils.py,sha256=TiJtzG6kcrTf4FMJkAeHYUwZxkjp6JS3xoM2qn4gw54,14215
21
21
  openlit/instrumentation/anthropic/__init__.py,sha256=QEsiwdxcQDzzlVYR4_x7KTdf0-UJDJt8FjwNQMspnxM,1929
22
22
  openlit/instrumentation/anthropic/anthropic.py,sha256=NxJJjhsu9sSFIlBp322olGkPlLt9Bn5sndaugYA68dE,5149
23
23
  openlit/instrumentation/anthropic/async_anthropic.py,sha256=ivJGygKWVTS2hWWX12_g1tiq-5mpeHXETZsWoFZL3UE,5235
@@ -56,10 +56,10 @@ openlit/instrumentation/embedchain/__init__.py,sha256=x2_qvJTwWog_mH6IY987Bp9mWx
56
56
  openlit/instrumentation/embedchain/embedchain.py,sha256=f4hyOr1Xr0RC4PNHRu46aV-jmEh-lIeKN8XLjgY7aWM,7929
57
57
  openlit/instrumentation/firecrawl/__init__.py,sha256=kyVsAiDBC2djifqT2w1cPRAotiEyEabNvnBeSQxi9N8,1876
58
58
  openlit/instrumentation/firecrawl/firecrawl.py,sha256=4X38UrLYeGm3uez-edYA6qEc0nKC3p77yfKgKBBud0A,3826
59
- openlit/instrumentation/google_ai_studio/__init__.py,sha256=d4aDvCSfDtT2geRbwG5yinu62uPTHaj4PtalimuvG-k,2685
60
- openlit/instrumentation/google_ai_studio/async_google_ai_studio.py,sha256=sWwoUCzaRO9TTSmwO12hPqKTdBmrgG1TTkyvgLN-CIo,5774
61
- openlit/instrumentation/google_ai_studio/google_ai_studio.py,sha256=Qps8EY6xZDFT1ZQWZJ0RAQxORHG8PjrTzq52vSqrJ2o,5676
62
- openlit/instrumentation/google_ai_studio/utils.py,sha256=0Bqs2GMFv6e5UU-FV-s-RBVvJOTSNvD74AaGCPD5CVc,11254
59
+ openlit/instrumentation/google_ai_studio/__init__.py,sha256=VLNOlaTFzjOpuUzloynvADewiTmaEu1wx8FerEbmsvg,2510
60
+ openlit/instrumentation/google_ai_studio/async_google_ai_studio.py,sha256=UL5AdTwkzdTKUomTfETMgYjUl00qL7BB8U0izuXfKFo,5527
61
+ openlit/instrumentation/google_ai_studio/google_ai_studio.py,sha256=nanOoXz-1uJtdh39aD438_yMk0no3AM7VVNKzDganHo,5429
62
+ openlit/instrumentation/google_ai_studio/utils.py,sha256=-X5sHk216ajJrl4cP35f5vT8YAZaIE4yLKI7nWEKHkQ,11140
63
63
  openlit/instrumentation/gpt4all/__init__.py,sha256=cO8mi3hhPDXcNwb9AwQ3-wQ_ydnOeBRwb0cptlQmAM4,1805
64
64
  openlit/instrumentation/gpt4all/gpt4all.py,sha256=EYp0njZ1kF56rTAjYZVtufA5W4xTWGzSIntjJ4MEfl4,24185
65
65
  openlit/instrumentation/gpu/__init__.py,sha256=QQCFVEbRfdeTjmdFe-UeEiy19vEEWSIBpj2B1wYGhUs,11036
@@ -115,9 +115,10 @@ openlit/instrumentation/qdrant/qdrant.py,sha256=pafjlAzMPzYLRYFfTtWXsLKYVQls-grk
115
115
  openlit/instrumentation/reka/__init__.py,sha256=39ZKj44PPUue8feG3bivAejfL66yD23pCJQ8hHnQKbY,1884
116
116
  openlit/instrumentation/reka/async_reka.py,sha256=GlUGTMy8LgA6qSwM0YyXlSM1Z-hYxyH9rMqIbw2pSRU,9446
117
117
  openlit/instrumentation/reka/reka.py,sha256=L6gH7j94tcYlc_FCkQP6SrxH7yBr4uSgtN8Bzm_mu6k,9428
118
- openlit/instrumentation/together/__init__.py,sha256=MLLL2t8FyrytpfMueqcwekiqTKn-JN40HBD_LbZS_jQ,2661
119
- openlit/instrumentation/together/async_together.py,sha256=ToSeYqE0mCgSsCNSO0pqoyS7WU6YarHxa3I7ZrzH-d8,30634
120
- openlit/instrumentation/together/together.py,sha256=7Da9fjHaZk_ObXMnSZA79-RktgwHRVYevsZAA-OpcXY,30530
118
+ openlit/instrumentation/together/__init__.py,sha256=0UmUqQtppyK3oopb4lTjX2LITgVCR8VtH46IAV1rpA8,2484
119
+ openlit/instrumentation/together/async_together.py,sha256=0-h5fKw6rIwN_fvWVpGuvVqizIuM9xFCzz8Z4oGgOj0,6822
120
+ openlit/instrumentation/together/together.py,sha256=nY6mzHmHgoMbbnB_9eL0EBQjP0ltJVdkQj4pbamHAj0,6723
121
+ openlit/instrumentation/together/utils.py,sha256=TtRzPMXHBBFzB_9X-9dcxZvvDWrAY7TbqVn_zZovjQk,14370
121
122
  openlit/instrumentation/transformers/__init__.py,sha256=9Ubss5nlumcypxprxff8Fv3sst7II27SsvCzqkBX9Kg,1457
122
123
  openlit/instrumentation/transformers/transformers.py,sha256=y--t7PXhUfPC81w-aEE7qowMah3os9gnKBQ5bN4QLGc,1980
123
124
  openlit/instrumentation/transformers/utils.py,sha256=3f-ewpUpduaBrTVIFJKaabACjz-6Vf8K7NEU0EzQ4Nk,8042
@@ -131,7 +132,7 @@ openlit/otel/events.py,sha256=VrMjTpvnLtYRBHCiFwJojTQqqNpRCxoD4yJYeQrtPsk,3560
131
132
  openlit/otel/metrics.py,sha256=GM2PDloBGRhBTkHHkYaqmOwIAQkY124ZhW4sEqW1Fgk,7086
132
133
  openlit/otel/tracing.py,sha256=tjV2bEbEDPUB1Z46gE-UsJsb04sRdFrfbhIDkxViZc0,3103
133
134
  openlit/semcov/__init__.py,sha256=ptyo37PY-FHDx_PShEvbdns71cD4YvvXw15bCRXKCKM,13461
134
- openlit-1.34.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
135
- openlit-1.34.3.dist-info/METADATA,sha256=8_jDnUBC1cxAr2DNwkg5IXbNQX2qru-_nC7OpwC6Jh8,23469
136
- openlit-1.34.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
137
- openlit-1.34.3.dist-info/RECORD,,
135
+ openlit-1.34.5.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
136
+ openlit-1.34.5.dist-info/METADATA,sha256=cuZKF-emjaVNJ5L7tfP9fNDgt_EnO1JQty7xZ2TyHXc,23469
137
+ openlit-1.34.5.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
138
+ openlit-1.34.5.dist-info/RECORD,,