openlit 1.18.0__py3-none-any.whl → 1.18.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- openlit/instrumentation/mistral/__init__.py +18 -18
- openlit/instrumentation/mistral/async_mistral.py +13 -13
- openlit/instrumentation/mistral/mistral.py +12 -12
- openlit/instrumentation/ollama/async_ollama.py +20 -11
- openlit/instrumentation/ollama/ollama.py +21 -11
- {openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/METADATA +3 -3
- {openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/RECORD +9 -9
- {openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/LICENSE +0 -0
- {openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/WHEEL +0 -0

openlit/instrumentation/mistral/__init__.py
@@ -9,10 +9,10 @@ from openlit.instrumentation.mistral.mistral import chat, chat_stream, embedding
 from openlit.instrumentation.mistral.async_mistral import async_chat, async_chat_stream
 from openlit.instrumentation.mistral.async_mistral import async_embeddings

-_instruments = ("mistralai >=
+_instruments = ("mistralai >= 1.0.0",)

 class MistralInstrumentor(BaseInstrumentor):
-    """An instrumentor for
+    """An instrumentor for Mistral's client library."""

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments

@@ -27,50 +27,50 @@ class MistralInstrumentor(BaseInstrumentor):
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("mistralai")

-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.complete",
             chat("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )

-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.stream",
             chat_stream("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )

-        #sync
+        # sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.embeddings",
+            "Embeddings.create",
             embeddings("mistral.embeddings", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )

         # Async
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.complete_async",
             async_chat("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )

-        #
+        # Async
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.chat",
+            "Chat.stream_async",
             async_chat_stream("mistral.chat", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )

         #sync
         wrap_function_wrapper(
-            "mistralai.
-            "
+            "mistralai.embeddings",
+            "Embeddings.create_async",
             async_embeddings("mistral.embeddings", version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
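
Note: the hunk above retargets the instrumentor at the mistralai 1.x module layout (`mistralai.chat` / `mistralai.embeddings`) using wrapt's `wrap_function_wrapper(module, name, wrapper)`. As a rough sketch of the wrapper-factory shape these calls expect (a placeholder, not openlit's real `chat` factory, which also records metrics and cost):

```python
# Hypothetical sketch of the wrapper-factory pattern; illustration only.
from wrapt import wrap_function_wrapper

def chat(gen_ai_endpoint, version, environment, application_name,
         tracer, pricing_info, trace_content, metrics, disable_metrics):
    def wrapper(wrapped, instance, args, kwargs):
        # 'wrapped' is the original Chat.complete bound method being patched
        with tracer.start_as_current_span(gen_ai_endpoint) as span:
            response = wrapped(*args, **kwargs)
            span.set_attribute("gen_ai.request.model", kwargs.get("model", ""))
            return response
    return wrapper

# Mirrors the calls in _instrument(); patches the 1.x client module in place:
# wrap_function_wrapper("mistralai.chat", "Chat.complete", chat("mistral.chat", ...))
```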

openlit/instrumentation/mistral/async_mistral.py
@@ -56,8 +56,8 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message
-                content = message
+                role = message["role"]
+                content = message["content"]

                 if isinstance(content, list):
                     content_str = ", ".join(
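
The switch to `message["role"]` / `message["content"]` reflects that the traced `messages` kwarg now carries plain dictionaries. A minimal, assumed call shape that this formatting logic handles (content may also be a list of parts, which the `isinstance` branch joins):

```python
# Assumed shape of the 'messages' kwarg the instrumentation formats.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the release notes."},
]

formatted = "\n".join(f'{m["role"]}: {m["content"]}' for m in messages)
print(formatted)
```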

@@ -207,14 +207,14 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
        llmresponse = ""

        # Loop through streaming events capturing relevant details
-       async for event in wrapped(*args, **kwargs):
-           response_id = event.id
-           llmresponse += event.choices[0].delta.content
-           if event.usage is not None:
-               prompt_tokens = event.usage.prompt_tokens
-               completion_tokens = event.usage.completion_tokens
-               total_tokens = event.usage.total_tokens
-               finish_reason = event.choices[0].finish_reason
+       async for event in await wrapped(*args, **kwargs):
+           response_id = event.data.id
+           llmresponse += event.data.choices[0].delta.content
+           if event.data.usage is not None:
+               prompt_tokens = event.data.usage.prompt_tokens
+               completion_tokens = event.data.usage.completion_tokens
+               total_tokens = event.data.usage.total_tokens
+               finish_reason = event.data.choices[0].finish_reason
            yield event

        # Handling exception ensure observability without disrupting operation
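
Two things change here for the 1.x client: the wrapped streaming call is awaited before iteration, and each streamed event wraps the completion chunk in a `.data` attribute. A brief sketch of consuming such a stream, assuming the 1.x `Mistral` client surface this diff patches (API key, model, and prompt are illustrative):

```python
import asyncio
from mistralai import Mistral

async def main():
    client = Mistral(api_key="YOUR_API_KEY")  # illustrative key
    # stream_async() is awaited first; it returns an async iterator of events
    stream = await client.chat.stream_async(
        model="mistral-small-latest",
        messages=[{"role": "user", "content": "Say hello"}],
    )
    text = ""
    async for event in stream:
        # each event wraps the chunk in .data, matching the instrumentation above
        delta = event.data.choices[0].delta.content
        if delta:
            text += delta
    print(text)

asyncio.run(main())
```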

@@ -223,8 +223,8 @@ def async_chat_stream(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message
-                content = message
+                role = message["role"]
+                content = message["content"]

                 if isinstance(content, list):
                     content_str = ", ".join(

@@ -364,7 +364,7 @@ def async_embeddings(gen_ai_endpoint, version, environment, application_name,

        try:
            # Get prompt from kwargs and store as a single string
-           prompt = ', '.join(kwargs.get('
+           prompt = ', '.join(kwargs.get('inputs', []))

            # Calculate cost of the operation
            cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),

openlit/instrumentation/mistral/mistral.py
@@ -55,8 +55,8 @@ def chat(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message
-                content = message
+                role = message["role"]
+                content = message["content"]

                 if isinstance(content, list):
                     content_str = ", ".join(

@@ -207,13 +207,13 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,

        # Loop through streaming events capturing relevant details
        for event in wrapped(*args, **kwargs):
-           response_id = event.id
-           llmresponse += event.choices[0].delta.content
-           if event.usage is not None:
-               prompt_tokens = event.usage.prompt_tokens
-               completion_tokens = event.usage.completion_tokens
-               total_tokens = event.usage.total_tokens
-               finish_reason = event.choices[0].finish_reason
+           response_id = event.data.id
+           llmresponse += event.data.choices[0].delta.content
+           if event.data.usage is not None:
+               prompt_tokens = event.data.usage.prompt_tokens
+               completion_tokens = event.data.usage.completion_tokens
+               total_tokens = event.data.usage.total_tokens
+               finish_reason = event.data.choices[0].finish_reason
            yield event

        # Handling exception ensure observability without disrupting operation

@@ -222,8 +222,8 @@ def chat_stream(gen_ai_endpoint, version, environment, application_name,
             message_prompt = kwargs.get('messages', "")
             formatted_messages = []
             for message in message_prompt:
-                role = message
-                content = message
+                role = message["role"]
+                content = message["content"]

                 if isinstance(content, list):
                     content_str = ", ".join(

@@ -363,7 +363,7 @@ def embeddings(gen_ai_endpoint, version, environment, application_name,

        try:
            # Get prompt from kwargs and store as a single string
-           prompt = ', '.join(kwargs.get('
+           prompt = ', '.join(kwargs.get('inputs', []))

            # Calculate cost of the operation
            cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
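
Both embeddings wrappers now read the 1.x keyword `inputs` rather than the old argument before joining it into a single prompt string. A short, assumed example of the call shape being traced (key and texts are illustrative):

```python
from mistralai import Mistral

client = Mistral(api_key="YOUR_API_KEY")  # illustrative key

# The instrumentation joins the 'inputs' list into one prompt string
response = client.embeddings.create(
    model="mistral-embed",
    inputs=["first sentence to embed", "second sentence to embed"],
)
print(len(response.data))  # expected: one embedding per input
```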

openlit/instrumentation/ollama/async_ollama.py
@@ -6,7 +6,11 @@ Module for monitoring Ollama API calls.
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
-from openlit.__helpers import
+from openlit.__helpers import (
+    handle_exception,
+    general_tokens,
+    get_chat_model_cost,
+    get_embed_model_cost)
 from openlit.semcov import SemanticConvetion

 # Initialize logger for logging potential issues and operations

@@ -90,10 +94,11 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
                    formatted_messages.append(f"{role}: {content}")
                prompt = "\n".join(formatted_messages)

-               # Calculate cost of the operation
-               cost = 0
                prompt_tokens = general_tokens(prompt)
                total_tokens = prompt_tokens + completion_tokens
+               # Calculate cost of the operation
+               cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                          pricing_info, prompt_tokens, completion_tokens)

                # Set Span attributes
                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
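
Throughout the Ollama instrumentation, this release replaces the hard-coded `cost = 0` with a lookup via `get_chat_model_cost(model, pricing_info, prompt_tokens, completion_tokens)`. The helper's internals are not part of this diff; as a purely hypothetical stand-in showing what a per-token pricing lookup of that shape could do:

```python
# Hypothetical illustration only; the real openlit helper and its
# pricing_info format may differ.
def get_chat_model_cost(model, pricing_info, prompt_tokens, completion_tokens):
    # Assume pricing_info maps model -> per-1k-token prices for prompt/completion.
    prices = pricing_info.get("chat", {}).get(
        model, {"promptPrice": 0, "completionPrice": 0})
    return ((prompt_tokens / 1000) * prices["promptPrice"]
            + (completion_tokens / 1000) * prices["completionPrice"])

pricing_info = {"chat": {"llama3": {"promptPrice": 0.0, "completionPrice": 0.0}}}
print(get_chat_model_cost("llama3", pricing_info, 120, 45))  # 0.0 for a local model
```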

@@ -219,11 +224,12 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
                    },
                )

-               # Calculate cost of the operation
-               cost = 0
                prompt_tokens = general_tokens(prompt)
                completion_tokens = response["eval_count"]
                total_tokens = prompt_tokens + completion_tokens
+               # Calculate cost of the operation
+               cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                          pricing_info, prompt_tokens, completion_tokens)

                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                   prompt_tokens)

@@ -331,10 +337,11 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,

        # Handling exception ensure observability without disrupting operation
        try:
-           # Calculate cost of the operation
-           cost = 0
            prompt_tokens = general_tokens(kwargs.get("prompt", ""))
            total_tokens = prompt_tokens + completion_tokens
+           # Calculate cost of the operation
+           cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                      pricing_info, prompt_tokens, completion_tokens)

            # Set Span attributes
            span.set_attribute(TELEMETRY_SDK_NAME, "openlit")

@@ -442,11 +449,12 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
                },
            )

-           # Calculate cost of the operation
-           cost = 0
            prompt_tokens = response["prompt_eval_count"]
            completion_tokens = response["eval_count"]
            total_tokens = prompt_tokens + completion_tokens
+           # Calculate cost of the operation
+           cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                      pricing_info, prompt_tokens, completion_tokens)

            span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                               prompt_tokens)

@@ -534,9 +542,10 @@ def async_embeddings(gen_ai_endpoint, version, environment, application_name,
        response = await wrapped(*args, **kwargs)

        try:
-           # Calculate cost of the operation
-           cost = 0
            prompt_tokens = general_tokens(kwargs.get('prompt', ""))
+           # Calculate cost of the operation
+           cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
+                                       pricing_info, prompt_tokens)
            # Set Span attributes
            span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
            span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,

openlit/instrumentation/ollama/ollama.py
@@ -6,7 +6,12 @@ Module for monitoring Ollama API calls.
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
-from openlit.__helpers import
+from openlit.__helpers import (
+    handle_exception,
+    general_tokens,
+    get_chat_model_cost,
+    get_embed_model_cost
+)
 from openlit.semcov import SemanticConvetion

 # Initialize logger for logging potential issues and operations
|
@@ -90,10 +95,11 @@ def chat(gen_ai_endpoint, version, environment, application_name,
|
|
90
95
|
formatted_messages.append(f"{role}: {content}")
|
91
96
|
prompt = "\n".join(formatted_messages)
|
92
97
|
|
93
|
-
# Calculate cost of the operation
|
94
|
-
cost = 0
|
95
98
|
prompt_tokens = general_tokens(prompt)
|
96
99
|
total_tokens = prompt_tokens + completion_tokens
|
100
|
+
# Calculate cost of the operation
|
101
|
+
cost = get_chat_model_cost(kwargs.get("model", "llama3"),
|
102
|
+
pricing_info, prompt_tokens, completion_tokens)
|
97
103
|
|
98
104
|
# Set Span attributes
|
99
105
|
span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
|
@@ -219,11 +225,12 @@ def chat(gen_ai_endpoint, version, environment, application_name,
|
|
219
225
|
},
|
220
226
|
)
|
221
227
|
|
222
|
-
# Calculate cost of the operation
|
223
|
-
cost = 0
|
224
228
|
prompt_tokens = general_tokens(prompt)
|
225
229
|
completion_tokens = response["eval_count"]
|
226
230
|
total_tokens = prompt_tokens + completion_tokens
|
231
|
+
# Calculate cost of the operation
|
232
|
+
cost = get_chat_model_cost(kwargs.get("model", "llama3"),
|
233
|
+
pricing_info, prompt_tokens, completion_tokens)
|
227
234
|
|
228
235
|
span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
|
229
236
|
prompt_tokens)
|
@@ -331,10 +338,11 @@ def generate(gen_ai_endpoint, version, environment, application_name,
|
|
331
338
|
|
332
339
|
# Handling exception ensure observability without disrupting operation
|
333
340
|
try:
|
334
|
-
# Calculate cost of the operation
|
335
|
-
cost = 0
|
336
341
|
prompt_tokens = general_tokens(kwargs.get("prompt", ""))
|
337
342
|
total_tokens = prompt_tokens + completion_tokens
|
343
|
+
# Calculate cost of the operation
|
344
|
+
cost = get_chat_model_cost(kwargs.get("model", "llama3"),
|
345
|
+
pricing_info, prompt_tokens, completion_tokens)
|
338
346
|
|
339
347
|
# Set Span attributes
|
340
348
|
span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
|
@@ -442,11 +450,12 @@ def generate(gen_ai_endpoint, version, environment, application_name,
|
|
442
450
|
},
|
443
451
|
)
|
444
452
|
|
445
|
-
# Calculate cost of the operation
|
446
|
-
cost = 0
|
447
453
|
prompt_tokens = response["prompt_eval_count"]
|
448
454
|
completion_tokens = response["eval_count"]
|
449
455
|
total_tokens = prompt_tokens + completion_tokens
|
456
|
+
# Calculate cost of the operation
|
457
|
+
cost = get_chat_model_cost(kwargs.get("model", "llama3"),
|
458
|
+
pricing_info, prompt_tokens, completion_tokens)
|
450
459
|
|
451
460
|
span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
|
452
461
|
prompt_tokens)
|
@@ -534,9 +543,10 @@ def embeddings(gen_ai_endpoint, version, environment, application_name,
|
|
534
543
|
response = wrapped(*args, **kwargs)
|
535
544
|
|
536
545
|
try:
|
537
|
-
# Calculate cost of the operation
|
538
|
-
cost = 0
|
539
546
|
prompt_tokens = general_tokens(kwargs.get('prompt', ""))
|
547
|
+
# Calculate cost of the operation
|
548
|
+
cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
|
549
|
+
pricing_info, prompt_tokens)
|
540
550
|
# Set Span attributes
|
541
551
|
span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
|
542
552
|
span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
|

{openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/METADATA
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.18.
-Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects
+Version: 1.18.2
+Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
-Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT
+Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
 Author: OpenLIT
 Requires-Python: >=3.7.1,<4.0.0
 Classifier: Programming Language :: Python :: 3

{openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/RECORD
@@ -28,12 +28,12 @@ openlit/instrumentation/llamaindex/__init__.py,sha256=vPtK65G6b-TwJERowVRUVl7f_n
 openlit/instrumentation/llamaindex/llamaindex.py,sha256=uiIigbwhonSbJWA7LpgOVI1R4kxxPODS1K5wyHIQ4hM,4048
 openlit/instrumentation/milvus/__init__.py,sha256=qi1yfmMrvkDtnrN_6toW8qC9BRL78bq7ayWpObJ8Bq4,2961
 openlit/instrumentation/milvus/milvus.py,sha256=qhKIoggBAJhRctRrBYz69AcvXH-eh7oBn_l9WfxpAjI,9121
-openlit/instrumentation/mistral/__init__.py,sha256=
-openlit/instrumentation/mistral/async_mistral.py,sha256=
-openlit/instrumentation/mistral/mistral.py,sha256=
+openlit/instrumentation/mistral/__init__.py,sha256=niWn0gYNOTPS5zoTjtCciDqQVj-iJehnpdh7ElB-H9w,3088
+openlit/instrumentation/mistral/async_mistral.py,sha256=l-kcaGPrX3sqPH-RXWo6ope0Ui3nUvExNJ4KX9QgDMY,22246
+openlit/instrumentation/mistral/mistral.py,sha256=Q7MMRvVFsM8o0_ebZ0EfnhGjs16SJSnmu-oE798gYMQ,22087
 openlit/instrumentation/ollama/__init__.py,sha256=cOax8PiypDuo_FC4WvDCYBRo7lH5nV9xU92h7k-eZbg,3812
-openlit/instrumentation/ollama/async_ollama.py,sha256=
-openlit/instrumentation/ollama/ollama.py,sha256=
+openlit/instrumentation/ollama/async_ollama.py,sha256=7lbikD-I9k8VL63idqj3VMEfiEKJmFNUPR8Xb6g2phQ,31366
+openlit/instrumentation/ollama/ollama.py,sha256=lBt1d3rFnF1tFbfdOccwjEafHnmTAUGsiOKSHku6Fkw,31277
 openlit/instrumentation/openai/__init__.py,sha256=AZ2cPr3TMKkgGdMl_yXMeSi7bWhtmMqOW1iHdzHHGHA,16265
 openlit/instrumentation/openai/async_azure_openai.py,sha256=XbST1UE_zXzNL6RX2XwCsK_a6IhG9PHVTMKBjGrUcB0,48961
 openlit/instrumentation/openai/async_openai.py,sha256=RGNpKLsHYfJXjj1ImuWRJToVSs0wdvMNp2kyTBrBaDw,47578

@@ -53,7 +53,7 @@ openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOs
 openlit/otel/metrics.py,sha256=O7NoaDz0bY19mqpE4-0PcKwEe-B-iJFRgOCaanAuZAc,4291
 openlit/otel/tracing.py,sha256=vL1ifMbARPBpqK--yXYsCM6y5dSu5LFIKqkhZXtYmUc,3712
 openlit/semcov/__init__.py,sha256=EvoNOKtc7UKwLZ3Gp0-B1zwmeTcAIbx8O7wvAw8wXP4,7498
-openlit-1.18.
-openlit-1.18.
-openlit-1.18.
-openlit-1.18.
+openlit-1.18.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.18.2.dist-info/METADATA,sha256=eD-PsH7RbUA7EOICMoUHzO_f1g_Sa0b6UcNvHnBwY-8,14347
+openlit-1.18.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+openlit-1.18.2.dist-info/RECORD,,

{openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/LICENSE: file without changes
{openlit-1.18.0.dist-info → openlit-1.18.2.dist-info}/WHEEL: file without changes