openlit 1.17.0__py3-none-any.whl → 1.18.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openlit/__init__.py CHANGED
@@ -32,6 +32,7 @@ from openlit.instrumentation.groq import GroqInstrumentor
 from openlit.instrumentation.ollama import OllamaInstrumentor
 from openlit.instrumentation.gpt4all import GPT4AllInstrumentor
 from openlit.instrumentation.elevenlabs import ElevenLabsInstrumentor
+from openlit.instrumentation.vllm import VLLMInstrumentor
 from openlit.instrumentation.langchain import LangChainInstrumentor
 from openlit.instrumentation.llamaindex import LlamaIndexInstrumentor
 from openlit.instrumentation.haystack import HaystackInstrumentor
@@ -194,6 +195,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "ollama": "ollama",
         "gpt4all": "gpt4all",
         "elevenlabs": "elevenlabs",
+        "vllm": "vllm",
         "langchain": "langchain",
         "llama_index": "llama_index",
         "haystack": "haystack",
@@ -270,6 +272,7 @@ def init(environment="default", application_name="default", tracer=None, otlp_en
         "ollama": OllamaInstrumentor(),
         "gpt4all": GPT4AllInstrumentor(),
         "elevenlabs": ElevenLabsInstrumentor(),
+        "vllm": VLLMInstrumentor(),
         "langchain": LangChainInstrumentor(),
         "llama_index": LlamaIndexInstrumentor(),
         "haystack": HaystackInstrumentor(),

openlit/instrumentation/ollama/async_ollama.py CHANGED
@@ -6,7 +6,11 @@ Module for monitoring Ollama API calls.
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
-from openlit.__helpers import handle_exception, general_tokens
+from openlit.__helpers import (
+    handle_exception,
+    general_tokens,
+    get_chat_model_cost,
+    get_embed_model_cost)
 from openlit.semcov import SemanticConvetion
 
 # Initialize logger for logging potential issues and operations
@@ -90,10 +94,11 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
                     formatted_messages.append(f"{role}: {content}")
                 prompt = "\n".join(formatted_messages)
 
-                # Calculate cost of the operation
-                cost = 0
                 prompt_tokens = general_tokens(prompt)
                 total_tokens = prompt_tokens + completion_tokens
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                           pricing_info, prompt_tokens, completion_tokens)
 
                 # Set Span attributes
                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -219,11 +224,12 @@ def async_chat(gen_ai_endpoint, version, environment, application_name,
                     },
                 )
 
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = general_tokens(prompt)
             completion_tokens = response["eval_count"]
             total_tokens = prompt_tokens + completion_tokens
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                       pricing_info, prompt_tokens, completion_tokens)
 
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                prompt_tokens)
@@ -331,10 +337,11 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
 
         # Handling exception ensure observability without disrupting operation
         try:
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = general_tokens(kwargs.get("prompt", ""))
             total_tokens = prompt_tokens + completion_tokens
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                       pricing_info, prompt_tokens, completion_tokens)
 
             # Set Span attributes
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -442,11 +449,12 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
                     },
                 )
 
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = response["prompt_eval_count"]
             completion_tokens = response["eval_count"]
             total_tokens = prompt_tokens + completion_tokens
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                       pricing_info, prompt_tokens, completion_tokens)
 
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                prompt_tokens)
@@ -534,9 +542,10 @@ def async_embeddings(gen_ai_endpoint, version, environment, application_name,
         response = await wrapped(*args, **kwargs)
 
         try:
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = general_tokens(kwargs.get('prompt', ""))
+            # Calculate cost of the operation
+            cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
+                                        pricing_info, prompt_tokens)
             # Set Span attributes
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
             span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
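
Note: every "cost = 0" placeholder above is replaced by a call to get_chat_model_cost (or get_embed_model_cost for embeddings) from openlit.__helpers, whose body is not part of this diff. A plausible sketch of the chat variant, assuming pricing_info maps model names to per-1k-token prices (the dictionary layout is an assumption, not the package's confirmed schema):

def get_chat_model_cost(model, pricing_info, prompt_tokens, completion_tokens):
    # Assumed layout: {"chat": {model: {"promptPrice": ..., "completionPrice": ...}}}
    try:
        cost = ((prompt_tokens / 1000) * pricing_info["chat"][model]["promptPrice"] +
                (completion_tokens / 1000) * pricing_info["chat"][model]["completionPrice"])
    except (KeyError, TypeError):
        cost = 0  # unknown model: fall back to the previous behaviour of reporting zero
    return cost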

openlit/instrumentation/ollama/ollama.py CHANGED
@@ -6,7 +6,12 @@ Module for monitoring Ollama API calls.
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
 from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
-from openlit.__helpers import handle_exception, general_tokens
+from openlit.__helpers import (
+    handle_exception,
+    general_tokens,
+    get_chat_model_cost,
+    get_embed_model_cost
+)
 from openlit.semcov import SemanticConvetion
 
 # Initialize logger for logging potential issues and operations
@@ -90,10 +95,11 @@ def chat(gen_ai_endpoint, version, environment, application_name,
                     formatted_messages.append(f"{role}: {content}")
                 prompt = "\n".join(formatted_messages)
 
-                # Calculate cost of the operation
-                cost = 0
                 prompt_tokens = general_tokens(prompt)
                 total_tokens = prompt_tokens + completion_tokens
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                           pricing_info, prompt_tokens, completion_tokens)
 
                 # Set Span attributes
                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -219,11 +225,12 @@ def chat(gen_ai_endpoint, version, environment, application_name,
                     },
                 )
 
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = general_tokens(prompt)
             completion_tokens = response["eval_count"]
             total_tokens = prompt_tokens + completion_tokens
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                       pricing_info, prompt_tokens, completion_tokens)
 
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                prompt_tokens)
@@ -331,10 +338,11 @@ def generate(gen_ai_endpoint, version, environment, application_name,
 
         # Handling exception ensure observability without disrupting operation
         try:
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = general_tokens(kwargs.get("prompt", ""))
             total_tokens = prompt_tokens + completion_tokens
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                       pricing_info, prompt_tokens, completion_tokens)
 
             # Set Span attributes
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
@@ -442,11 +450,12 @@ def generate(gen_ai_endpoint, version, environment, application_name,
                     },
                 )
 
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = response["prompt_eval_count"]
             completion_tokens = response["eval_count"]
             total_tokens = prompt_tokens + completion_tokens
+            # Calculate cost of the operation
+            cost = get_chat_model_cost(kwargs.get("model", "llama3"),
+                                       pricing_info, prompt_tokens, completion_tokens)
 
             span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                prompt_tokens)
@@ -534,9 +543,10 @@ def embeddings(gen_ai_endpoint, version, environment, application_name,
         response = wrapped(*args, **kwargs)
 
         try:
-            # Calculate cost of the operation
-            cost = 0
             prompt_tokens = general_tokens(kwargs.get('prompt', ""))
+            # Calculate cost of the operation
+            cost = get_embed_model_cost(kwargs.get('model', "mistral-embed"),
+                                        pricing_info, prompt_tokens)
             # Set Span attributes
             span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
             span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
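
Note: the sync wrappers mirror the async changes exactly; the embeddings path prices only prompt tokens via get_embed_model_cost. A sketch under the same assumed pricing_info layout (hypothetical, for illustration only):

def get_embed_model_cost(model, pricing_info, prompt_tokens):
    # Assumed layout: {"embeddings": {model: price_per_1k_tokens}}
    try:
        cost = (prompt_tokens / 1000) * pricing_info["embeddings"][model]
    except (KeyError, TypeError):
        cost = 0  # unknown model: report zero cost rather than fail the span
    return cost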

openlit/instrumentation/vllm/__init__.py ADDED
@@ -0,0 +1,43 @@
+# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
+"""Initializer of Auto Instrumentation of vLLM Functions"""
+
+from typing import Collection
+import importlib.metadata
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from wrapt import wrap_function_wrapper
+
+from openlit.instrumentation.vllm.vllm import (
+    generate
+)
+
+_instruments = ("vllm >= 0.5.4",)
+
+class VLLMInstrumentor(BaseInstrumentor):
+    """
+    An instrumentor for vLLM's client library.
+    """
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return _instruments
+
+    def _instrument(self, **kwargs):
+        application_name = kwargs.get("application_name", "default_application")
+        environment = kwargs.get("environment", "default_environment")
+        tracer = kwargs.get("tracer")
+        metrics = kwargs.get("metrics_dict")
+        pricing_info = kwargs.get("pricing_info", {})
+        trace_content = kwargs.get("trace_content", False)
+        disable_metrics = kwargs.get("disable_metrics")
+        version = importlib.metadata.version("vllm")
+
+        # sync chat
+        wrap_function_wrapper(
+            "vllm",
+            "LLM.generate",
+            generate("vllm.generate", version, environment, application_name,
+                     tracer, pricing_info, trace_content, metrics, disable_metrics),
+        )
+
+    def _uninstrument(self, **kwargs):
+        # Proper uninstrumentation logic to revert patched methods
+        pass
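
Note: once _instrument has patched LLM.generate through wrapt, offline vLLM generation is traced transparently. A minimal end-to-end sketch (the model mirrors the facebook/opt-125m fallback used in the wrapper below; running it requires hardware that can load the model):

import openlit
from vllm import LLM

openlit.init(application_name="vllm-demo", environment="staging")

llm = LLM(model="facebook/opt-125m")                # LLM.generate is now wrapped
outputs = llm.generate(["What is observability?"])  # traced as span "vllm.generate"
print(outputs[0].outputs[0].text)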

openlit/instrumentation/vllm/vllm.py ADDED
@@ -0,0 +1,143 @@
+# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, possibly-used-before-assignment
+"""
+Module for monitoring vLLM API calls.
+"""
+
+import logging
+from opentelemetry.trace import SpanKind, Status, StatusCode
+from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from openlit.__helpers import handle_exception, general_tokens
+from openlit.semcov import SemanticConvetion
+
+# Initialize logger for logging potential issues and operations
+logger = logging.getLogger(__name__)
+
+def generate(gen_ai_endpoint, version, environment, application_name,
+             tracer, pricing_info, trace_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for generate to collect metrics.
+
+    Args:
+        gen_ai_endpoint: Endpoint identifier for logging and tracing.
+        version: Version of the monitoring package.
+        environment: Deployment environment (e.g., production, staging).
+        application_name: Name of the application using the vLLM API.
+        tracer: OpenTelemetry tracer for creating spans.
+        pricing_info: Information used for calculating the cost of vLLM usage.
+        trace_content: Flag indicating whether to trace the actual content.
+
+    Returns:
+        A function that wraps the generate method to add telemetry.
+    """
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the 'generate' API call to add telemetry.
+
+        This collects metrics such as execution time, cost, and token usage, and handles errors
+        gracefully, adding details to the trace for observability.
+
+        Args:
+            wrapped: The original 'generate' method to be wrapped.
+            instance: The instance of the class where the original method is defined.
+            args: Positional arguments for the 'generate' method.
+            kwargs: Keyword arguments for the 'generate' method.
+
+        Returns:
+            The response from the original 'generate' method.
+        """
+
+        # pylint: disable=line-too-long
+        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+            response = wrapped(*args, **kwargs)
+
+            try:
+                model = instance.llm_engine.model_config.model or "facebook/opt-125m"
+                # Set base span attribues
+                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
+                                   SemanticConvetion.GEN_AI_SYSTEM_VLLM)
+                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
+                                   SemanticConvetion.GEN_AI_TYPE_CHAT)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
+                                   gen_ai_endpoint)
+                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+                                   environment)
+                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                                   application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
+                                   model)
+                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
+                                   False)
+                input_tokens = 0
+                output_tokens = 0
+                cost = 0
+
+                if trace_content:
+                    prompt_attributes = {}
+                    completion_attributes = {}
+
+                    for i, output in enumerate(response):
+                        prompt_attributes[f"{SemanticConvetion.GEN_AI_CONTENT_PROMPT}.{i}"] = output.prompt
+                        completion_attributes[f"{SemanticConvetion.GEN_AI_CONTENT_COMPLETION}.{i}"] = output.outputs[0].text
+                        input_tokens += general_tokens(output.prompt)
+                        output_tokens += general_tokens(output.outputs[0].text)
+
+                    # Add a single event for all prompts
+                    span.add_event(
+                        name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
+                        attributes=prompt_attributes,
+                    )
+
+                    # Add a single event for all completions
+                    span.add_event(
+                        name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
+                        attributes=completion_attributes,
+                    )
+
+                total_tokens = input_tokens + output_tokens
+
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+                                   input_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+                                   output_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
+                                   total_tokens)
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
+                                   cost)
+
+                span.set_status(Status(StatusCode.OK))
+
+                if disable_metrics is False:
+                    attributes = {
+                        TELEMETRY_SDK_NAME:
+                            "openlit",
+                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
+                            application_name,
+                        SemanticConvetion.GEN_AI_SYSTEM:
+                            SemanticConvetion.GEN_AI_SYSTEM_VLLM,
+                        SemanticConvetion.GEN_AI_ENVIRONMENT:
+                            environment,
+                        SemanticConvetion.GEN_AI_TYPE:
+                            SemanticConvetion.GEN_AI_TYPE_CHAT,
+                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
+                            model
+                    }
+
+                    metrics["genai_requests"].add(1, attributes)
+                    metrics["genai_total_tokens"].add(total_tokens, attributes)
+                    metrics["genai_completion_tokens"].add(output_tokens, attributes)
+                    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+                    metrics["genai_cost"].record(cost, attributes)
+
+                # Return original response
+                return response
+
+            except Exception as e:
+                handle_exception(span, e)
+                logger.error("Error in trace creation: %s", e)
+
+                # Return original response
+                return response
+
+    return wrapper
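
Note: the wrapper approximates token counts with general_tokens, reading only .prompt and .outputs[0].text from each vLLM RequestOutput; the helper's implementation is not shown in this diff. A plausible stand-in, assuming a generic tiktoken encoding (an assumption, the packaged helper may count differently):

import tiktoken

def general_tokens(text):
    # Approximate usage with a generic BPE encoding; a model-specific
    # tokenizer would be more precise but is not always available.
    return len(tiktoken.get_encoding("cl100k_base").encode(text))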

openlit/semcov/__init__.py CHANGED
@@ -101,6 +101,7 @@ class SemanticConvetion:
     GEN_AI_SYSTEM_OLLAMA = "ollama"
     GEN_AI_SYSTEM_GPT4ALL = "gpt4all"
     GEN_AI_SYSTEM_ELEVENLABS = "elevenlabs"
+    GEN_AI_SYSTEM_VLLM = "vLLM"
     GEN_AI_SYSTEM_LANGCHAIN = "langchain"
     GEN_AI_SYSTEM_LLAMAINDEX = "llama_index"
     GEN_AI_SYSTEM_HAYSTACK = "haystack"

openlit-1.18.1.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openlit
-Version: 1.17.0
+Version: 1.18.1
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications, facilitating the integration of observability into your GenAI-driven projects
 Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT
@@ -68,6 +68,8 @@ This project adheres to the [Semantic Conventions](https://github.com/open-telem
 | [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
 | [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | | |
 | [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | | |
+| [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | | |
+
 ## Supported Destinations
 - [✅ OpenTelemetry Collector](https://docs.openlit.io/latest/connections/otelcol)
 - [✅ Prometheus + Tempo](https://docs.openlit.io/latest/connections/prometheus-tempo)

openlit-1.18.1.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
 openlit/__helpers.py,sha256=lrn4PBs9owDudiCY2NBoVbAi7AU_HtUpyOj0oqPBsPY,5545
-openlit/__init__.py,sha256=eJKH1Op7wzBsuoBYuM_C022Jo7cCtRQBJxf2lpDfe_o,14981
+openlit/__init__.py,sha256=LfU5w-D62u5pY70DdNbv5_DGtqeL1Yb0TlY-l0NAn8I,15103
 openlit/instrumentation/anthropic/__init__.py,sha256=oaU53BOPyfUKbEzYvLr1DPymDluurSnwo4Hernf2XdU,1955
 openlit/instrumentation/anthropic/anthropic.py,sha256=y7CEGhKOGHWt8G_5Phr4qPJTfPGRJIAr9Yk6nM3CcvM,16775
 openlit/instrumentation/anthropic/async_anthropic.py,sha256=Zz1KRKIG9wGn0quOoLvjORC-49IvHQpJ6GBdB-4PfCQ,16816
@@ -32,8 +32,8 @@ openlit/instrumentation/mistral/__init__.py,sha256=zJCIpFWRbsYrvooOJYuqwyuKeSOQL
 openlit/instrumentation/mistral/async_mistral.py,sha256=WXU46nbe61IglfGehrwdprMVB6hNiuqqmJHE3XvvP0E,22192
 openlit/instrumentation/mistral/mistral.py,sha256=DC-wLIypokPEEAbVSKX5sytv94DY2QDnk12401e5vq8,22039
 openlit/instrumentation/ollama/__init__.py,sha256=cOax8PiypDuo_FC4WvDCYBRo7lH5nV9xU92h7k-eZbg,3812
-openlit/instrumentation/ollama/async_ollama.py,sha256=UBwl-Jv4VPyPuhKIesL0VYIQ2u1AZ0U8c9MZQzEPn6c,30594
-openlit/instrumentation/ollama/ollama.py,sha256=bZPgkJJUD3RgJTUUt98cklfZ8Y1lsSgQGOUXXHk53BI,30504
+openlit/instrumentation/ollama/async_ollama.py,sha256=7lbikD-I9k8VL63idqj3VMEfiEKJmFNUPR8Xb6g2phQ,31366
+openlit/instrumentation/ollama/ollama.py,sha256=lBt1d3rFnF1tFbfdOccwjEafHnmTAUGsiOKSHku6Fkw,31277
 openlit/instrumentation/openai/__init__.py,sha256=AZ2cPr3TMKkgGdMl_yXMeSi7bWhtmMqOW1iHdzHHGHA,16265
 openlit/instrumentation/openai/async_azure_openai.py,sha256=XbST1UE_zXzNL6RX2XwCsK_a6IhG9PHVTMKBjGrUcB0,48961
 openlit/instrumentation/openai/async_openai.py,sha256=RGNpKLsHYfJXjj1ImuWRJToVSs0wdvMNp2kyTBrBaDw,47578
@@ -48,10 +48,12 @@ openlit/instrumentation/transformers/transformers.py,sha256=KNAT2ROjziW6OAP6Y0Ec
 openlit/instrumentation/vertexai/__init__.py,sha256=N3E9HtzefD-zC0fvmfGYiDmSqssoavp_i59wfuYLyMw,6079
 openlit/instrumentation/vertexai/async_vertexai.py,sha256=8JwSwLPPA4lAatf4w_5kJ5_YZDLwl5yG8N59cTD-EZM,55198
 openlit/instrumentation/vertexai/vertexai.py,sha256=R6dDQfC3YFoZDygxU2fkflcMsqIv8AVoU3XOwWSvpwA,54951
+openlit/instrumentation/vllm/__init__.py,sha256=OVWalQ1dXvip1DUsjUGaHX4J-2FrSp-T-qCVOfw7OZo,1495
+openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOsOGN7Wd8,6527
 openlit/otel/metrics.py,sha256=O7NoaDz0bY19mqpE4-0PcKwEe-B-iJFRgOCaanAuZAc,4291
 openlit/otel/tracing.py,sha256=vL1ifMbARPBpqK--yXYsCM6y5dSu5LFIKqkhZXtYmUc,3712
-openlit/semcov/__init__.py,sha256=7E8qfoNRQbu6QAYBOkSUlESOQ8-VzvLTIKyj75BrHsY,7466
-openlit-1.17.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-openlit-1.17.0.dist-info/METADATA,sha256=mY_exhD2wObnFejJJJjXxlMwdMZ34uZBKlObiwhMc1Y,14120
-openlit-1.17.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-openlit-1.17.0.dist-info/RECORD,,
+openlit/semcov/__init__.py,sha256=EvoNOKtc7UKwLZ3Gp0-B1zwmeTcAIbx8O7wvAw8wXP4,7498
+openlit-1.18.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.18.1.dist-info/METADATA,sha256=l-nXpcuMqzXPGB-IiwqWhvkjq-pW4lsK_8Q5_rH3uoo,14334
+openlit-1.18.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+openlit-1.18.1.dist-info/RECORD,,