openlit 1.33.8__py3-none-any.whl → 1.33.10__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
Files changed (92)
  1. openlit/__helpers.py +88 -0
  2. openlit/__init__.py +4 -3
  3. openlit/instrumentation/ag2/ag2.py +5 -5
  4. openlit/instrumentation/ai21/__init__.py +4 -4
  5. openlit/instrumentation/ai21/ai21.py +370 -319
  6. openlit/instrumentation/ai21/async_ai21.py +371 -319
  7. openlit/instrumentation/anthropic/__init__.py +4 -4
  8. openlit/instrumentation/anthropic/anthropic.py +321 -189
  9. openlit/instrumentation/anthropic/async_anthropic.py +323 -190
  10. openlit/instrumentation/assemblyai/__init__.py +1 -1
  11. openlit/instrumentation/assemblyai/assemblyai.py +59 -43
  12. openlit/instrumentation/astra/astra.py +9 -9
  13. openlit/instrumentation/astra/async_astra.py +9 -9
  14. openlit/instrumentation/azure_ai_inference/__init__.py +4 -4
  15. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +406 -252
  16. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +406 -252
  17. openlit/instrumentation/bedrock/__init__.py +1 -1
  18. openlit/instrumentation/bedrock/bedrock.py +115 -58
  19. openlit/instrumentation/chroma/chroma.py +9 -9
  20. openlit/instrumentation/cohere/__init__.py +33 -10
  21. openlit/instrumentation/cohere/async_cohere.py +610 -0
  22. openlit/instrumentation/cohere/cohere.py +410 -219
  23. openlit/instrumentation/controlflow/controlflow.py +5 -5
  24. openlit/instrumentation/crawl4ai/async_crawl4ai.py +5 -5
  25. openlit/instrumentation/crawl4ai/crawl4ai.py +5 -5
  26. openlit/instrumentation/crewai/crewai.py +6 -4
  27. openlit/instrumentation/dynamiq/dynamiq.py +5 -5
  28. openlit/instrumentation/elevenlabs/async_elevenlabs.py +71 -46
  29. openlit/instrumentation/elevenlabs/elevenlabs.py +71 -51
  30. openlit/instrumentation/embedchain/embedchain.py +9 -9
  31. openlit/instrumentation/firecrawl/firecrawl.py +5 -5
  32. openlit/instrumentation/google_ai_studio/__init__.py +9 -9
  33. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +183 -219
  34. openlit/instrumentation/google_ai_studio/google_ai_studio.py +183 -220
  35. openlit/instrumentation/gpt4all/__init__.py +2 -2
  36. openlit/instrumentation/gpt4all/gpt4all.py +345 -220
  37. openlit/instrumentation/gpu/__init__.py +5 -5
  38. openlit/instrumentation/groq/__init__.py +2 -2
  39. openlit/instrumentation/groq/async_groq.py +356 -240
  40. openlit/instrumentation/groq/groq.py +356 -240
  41. openlit/instrumentation/haystack/haystack.py +5 -5
  42. openlit/instrumentation/julep/async_julep.py +5 -5
  43. openlit/instrumentation/julep/julep.py +5 -5
  44. openlit/instrumentation/langchain/__init__.py +13 -7
  45. openlit/instrumentation/langchain/async_langchain.py +384 -0
  46. openlit/instrumentation/langchain/langchain.py +105 -492
  47. openlit/instrumentation/letta/letta.py +11 -9
  48. openlit/instrumentation/litellm/__init__.py +4 -5
  49. openlit/instrumentation/litellm/async_litellm.py +318 -247
  50. openlit/instrumentation/litellm/litellm.py +314 -243
  51. openlit/instrumentation/llamaindex/llamaindex.py +5 -5
  52. openlit/instrumentation/mem0/mem0.py +5 -5
  53. openlit/instrumentation/milvus/milvus.py +9 -9
  54. openlit/instrumentation/mistral/__init__.py +6 -6
  55. openlit/instrumentation/mistral/async_mistral.py +423 -250
  56. openlit/instrumentation/mistral/mistral.py +420 -246
  57. openlit/instrumentation/multion/async_multion.py +6 -4
  58. openlit/instrumentation/multion/multion.py +6 -4
  59. openlit/instrumentation/ollama/__init__.py +8 -30
  60. openlit/instrumentation/ollama/async_ollama.py +385 -417
  61. openlit/instrumentation/ollama/ollama.py +384 -417
  62. openlit/instrumentation/openai/__init__.py +11 -230
  63. openlit/instrumentation/openai/async_openai.py +433 -410
  64. openlit/instrumentation/openai/openai.py +414 -394
  65. openlit/instrumentation/phidata/phidata.py +6 -4
  66. openlit/instrumentation/pinecone/pinecone.py +9 -9
  67. openlit/instrumentation/premai/__init__.py +2 -2
  68. openlit/instrumentation/premai/premai.py +262 -213
  69. openlit/instrumentation/qdrant/async_qdrant.py +9 -9
  70. openlit/instrumentation/qdrant/qdrant.py +9 -9
  71. openlit/instrumentation/reka/__init__.py +2 -2
  72. openlit/instrumentation/reka/async_reka.py +90 -52
  73. openlit/instrumentation/reka/reka.py +90 -52
  74. openlit/instrumentation/together/__init__.py +4 -4
  75. openlit/instrumentation/together/async_together.py +278 -236
  76. openlit/instrumentation/together/together.py +278 -236
  77. openlit/instrumentation/transformers/__init__.py +1 -1
  78. openlit/instrumentation/transformers/transformers.py +76 -45
  79. openlit/instrumentation/vertexai/__init__.py +14 -64
  80. openlit/instrumentation/vertexai/async_vertexai.py +330 -987
  81. openlit/instrumentation/vertexai/vertexai.py +330 -987
  82. openlit/instrumentation/vllm/__init__.py +1 -1
  83. openlit/instrumentation/vllm/vllm.py +66 -36
  84. openlit/otel/metrics.py +98 -7
  85. openlit/semcov/__init__.py +113 -80
  86. {openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/METADATA +1 -1
  87. openlit-1.33.10.dist-info/RECORD +122 -0
  88. {openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/WHEEL +1 -1
  89. openlit/instrumentation/openai/async_azure_openai.py +0 -900
  90. openlit/instrumentation/openai/azure_openai.py +0 -898
  91. openlit-1.33.8.dist-info/RECORD +0 -122
  92. {openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/LICENSE +0 -0
openlit/instrumentation/vllm/__init__.py CHANGED
@@ -34,7 +34,7 @@ class VLLMInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "vllm",
             "LLM.generate",
-            generate("vllm.generate", version, environment, application_name,
+            generate(version, environment, application_name,
                      tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
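The only change in this file is the call site: generate() no longer receives a gen_ai_endpoint label. For readers unfamiliar with the wrapt-based pattern openlit relies on here, a minimal sketch of how such a factory plugs into wrap_function_wrapper follows; the wrapper body and the example argument values are placeholders, not openlit's actual implementation.

# Minimal sketch (assumed, not openlit's code) of the wrapt pattern used above:
# generate(...) is a factory returning a wrapper with the signature that
# wrapt.wrap_function_wrapper expects: (wrapped, instance, args, kwargs).
from wrapt import wrap_function_wrapper

def generate(version, environment, application_name,
             tracer, pricing_info, trace_content, metrics, disable_metrics):
    def wrapper(wrapped, instance, args, kwargs):
        # `instance` is the vllm.LLM object and `wrapped` the original
        # LLM.generate; real instrumentation records telemetry around this call.
        return wrapped(*args, **kwargs)
    return wrapper

# Patch vllm.LLM.generate (assumes vllm is importable); the argument values are
# placeholders standing in for what VLLMInstrumentor passes at runtime.
wrap_function_wrapper("vllm", "LLM.generate",
                      generate("1.33.10", "production", "example-app",
                               None, {}, True, None, True))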
openlit/instrumentation/vllm/vllm.py CHANGED
@@ -1,24 +1,29 @@
-# pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, possibly-used-before-assignment
 """
 Module for monitoring vLLM API calls.
 """
 
 import logging
+import time
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
-from openlit.__helpers import handle_exception, general_tokens
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
+from openlit.__helpers import (
+    get_chat_model_cost,
+    handle_exception,
+    general_tokens,
+    create_metrics_attributes,
+    set_server_address_and_port
+)
 from openlit.semcov import SemanticConvetion
 
 # Initialize logger for logging potential issues and operations
 logger = logging.getLogger(__name__)
 
-def generate(gen_ai_endpoint, version, environment, application_name,
+def generate(version, environment, application_name,
              tracer, pricing_info, trace_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for generate to collect metrics.
 
     Args:
-        gen_ai_endpoint: Endpoint identifier for logging and tracing.
         version: Version of the monitoring package.
         environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the vLLM API.
@@ -47,28 +52,47 @@ def generate(gen_ai_endpoint, version, environment, application_name,
         The response from the original 'generate' method.
         """
 
+        server_address, server_port = set_server_address_and_port(instance, "api.cohere.com", 443)
+        request_model = instance.llm_engine.model_config.model or "facebook/opt-125m"
+
+        span_name = f"{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
         # pylint: disable=line-too-long
-        with tracer.start_as_current_span(gen_ai_endpoint, kind= SpanKind.CLIENT) as span:
+        with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
+            start_time = time.time()
             response = wrapped(*args, **kwargs)
+            end_time = time.time()
 
             try:
-                model = instance.llm_engine.model_config.model or "facebook/opt-125m"
                 # Set base span attribues
                 span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                    SemanticConvetion.GEN_AI_SYSTEM_VLLM)
-                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
-                                   SemanticConvetion.GEN_AI_TYPE_CHAT)
-                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
-                                   gen_ai_endpoint)
-                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
-                                   environment)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
-                                   application_name)
+                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
+                                   SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
+                span.set_attribute(SemanticConvetion.SERVER_PORT,
+                                   server_port)
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
-                                   model)
+                                   request_model)
+                span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL,
+                                   request_model)
+                span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
+                                   server_address)
+                span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
+                                   "text")
+
+                # Set base span attribues (Extras)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                                   environment)
+                span.set_attribute(SERVICE_NAME,
+                                   application_name)
                 span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                    False)
+                span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT,
+                                   end_time - start_time)
+                span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION,
+                                   version)
+
                 input_tokens = 0
                 output_tokens = 0
                 cost = 0
@@ -95,37 +119,43 @@ def generate(gen_ai_endpoint, version, environment, application_name,
                     attributes=completion_attributes,
                 )
 
-                total_tokens = input_tokens + output_tokens
-
-                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
                                    input_tokens)
-                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
                                    output_tokens)
                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-                                   total_tokens)
+                                   input_tokens + output_tokens)
+
+                # Calculate cost of the operation
+                cost = get_chat_model_cost(request_model, pricing_info,
+                                           input_tokens, output_tokens)
                 span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                    cost)
 
                 span.set_status(Status(StatusCode.OK))
 
                 if disable_metrics is False:
-                    attributes = {
-                        TELEMETRY_SDK_NAME:
-                            "openlit",
-                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
-                            application_name,
-                        SemanticConvetion.GEN_AI_SYSTEM:
-                            SemanticConvetion.GEN_AI_SYSTEM_VLLM,
-                        SemanticConvetion.GEN_AI_ENVIRONMENT:
-                            environment,
-                        SemanticConvetion.GEN_AI_TYPE:
-                            SemanticConvetion.GEN_AI_TYPE_CHAT,
-                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
-                            model
-                    }
+                    attributes = create_metrics_attributes(
+                        service_name=application_name,
+                        deployment_environment=environment,
+                        operation=SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT,
+                        system=SemanticConvetion.GEN_AI_SYSTEM_VLLM,
+                        request_model=request_model,
+                        server_address=server_address,
+                        server_port=server_port,
+                        response_model=request_model,
+                    )
 
+                    metrics["genai_client_usage_tokens"].record(
+                        input_tokens + output_tokens, attributes
+                    )
+                    metrics["genai_client_operation_duration"].record(
+                        end_time - start_time, attributes
+                    )
+                    metrics["genai_server_ttft"].record(
+                        end_time - start_time, attributes
+                    )
                     metrics["genai_requests"].add(1, attributes)
-                    metrics["genai_total_tokens"].add(total_tokens, attributes)
                     metrics["genai_completion_tokens"].add(output_tokens, attributes)
                     metrics["genai_prompt_tokens"].add(input_tokens, attributes)
                     metrics["genai_cost"].record(cost, attributes)
openlit/otel/metrics.py CHANGED
@@ -18,6 +18,75 @@ else:
 # Global flag to check if the meter provider initialization is complete.
 METER_SET = False
 
+_GEN_AI_CLIENT_OPERATION_DURATION_BUCKETS = [
+    0.01,
+    0.02,
+    0.04,
+    0.08,
+    0.16,
+    0.32,
+    0.64,
+    1.28,
+    2.56,
+    5.12,
+    10.24,
+    20.48,
+    40.96,
+    81.92,
+]
+
+_GEN_AI_SERVER_TBT = [
+    0.01,
+    0.025,
+    0.05,
+    0.075,
+    0.1,
+    0.15,
+    0.2,
+    0.3,
+    0.4,
+    0.5,
+    0.75,
+    1.0,
+    2.5
+]
+
+_GEN_AI_SERVER_TFTT = [
+    0.001,
+    0.005,
+    0.01,
+    0.02,
+    0.04,
+    0.06,
+    0.08,
+    0.1,
+    0.25,
+    0.5,
+    0.75,
+    1.0,
+    2.5,
+    5.0,
+    7.5,
+    10.0
+]
+
+_GEN_AI_CLIENT_TOKEN_USAGE_BUCKETS = [
+    1,
+    4,
+    16,
+    64,
+    256,
+    1024,
+    4096,
+    16384,
+    65536,
+    262144,
+    1048576,
+    4194304,
+    16777216,
+    67108864,
+]
+
 def setup_meter(application_name, environment, meter, otlp_endpoint, otlp_headers):
     """
     Sets up OpenTelemetry metrics with a counter for total requests.
@@ -73,26 +142,48 @@ def setup_meter(application_name, environment, meter, otlp_endpoint, otlp_header
 
     # Define and create the metrics
     metrics_dict = {
+        # OTel Semconv
+        "genai_client_usage_tokens": meter.create_histogram(
+            name=SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE,
+            description="Measures number of input and output tokens used",
+            unit="{token}",
+            explicit_bucket_boundaries_advisory=_GEN_AI_CLIENT_TOKEN_USAGE_BUCKETS,
+        ),
+        "genai_client_operation_duration": meter.create_histogram(
+            name=SemanticConvetion.GEN_AI_CLIENT_OPERATION_DURATION,
+            description="GenAI operation duration",
+            unit="s",
+            explicit_bucket_boundaries_advisory=_GEN_AI_CLIENT_OPERATION_DURATION_BUCKETS,
+        ),
+        "genai_server_tbt": meter.create_histogram(
+            name=SemanticConvetion.GEN_AI_SERVER_TBT,
+            description="Time per output token generated after the first token for successful responses",
+            unit="s",
+            explicit_bucket_boundaries_advisory=_GEN_AI_SERVER_TBT,
+        ),
+        "genai_server_ttft": meter.create_histogram(
+            name=SemanticConvetion.GEN_AI_SERVER_TTFT,
+            description="Time to generate first token for successful responses",
+            unit="s",
+            explicit_bucket_boundaries_advisory=_GEN_AI_SERVER_TFTT,
+        ),
+
+        # Extra
         "genai_requests": meter.create_counter(
             name=SemanticConvetion.GEN_AI_REQUESTS,
             description="Number of requests to GenAI",
             unit="1",
         ),
         "genai_prompt_tokens": meter.create_counter(
-            name=SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
+            name=SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS,
             description="Number of prompt tokens processed.",
             unit="1",
         ),
         "genai_completion_tokens": meter.create_counter(
-            name=SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
+            name=SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS,
             description="Number of completion tokens processed.",
             unit="1",
         ),
-        "genai_total_tokens": meter.create_counter(
-            name=SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
-            description="Number of total tokens processed.",
-            unit="1",
-        ),
         "genai_cost": meter.create_histogram(
             name=SemanticConvetion.GEN_AI_USAGE_COST,
             description="The distribution of GenAI request costs."
openlit/semcov/__init__.py CHANGED
@@ -16,30 +16,114 @@ class SemanticConvetion:
     the application's data logging and processing functionalities.
     """
 
-    # GenAI General
-    GEN_AI_ENDPOINT = "gen_ai.endpoint"
-    GEN_AI_SYSTEM = "gen_ai.system"
-    GEN_AI_ENVIRONMENT = "gen_ai.environment"
-    GEN_AI_APPLICATION_NAME = "gen_ai.application_name"
-    GEN_AI_TYPE = "gen_ai.operation.name"
-    GEN_AI_HUB_OWNER = "gen_ai.hub.owner"
-    GEN_AI_HUB_REPO = "gen_ai.hub.repo"
-    GEN_AI_RETRIEVAL_SOURCE = "gen_ai.retrieval.source"
-    GEN_AI_REQUESTS = "gen_ai.total.requests"
-    GEN_AI_DATA_SOURCES = "gen_ai.data_source_count"
+    # General Attributes (OTel Semconv)
+    SERVER_PORT = "server.port"
+    SERVER_ADDRESS = "server.address"
+    ERROR_TYPE = "error.type"
 
-    # GenAI Request
+    # GenAI Metric Names (OTel Semconv)
+    GEN_AI_CLIENT_TOKEN_USAGE = "gen_ai.client.token.usage"
+    GEN_AI_CLIENT_OPERATION_DURATION = "gen_ai.client.operation.duration"
+    GEN_AI_SERVER_REQUEST_DURATION = "gen_ai.server.request.duration"
+    GEN_AI_SERVER_TBT = "gen_ai.server.time_per_output_token"
+    GEN_AI_SERVER_TTFT = "gen_ai.server.time_to_first_token"
+
+    # GenAI Request Attributes (OTel Semconv)
+    GEN_AI_OPERATION = "gen_ai.operation.name"
+    GEN_AI_SYSTEM = "gen_ai.system"
+    GEN_AI_OUTPUT_TYPE = "gen_ai.output.type"
+    GEN_AI_ENDPOINT = "gen_ai.endpoint"
     GEN_AI_REQUEST_MODEL = "gen_ai.request.model"
+    GEN_AI_REQUEST_SEED = "gen_ai.request.seed"
+    GEN_AI_REQUEST_ENCODING_FORMATS = "gen_ai.request.encoding_formats"
+    GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
+    GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
+    GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
+    GEN_AI_REQUEST_STOP_SEQUENCES = "gen_ai.request.stop_sequences"
     GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature"
-    GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
     GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k"
-    GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens"
+    GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p"
+
+    # GenAI Response Attributes (OTel Semconv)
+    GEN_AI_TOKEN_TYPE = "gen_ai.token.type"
+    GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reasons"
+    GEN_AI_RESPONSE_ID = "gen_ai.response.id"
+    GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
+    GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens"
+    GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens"
+    GEN_AI_TOOL_CALL_ID = "gen_ai.tool.call.id"
+    GEN_AI_TOOL_NAME = "gen_ai.tool.name"
+
+    # GenAI Operation Types (OTel Semconv)
+    GEN_AI_OPERATION_TYPE_CHAT = "chat"
+    GEN_AI_OPERATION_TYPE_TOOLS = "execute_tool"
+    GEN_AI_OPERATION_TYPE_EMBEDDING = "embeddings"
+    GEN_AI_OPERATION_TYPE_IMAGE = "image"
+    GEN_AI_OPERATION_TYPE_AUDIO = "audio"
+    GEN_AI_OPERATION_TYPE_VECTORDB = "vectordb"
+    GEN_AI_OPERATION_TYPE_FRAMEWORK = "framework"
+    GEN_AI_OPERATION_TYPE_AGENT = "agent"
+    GEN_AI_OPERATION_TYPE_CREATE_AGENT = "create_agent"
+
+    # GenAI Output Types (OTel Semconv)
+    GEN_AI_OUTPUT_TYPE_IMAGE = "image"
+    GEN_AI_OUTPUT_TYPE_JSON = "json"
+    GEN_AI_OUTPUT_TYPE_SPEECH = "speech"
+    GEN_AI_OUTPUT_TYPE_TEXT = "text"
+
+    # GenAI System Names (OTel Semconv)
+    GEN_AI_SYSTEM_ANTHROPIC = "anthropic"
+    GEN_AI_SYSTEM_AWS_BEDROCK = "aws.bedrock"
+    GEN_AI_SYSTEM_AZURE_AI_INFERENCE = "az.ai.inference"
+    GEN_AI_SYSTEM_AZURE_OPENAI = "az.ai.openai"
+    GEN_AI_SYSTEM_COHERE = "cohere"
+    GEN_AI_SYSTEM_DEEPSEEK = "deepseek"
+    GEN_AI_SYSTEM_GEMINI = "gemini"
+    GEN_AI_SYSTEM_GROQ = "groq"
+    GEN_AI_SYSTEM_IBM_WATSON = "ibm.watson.ai"
+    GEN_AI_SYSTEM_MISTRAL = "mistral_ai"
+    GEN_AI_SYSTEM_OPENAI = "openai"
+    GEN_AI_SYSTEM_PERPLEXITY = "perplexity"
+    GEN_AI_SYSTEM_VERTEXAI = "vertex_ai"
+    GEN_AI_SYSTEM_XAI = "xai"
+
+    # GenAI OpenAI Attributes (OTel Semconv)
+    GEN_AI_REQUEST_SERVICE_TIER = "gen_ai.request.service_tier"
+    GEN_AI_RESPONSE_SERVICE_TIER = "gen_ai.response.service_tier"
+    GEN_AI_RESPONSE_SYSTEM_FINGERPRINT = "gen_ai.response.system_fingerprint"
+
+    # GenAI System Names (Extra)
+    GEN_AI_SYSTEM_HUGGING_FACE = "huggingface"
+    GEN_AI_SYSTEM_OLLAMA = "ollama"
+    GEN_AI_SYSTEM_GPT4ALL = "gpt4all"
+    GEN_AI_SYSTEM_ELEVENLABS = "elevenlabs"
+    GEN_AI_SYSTEM_VLLM = "vLLM"
+    GEN_AI_SYSTEM_GOOGLE_AI_STUDIO = "google.ai.studio"
+    GEN_AI_SYSTEM_REKAAI = "rekaai"
+    GEN_AI_SYSTEM_PREMAI = "premai"
+    GEN_AI_SYSTEM_LANGCHAIN = "langchain"
+    GEN_AI_SYSTEM_LLAMAINDEX = "llama_index"
+    GEN_AI_SYSTEM_HAYSTACK = "haystack"
+    GEN_AI_SYSTEM_EMBEDCHAIN = "embedchain"
+    GEN_AI_SYSTEM_MEM0 = "mem0"
+    GEN_AI_SYSTEM_LITELLM = "litellm"
+    GEN_AI_SYSTEM_CREWAI = "crewai"
+    GEN_AI_SYSTEM_AG2 = "ag2"
+    GEN_AI_SYSTEM_MULTION = "multion"
+    GEN_AI_SYSTEM_DYNAMIQ = "dynamiq"
+    GEN_AI_SYSTEM_PHIDATA = "phidata"
+    GEN_AI_SYSTEM_JULEP = "julep"
+    GEN_AI_SYSTEM_AI21 = "ai21"
+    GEN_AI_SYSTEM_CONTROLFLOW = "controlflow"
+    GEN_AI_SYSTEM_ASSEMBLYAI = "assemblyai"
+    GEN_AI_SYSTEM_CRAWL4AI = "crawl4ai"
+    GEN_AI_SYSTEM_FIRECRAWL = "firecrawl"
+    GEN_AI_SYSTEM_LETTA = "letta"
+    GEN_AI_SYSTEM_TOGETHER = "together"
+
+    # GenAI Request Attributes (Extra)
     GEN_AI_REQUEST_IS_STREAM = "gen_ai.request.is_stream"
     GEN_AI_REQUEST_USER = "gen_ai.request.user"
-    GEN_AI_REQUEST_SEED = "gen_ai.request.seed"
-    GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty"
-    GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty"
-    GEN_AI_REQUEST_EMBEDDING_FORMAT = "gen_ai.request.embedding_format"
     GEN_AI_REQUEST_EMBEDDING_DIMENSION = "gen_ai.request.embedding_dimension"
     GEN_AI_REQUEST_TOOL_CHOICE = "gen_ai.request.tool_choice"
     GEN_AI_REQUEST_AUDIO_VOICE = "gen_ai.request.audio_voice"
@@ -47,28 +131,23 @@ class SemanticConvetion:
     GEN_AI_REQUEST_AUDIO_SPEED = "gen_ai.request.audio_speed"
     GEN_AI_REQUEST_AUDIO_SETTINGS = "gen_ai.request.audio_settings"
     GEN_AI_REQUEST_AUDIO_DURATION = "gen_ai.request.audio_duration"
-    GEN_AI_REQUEST_FINETUNE_STATUS = "gen_ai.request.fine_tune_status"
-    GEN_AI_REQUEST_FINETUNE_MODEL_SUFFIX = "gen_ai.request.fine_tune_model_suffix"
-    GEN_AI_REQUEST_FINETUNE_MODEL_EPOCHS = "gen_ai.request.fine_tune_n_epochs"
-    GEN_AI_REQUEST_FINETUNE_MODEL_LRM = "gen_ai.request.learning_rate_multiplier"
-    GEN_AI_REQUEST_FINETUNE_BATCH_SIZE = "gen_ai.request.fine_tune_batch_size"
-    GEN_AI_REQUEST_VALIDATION_FILE = "gen_ai.request.validation_file"
-    GEN_AI_REQUEST_TRAINING_FILE = "gen_ai.request.training_file"
-
     GEN_AI_REQUEST_IMAGE_SIZE = "gen_ai.request.image_size"
     GEN_AI_REQUEST_IMAGE_QUALITY = "gen_ai.request.image_quality"
     GEN_AI_REQUEST_IMAGE_STYLE = "gen_ai.request.image_style"
+    GEN_AI_HUB_OWNER = "gen_ai.hub.owner"
+    GEN_AI_HUB_REPO = "gen_ai.hub.repo"
+    GEN_AI_RETRIEVAL_SOURCE = "gen_ai.retrieval.source"
+    GEN_AI_REQUESTS = "gen_ai.total.requests"
+    GEN_AI_DATA_SOURCES = "gen_ai.data_source_count"
+    GEN_AI_ENVIRONMENT = "gen_ai.environment"
+    GEN_AI_APPLICATION_NAME = "gen_ai.application_name"
+    GEN_AI_SDK_VERSION = "gen_ai.sdk.version"
 
-    # GenAI Usage
-    GEN_AI_USAGE_PROMPT_TOKENS = "gen_ai.usage.input_tokens"
-    GEN_AI_USAGE_COMPLETION_TOKENS = "gen_ai.usage.output_tokens"
+    # GenAI Response Attributes (Extra)
     GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens"
     GEN_AI_USAGE_COST = "gen_ai.usage.cost"
-
-    # GenAI Response
-    GEN_AI_RESPONSE_ID = "gen_ai.response.id"
-    GEN_AI_RESPONSE_FINISH_REASON = "gen_ai.response.finish_reasons"
-    GEN_AI_RESPONSE_IMAGE = "gen_ai.response.image" # Not used directly in code yet
+    GEN_AI_RESPONSE_IMAGE = "gen_ai.response.image"
+    GEN_AI_TOOL_CALLS = "gen_ai.response.tool_calls"
 
     # GenAI Content
     GEN_AI_CONTENT_PROMPT_EVENT = "gen_ai.content.prompt"
@@ -90,52 +169,6 @@ class SemanticConvetion:
     GEN_AI_EVAL_ANSWER_RELEVANCY = "gen_ai.eval.answer_relevancy"
     GEN_AI_EVAL_GROUNDEDNESS = "gen_ai.eval.groundedness"
 
-    GEN_AI_TYPE_CHAT = "chat"
-    GEN_AI_TYPE_EMBEDDING = "embedding"
-    GEN_AI_TYPE_IMAGE = "image"
-    GEN_AI_TYPE_AUDIO = "audio"
-    GEN_AI_TYPE_FINETUNING = "fine_tuning"
-    GEN_AI_TYPE_VECTORDB = "vectordb"
-    GEN_AI_TYPE_FRAMEWORK = "framework"
-    GEN_AI_TYPE_AGENT = "agent"
-
-    GEN_AI_SYSTEM_HUGGING_FACE = "huggingface"
-    GEN_AI_SYSTEM_OPENAI = "openai"
-    GEN_AI_SYSTEM_AZURE_OPENAI = "azure_openai"
-    GEN_AI_SYSTEM_ANTHROPIC = "anthropic"
-    GEN_AI_SYSTEM_COHERE = "cohere"
-    GEN_AI_SYSTEM_MISTRAL = "mistral"
-    GEN_AI_SYSTEM_BEDROCK = "bedrock"
-    GEN_AI_SYSTEM_VERTEXAI = "vertex_ai"
-    GEN_AI_SYSTEM_GROQ = "groq"
-    GEN_AI_SYSTEM_OLLAMA = "ollama"
-    GEN_AI_SYSTEM_GPT4ALL = "gpt4all"
-    GEN_AI_SYSTEM_ELEVENLABS = "elevenlabs"
-    GEN_AI_SYSTEM_VLLM = "vLLM"
-    GEN_AI_SYSTEM_GOOGLE_AI_STUDIO = "google-ai-studio"
-    GEN_AI_SYSTEM_REKAAI = "rekaai"
-    GEN_AI_SYSTEM_PREMAI = "premai"
-    GEN_AI_SYSTEM_AZURE_AI_INFERENCE = "azure-ai-inference"
-    GEN_AI_SYSTEM_LANGCHAIN = "langchain"
-    GEN_AI_SYSTEM_LLAMAINDEX = "llama_index"
-    GEN_AI_SYSTEM_HAYSTACK = "haystack"
-    GEN_AI_SYSTEM_EMBEDCHAIN = "embedchain"
-    GEN_AI_SYSTEM_MEM0 = "mem0"
-    GEN_AI_SYSTEM_LITELLM = "litellm"
-    GEN_AI_SYSTEM_CREWAI = "crewai"
-    GEN_AI_SYSTEM_AG2 = "ag2"
-    GEN_AI_SYSTEM_MULTION = "multion"
-    GEN_AI_SYSTEM_DYNAMIQ = "dynamiq"
-    GEN_AI_SYSTEM_PHIDATA = "phidata"
-    GEN_AI_SYSTEM_JULEP = "julep"
-    GEN_AI_SYSTEM_AI21 = "ai21"
-    GEN_AI_SYSTEM_CONTROLFLOW = "controlflow"
-    GEN_AI_SYSTEM_ASSEMBLYAI = "assemblyai"
-    GEN_AI_SYSTEM_CRAWL4AI = "crawl4ai"
-    GEN_AI_SYSTEM_FIRECRAWL = "firecrawl"
-    GEN_AI_SYSTEM_LETTA = "letta"
-    GEN_AI_SYSTEM_TOGETHER = "together"
-
     # Vector DB
     DB_OPERATION_API_ENDPOINT = "db.operation.api_endpoint"
     DB_REQUESTS = "db.total.requests"
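The reorganised class keeps most attribute strings but renames several constants to follow the OTel GenAI semantic conventions (for example, GEN_AI_TYPE becomes GEN_AI_OPERATION, and GEN_AI_USAGE_PROMPT_TOKENS/GEN_AI_USAGE_COMPLETION_TOKENS become GEN_AI_USAGE_INPUT_TOKENS/GEN_AI_USAGE_OUTPUT_TOKENS), so code importing the old names will break. A quick check against an installed 1.33.10, based only on the values visible in this diff:

# Assumes openlit >= 1.33.10 is installed; the expected strings are taken
# directly from the diff above.
from openlit.semcov import SemanticConvetion

assert SemanticConvetion.GEN_AI_OPERATION == "gen_ai.operation.name"
assert SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS == "gen_ai.usage.input_tokens"
assert SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS == "gen_ai.usage.output_tokens"

# Old constants such as GEN_AI_TYPE or GEN_AI_USAGE_PROMPT_TOKENS were removed.
assert not hasattr(SemanticConvetion, "GEN_AI_TYPE")
assert not hasattr(SemanticConvetion, "GEN_AI_USAGE_PROMPT_TOKENS")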
{openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.33.8
+Version: 1.33.10
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
openlit-1.33.10.dist-info/RECORD ADDED
@@ -0,0 +1,122 @@
+openlit/__helpers.py,sha256=44dXhkRJ5K42qjNwC5PMphLj2N6CoXdO3m9ZmbxSFtQ,9840
+openlit/__init__.py,sha256=GKu_3fhw27IXODjUxTpTkX0JGO9yskuyxhX4w0y6Qx4,22446
+openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
+openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
+openlit/evals/bias_detection.py,sha256=mCdsfK7x1vX7S3psC3g641IMlZ-7df3h-V6eiICj5N8,8154
+openlit/evals/hallucination.py,sha256=Yn5OfWVJKynAiImV_aAqCvc0zqYjdJ3XUppCnMTy1pg,7507
+openlit/evals/toxicity.py,sha256=Ii_kX2GToO9fDstDBuK4iN0tEQUkMoPWUBDMFFfeMC4,7000
+openlit/evals/utils.py,sha256=lXgxyh1OZHeQzlzTLBAEnIYVaUt0YqYA6Uaygjpbv0s,8472
+openlit/guard/__init__.py,sha256=B-D7iPadGFn5i7nEsQfVbS6feL1ViRht3zWTQ45Jgkg,417
+openlit/guard/all.py,sha256=VNHQERad-4qAMWsJJrpW9QNkhM7U8v_wy20KtDtQgzM,9755
+openlit/guard/prompt_injection.py,sha256=3e4DKxB7QDzM-xPCpwEuureiH_2s_OTJ9BSckknPpzY,5784
+openlit/guard/restrict_topic.py,sha256=KTuWa7XeMsV4oXxOrD1CYZV0wXWxTfA0H3p_6q_IOsk,6444
+openlit/guard/sensitive_topic.py,sha256=RgVw_laFERv0nNdzBsAd2_3yLomMOK-gVq-P7oj1bTk,5552
+openlit/guard/utils.py,sha256=x0-_hAtNa_ogYR2GfnwiBF1rlqaXtaJ-rJeGguTDe-Q,7663
+openlit/instrumentation/ag2/__init__.py,sha256=Nf9cDoXB16NYgZisvVQduFYJ5fpU90CNlMrIF4pSH-Y,1827
+openlit/instrumentation/ag2/ag2.py,sha256=Yo6WfuTLLk3yVValZGp7GC5lKb2Z3kOHAUWxlhhOyN4,4454
+openlit/instrumentation/ai21/__init__.py,sha256=Wk23syYEbHuKBzdIabD3dfjZJTW-qzu7LgJPbG8mNHY,2549
+openlit/instrumentation/ai21/ai21.py,sha256=-wzx6SMGJaMXXgfC4T31EBqi6TEC1JNy-fV5bOj4nY4,33615
+openlit/instrumentation/ai21/async_ai21.py,sha256=vay0klz43slfC9RhITQNC9iKAg3gzAqSahy9rPgqIiI,33719
+openlit/instrumentation/anthropic/__init__.py,sha256=IoWGApFQ72DQYp9jN-LRDr0uRW3OamDgHG-mO23OSzs,1887
+openlit/instrumentation/anthropic/anthropic.py,sha256=fj023njYA4nSvwuj5vrvOsGqyqKvxzzMdY7pWYKikM4,22332
+openlit/instrumentation/anthropic/async_anthropic.py,sha256=yxy5UlRaodyVfMTLK5UDujoCZ9ioHW__-YdBv1jTRMg,22418
+openlit/instrumentation/assemblyai/__init__.py,sha256=6uSNexxRJXIX-ZLitD3ow7ej3WK_N_W4ZxeIFb_eseY,1535
+openlit/instrumentation/assemblyai/assemblyai.py,sha256=dLCdL9SyfOocmzvT0iYG4UwM3giom_nKFlCoTQix5Bk,6262
+openlit/instrumentation/astra/__init__.py,sha256=G4alCOR6hXQvUQPDCS8lY1rj0Mz-KmrW5vVWk5loO78,8054
+openlit/instrumentation/astra/astra.py,sha256=19WweO3jGvUOC2fkij2ZKKaGbKfg1LaC-GSMoEsDmjU,12061
+openlit/instrumentation/astra/async_astra.py,sha256=8te62MqJBcdVqtG4iz9wFAn9bFaeESFvcqZVaVA9VnM,12073
+openlit/instrumentation/azure_ai_inference/__init__.py,sha256=xmDk8eK0VB6C1TsH5oIciVnxAhIrXiqDzyqHgy6u51Q,2621
+openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py,sha256=anQUO465P0ns9CrKAPFGFziA5qJvGuv69RB50cFAysk,29894
+openlit/instrumentation/azure_ai_inference/azure_ai_inference.py,sha256=b611q2NSIsT6KfgOogtfcHW-eliOK_bS8rSnOjU1D0A,29790
+openlit/instrumentation/bedrock/__init__.py,sha256=i32hjjdBu2JUGS1eytWjV2WX2_xvXw-iqrbXADSpHx0,1525
+openlit/instrumentation/bedrock/bedrock.py,sha256=IwV2y3e0yXcK11_6VJ6HjeaC9ic0Xc3rd4hqNyMBY6I,12234
+openlit/instrumentation/chroma/__init__.py,sha256=61lFpHlUEQUobsUJZHXdvOViKwsOH8AOvSfc4VgCmiM,3253
+openlit/instrumentation/chroma/chroma.py,sha256=hZgmGSWf22UqLyoyFTSh9uqrSiFNwMPrCZW9HckMJvo,10513
+openlit/instrumentation/cohere/__init__.py,sha256=P8w30qt_RoXeQ5VsdW0KB4G1-WaWS9DkhTqaoCZuDeI,2825
+openlit/instrumentation/cohere/async_cohere.py,sha256=_W7mcfShY0LlPc6S5WmIRlp9KVBqivBBSfZbThb1Wn4,30567
+openlit/instrumentation/cohere/cohere.py,sha256=egR629mOhSRdg1-x-rMwqNjBqIIxjuYNTS8_HQ3KfAg,30450
+openlit/instrumentation/controlflow/__init__.py,sha256=iKZ08IANfoN_n4o1TZJIK_C_t6RZQ6AS1H7kMfyBbYA,2118
+openlit/instrumentation/controlflow/controlflow.py,sha256=m9ZqO_9iQo1Hg9-9Endtw_biC0cZfzaFVLlpUiN9sXs,5538
+openlit/instrumentation/crawl4ai/__init__.py,sha256=CGkcbQijpKae_8GD_1ybDnCCk0MVu2AdV-ppFOg8mAA,1907
+openlit/instrumentation/crawl4ai/async_crawl4ai.py,sha256=R3S9iMkcetQhiag18oq29of7TDU5s6kmpGw9sWHxylQ,4859
+openlit/instrumentation/crawl4ai/crawl4ai.py,sha256=COTz6EWKK1iRGWMsPpuSC63P4jvZdyUyY_mMFSHqndE,4841
+openlit/instrumentation/crewai/__init__.py,sha256=cETkkwnKYEMAKlMrHbZ9-RvcRUPYaSNqNIhy2-vCDK8,1794
+openlit/instrumentation/crewai/crewai.py,sha256=JblvqWdZqC8H6geiQB35sHS0KBP_N0nGZzTG8DVHQpk,7181
+openlit/instrumentation/dynamiq/__init__.py,sha256=2uIHHxFWca0g2YLO2RBfi2Al6uWUYvVZBfDiPOHCdpQ,2331
+openlit/instrumentation/dynamiq/dynamiq.py,sha256=1Iga7NS4zw3LDFE_VMxT1PBGC4MAcfmCCKoyQRORojE,5260
+openlit/instrumentation/elevenlabs/__init__.py,sha256=BZjAe-kzFJpKxT0tKksXVfZgirvgEp8qM3SfegWU5co,2631
+openlit/instrumentation/elevenlabs/async_elevenlabs.py,sha256=H5svCDy_JGCsssi4IbF3HDF0ECgvR6_I_j1p0rVJe0o,6792
+openlit/instrumentation/elevenlabs/elevenlabs.py,sha256=khhkb7CHOMkQwhia___t0Iy9xMrQs3w-dEovvqdIeks,6775
+openlit/instrumentation/embedchain/__init__.py,sha256=8TYk1OEbz46yF19dr-gB_x80VZMagU3kJ8-QihPXTeA,1929
+openlit/instrumentation/embedchain/embedchain.py,sha256=-V4QAPtbE8mDE8FGzYQ2AUp8FpoYD2ygJWmXPEqwQKs,7874
+openlit/instrumentation/firecrawl/__init__.py,sha256=2QTcpPoaximsApdp68WD2iYR1_vZnKlkbAd4RHhgeOo,1836
+openlit/instrumentation/firecrawl/firecrawl.py,sha256=ZtYJsKcWuNOKihI76G7gZOlNSHs5ua8THj7FBWDukKY,3796
+openlit/instrumentation/google_ai_studio/__init__.py,sha256=n_vOdZeccUvw8W_NYEpiw468sywqnD5O_E1CjuElXYM,2009
+openlit/instrumentation/google_ai_studio/async_google_ai_studio.py,sha256=vYPfpwvHQh1vLgR2JMhpzXLJ4e7vPU5NhjjsAR7UNwM,11006
+openlit/instrumentation/google_ai_studio/google_ai_studio.py,sha256=4EfHmAQG9Auihs0DaaSx1wBmC8FV-1gDAGh5cnltMIE,10988
+openlit/instrumentation/gpt4all/__init__.py,sha256=QGk_uhpS9ac3jk_nk7yGAw9YBbGbbMrVv6OkAh4vvEM,1765
+openlit/instrumentation/gpt4all/gpt4all.py,sha256=ATbPMj1jzF-62iKTKMXnREvzM-eYndZB2nSIl2LNRt4,24037
+openlit/instrumentation/gpu/__init__.py,sha256=flcKWN18lqrSiuqsQAGM7x7gPUr5YuZNSPlrocC1zcE,11028
+openlit/instrumentation/groq/__init__.py,sha256=o_3StdM3VyMjL4XT-agYfZkQouVr1MqeQ-2fUPdpRyI,1861
+openlit/instrumentation/groq/async_groq.py,sha256=qGf5x7Ng4D_IfK2GMctWXe1Cq6uaT4sk-cAqWMdSnhM,24646
+openlit/instrumentation/groq/groq.py,sha256=CB8Dtgym7RaMkM8FBpHMoYlAy1BRoW7n9EFX3YdBsQc,24560
+openlit/instrumentation/haystack/__init__.py,sha256=QK6XxxZUHX8vMv2Crk7rNBOc64iOOBLhJGL_lPlAZ8s,1758
+openlit/instrumentation/haystack/haystack.py,sha256=qGBLRFRs5Z0nuAYf9oic3BrdQMx7eXT-g3S0rihHHtU,3901
+openlit/instrumentation/julep/__init__.py,sha256=oonEVK41P5g4SYRm0E_H4zCVH9NM4aJ-UswXzF3Oiv0,3136
+openlit/instrumentation/julep/async_julep.py,sha256=fMMa8_6r97ipDRo8_soQKSZ1tw9LRCjdgd_JTFEXjkM,5283
+openlit/instrumentation/julep/julep.py,sha256=tKjQN82PyKHNhfqwjjFGz5iCZRkFn1kLPuc2wIt8F0Y,5286
+openlit/instrumentation/langchain/__init__.py,sha256=thTGg9NCNlnVEUc_KsxR94AY2v5pwEk7uVnbLJ_Bl-4,3823
+openlit/instrumentation/langchain/async_langchain.py,sha256=s1LzZhT3Dxw2FD5wwK2hmDBSoxtp39yuqUB9ru5g9so,18155
+openlit/instrumentation/langchain/langchain.py,sha256=fk4nBc-wqJRE45PkXuOCIHUNoke1wlsNGEd3t1l_lis,18017
+openlit/instrumentation/letta/__init__.py,sha256=sjjOuMvZ1EPGEluNW0WTuSXYPhrb453cBIizt88Ta3g,2951
+openlit/instrumentation/letta/letta.py,sha256=WJYDmHQVeBsLBIlTHfZXiHBhjOkmr9dEj2HtkX-1OU0,8405
+openlit/instrumentation/litellm/__init__.py,sha256=dIFwJx45riHwolxBlUZUabO2TEnAR9MptwmSaFvaJfY,2314
+openlit/instrumentation/litellm/async_litellm.py,sha256=6Ljft-M80763dQvhgT_RPvt63EE33pWUY5G1rE6MciM,30328
+openlit/instrumentation/litellm/litellm.py,sha256=n5p0MnDFjNwYhYkcRfFRWRH_qtXbUR821X1GvHigYPc,30234
+openlit/instrumentation/llamaindex/__init__.py,sha256=vPtK65G6b-TwJERowVRUVl7f_nBSlFdwPBtpg8dOGos,1977
+openlit/instrumentation/llamaindex/llamaindex.py,sha256=reCkNucy-B4MC0ZsnlJ2lU68iN81KyDmWMqb180IaIc,4058
+openlit/instrumentation/mem0/__init__.py,sha256=guOkLoSKvHSVSmEWhCHMVRMUGEa5JzqI8CIluHtwirQ,2417
+openlit/instrumentation/mem0/mem0.py,sha256=AIddouvkTTjhyHrMOX7Z8z9fZ7jpD5PVlTYeHWatycI,5315
+openlit/instrumentation/milvus/__init__.py,sha256=qi1yfmMrvkDtnrN_6toW8qC9BRL78bq7ayWpObJ8Bq4,2961
+openlit/instrumentation/milvus/milvus.py,sha256=UOucpoOH0ay2jG9CRUsWtFXhceYJ3Z6ghWzhORCOF2E,9103
+openlit/instrumentation/mistral/__init__.py,sha256=u3e9ueG0-wJ1ZgRa8zknPj3bTWA0AFI-3-UxlothkLg,2980
+openlit/instrumentation/mistral/async_mistral.py,sha256=h_fd9oDX_AuFoK6023haxaYxr0PMd0M7PLs05uwFmA8,30999
+openlit/instrumentation/mistral/mistral.py,sha256=RnFedQO05jEWyhjtsxKRAIALf1iAkbTA82-vjDb-bcs,30895
+openlit/instrumentation/multion/__init__.py,sha256=DUt70uINLYi4xTxZ6D3bxKUBHYi1FpKbliQ6E7D_SeQ,3069
+openlit/instrumentation/multion/async_multion.py,sha256=2lSIP3HQa6XvWoA0pafQ1qnEIA1bBUxTlXwrGWZeOkc,6021
+openlit/instrumentation/multion/multion.py,sha256=LaUKx8XShi4Lv3IwnQR_NB0_Eb9s5Xq_VC-E0I17BlY,6003
+openlit/instrumentation/ollama/__init__.py,sha256=KDO0JxdfNiQiEpWxEk93m3LiM07zAmvnNznb1E3PtN4,2856
+openlit/instrumentation/ollama/async_ollama.py,sha256=k9ya3wSZfb8rQMqUqi3-fRcP2cw969-jf8TMjl47cyY,28301
+openlit/instrumentation/ollama/ollama.py,sha256=Y74fa0C4JrxN5m4c7_lFvEaamnGNqWlCc2Zmn2gIMUs,28197
+openlit/instrumentation/openai/__init__.py,sha256=54pwkndSO1IS78Qqx1A-QNkObx5eJnsvWeMtDZj2lD8,4773
+openlit/instrumentation/openai/async_openai.py,sha256=sm9ZbO5RYp69-4ak2kmQ8iWziE8qzG8fCc10jcfWYoE,50461
+openlit/instrumentation/openai/openai.py,sha256=y9ZS1NQmXzBTM0I5Nt0Ex5DK8x7EUiIW8t-coqfidMs,50290
+openlit/instrumentation/phidata/__init__.py,sha256=rfPCXYOIsJbxChee2p269UzkJ1Z-pvQbii7Fgrw1v2g,1527
+openlit/instrumentation/phidata/phidata.py,sha256=0qLqJ79A5-fVXDDDfFr0yLr1yiJQqaVa79l_9I5jwuQ,4799
+openlit/instrumentation/pinecone/__init__.py,sha256=Mv9bElqNs07_JQkYyNnO0wOM3hdbprmw7sttdMeKC7g,2526
+openlit/instrumentation/pinecone/pinecone.py,sha256=EV9PPri34c_OCGKqRuImshn0E1L5HsSNwgHWSr8v_DE,8747
+openlit/instrumentation/premai/__init__.py,sha256=VUrJaD59cpUDizobpkkIkWq-Kurq84Daj_SBwEMlMDE,1796
+openlit/instrumentation/premai/premai.py,sha256=grBsIc_5xt8XjtHeQymgD9zGPMZ9Jb55CPWOYd42i94,27890
+openlit/instrumentation/qdrant/__init__.py,sha256=GMlZgRBKoQMgrL4cFbAKwytfdTHLzJEIuTQMxp0uZO0,8940
+openlit/instrumentation/qdrant/async_qdrant.py,sha256=I_nchTueGLvb8OXID7MZyV1k97xnwGBDQWEzWla7vqo,15053
+openlit/instrumentation/qdrant/qdrant.py,sha256=WUrjYCgQZVWVyFxiA-lXB6LbLM1l8tFWG5FoojiShIg,15470
+openlit/instrumentation/reka/__init__.py,sha256=LIZt2K-GAXD69JAXIl_apf-IHIYMQSfRG2eHlAX-Zj0,1844
+openlit/instrumentation/reka/async_reka.py,sha256=MNSNy1SoaCcWgv-dAuKD7ylk4q6dLn10RFeKxc2IuiM,9383
+openlit/instrumentation/reka/reka.py,sha256=OU6Zv3FQ4f9Ewgd3ZlrLVRfuoawNFfKTmrU-_ZH7f9s,9365
+openlit/instrumentation/together/__init__.py,sha256=Kjj2Q2iy7e93Zsz4jPocOg8fb0DmNHMF1VzP90XieC4,2601
+openlit/instrumentation/together/async_together.py,sha256=ZGb_Gf9WZ7aDdM5L61_tF_PfOb1OOqkq8CMD77AV0UY,30464
+openlit/instrumentation/together/together.py,sha256=M6nysPLb2UM_yWGGRtJYf4YtNVSkM0bUZ__uAahSy_8,30360
+openlit/instrumentation/transformers/__init__.py,sha256=Fq2Nz1IsduF2GMIRSK3DK8KD0BYx6zk4zMASlkD9-cY,1437
+openlit/instrumentation/transformers/transformers.py,sha256=A6CPjMh9alqMmxs_ZIewlkT2WrPDOtw8VjRrYm99Qi0,9108
+openlit/instrumentation/vertexai/__init__.py,sha256=dgzzSOFFIw9TD8xyp8T2FTEMG-_15oyNycNN7MLjwsI,3645
+openlit/instrumentation/vertexai/async_vertexai.py,sha256=wNTJ42ZXAHrb727OT6XDOx8p3hmRD6uY_G4suSFPyTk,22982
+openlit/instrumentation/vertexai/vertexai.py,sha256=Jns15HksqqjRPpj_ogK7KXwMXW9OTQMnqZBdQjZXMAk,22896
+openlit/instrumentation/vllm/__init__.py,sha256=9rT_S7o28PLzoqjwW7bL7JZoBuigZZLRlctud6yZD_k,1478
+openlit/instrumentation/vllm/vllm.py,sha256=Momw7f94flqakXhi9PF1lz2xTp6L-Ddson8ymwpyprw,7724
+openlit/otel/metrics.py,sha256=URL7gzQbnxaNQJSX7oHRa15v6xi1GFmANn-5uFNL-aY,6378
+openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
+openlit/semcov/__init__.py,sha256=c9rHbQCFiGmbHp-8bIxY6x7z2HimSO-ANrXLXtpy1hE,12316
+openlit-1.33.10.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+openlit-1.33.10.dist-info/METADATA,sha256=-7lZ2D6sEjbDiMh_ZOf87_NKKi4EbrWfNUI7781feyw,23502
+openlit-1.33.10.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+openlit-1.33.10.dist-info/RECORD,,