openlit 1.31.0__py3-none-any.whl → 1.32.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,159 @@
1
+ # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, possibly-used-before-assignment
2
+ """
3
+ Module for monitoring Reka API calls.
4
+ """
5
+
6
+ import logging
7
+ from opentelemetry.trace import SpanKind, Status, StatusCode
8
+ from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
9
+ from openlit.__helpers import (
10
+ handle_exception,
11
+ get_chat_model_cost,
12
+ )
13
+ from openlit.semcov import SemanticConvetion
14
+
15
+ # Initialize logger for logging potential issues and operations
16
+ logger = logging.getLogger(__name__)
17
+
18
def async_chat(gen_ai_endpoint, version, environment, application_name,
               tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for the async Reka 'chat' call.

    Args:
        gen_ai_endpoint: Endpoint identifier for logging and tracing.
        version: Version of the monitoring package.
        environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the Reka API.
        tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Reka usage.
        trace_content: Flag indicating whether to trace the actual content.
        metrics: Dict of OpenTelemetry metric instruments keyed by name.
        disable_metrics: When True, metric recording is skipped.

    Returns:
        A coroutine function that wraps the chat method to add telemetry.
    """

    async def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the async 'chat' API call to add telemetry.

        Collects cost and token usage, records them on the span and (optionally)
        as metrics. Any failure while building telemetry is caught, logged, and
        attached to the span via handle_exception; the original response is
        still returned so instrumentation never breaks the caller.

        Args:
            wrapped: The original 'chat' method to be wrapped.
            instance: The instance of the class where the original method is defined.
            args: Positional arguments for the 'chat' method.
            kwargs: Keyword arguments for the 'chat' method.

        Returns:
            The response from the original 'chat' method.
        """

        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = await wrapped(*args, **kwargs)

            try:
                # Format 'messages' into a single string.
                # NOTE: default to an empty list (not "") — a non-empty string
                # default would be iterated character-by-character and crash on
                # message["role"] below.
                message_prompt = kwargs.get("messages", [])
                formatted_messages = []
                for message in message_prompt:
                    role = message["role"]
                    content = message["content"]

                    if isinstance(content, list):
                        content_str = ", ".join(
                            # pylint: disable=line-too-long
                            f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                            if "type" in item else f'text: {item["text"]}'
                            for item in content
                        )
                        formatted_messages.append(f"{role}: {content_str}")
                    else:
                        formatted_messages.append(f"{role}: {content}")
                prompt = "\n".join(formatted_messages)

                # Set base span attributes
                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                   SemanticConvetion.GEN_AI_SYSTEM_REKAAI)
                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                   SemanticConvetion.GEN_AI_TYPE_CHAT)
                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                   gen_ai_endpoint)
                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                   environment)
                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                   application_name)
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                   kwargs.get("model", "reka-core"))
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                   False)
                if trace_content:
                    span.add_event(
                        name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                        attributes={
                            SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                        },
                    )
                    span.add_event(
                        name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                        attributes={
                            # pylint: disable=line-too-long
                            SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.responses[0].message.content,
                        },
                    )

                prompt_tokens = response.usage.input_tokens
                completion_tokens = response.usage.output_tokens
                total_tokens = prompt_tokens + completion_tokens
                # Calculate cost of the operation
                cost = get_chat_model_cost(kwargs.get("model", "reka-core"),
                                           pricing_info, prompt_tokens, completion_tokens)

                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                   prompt_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                   completion_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                   total_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                                   [response.responses[0].finish_reason])
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                   cost)

                span.set_status(Status(StatusCode.OK))

                if disable_metrics is False:
                    attributes = {
                        TELEMETRY_SDK_NAME:
                            "openlit",
                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
                            application_name,
                        SemanticConvetion.GEN_AI_SYSTEM:
                            SemanticConvetion.GEN_AI_SYSTEM_REKAAI,
                        SemanticConvetion.GEN_AI_ENVIRONMENT:
                            environment,
                        SemanticConvetion.GEN_AI_TYPE:
                            SemanticConvetion.GEN_AI_TYPE_CHAT,
                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
                            kwargs.get("model", "reka-core")
                    }

                    metrics["genai_requests"].add(1, attributes)
                    metrics["genai_total_tokens"].add(total_tokens, attributes)
                    metrics["genai_completion_tokens"].add(completion_tokens, attributes)
                    metrics["genai_prompt_tokens"].add(prompt_tokens, attributes)
                    metrics["genai_cost"].record(cost, attributes)

                # Return original response
                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error("Error in trace creation: %s", e)

                # Return original response
                return response

    return wrapper
@@ -0,0 +1,159 @@
1
+ # pylint: disable=duplicate-code, broad-exception-caught, too-many-statements, unused-argument, possibly-used-before-assignment
2
+ """
3
+ Module for monitoring Reka API calls.
4
+ """
5
+
6
+ import logging
7
+ from opentelemetry.trace import SpanKind, Status, StatusCode
8
+ from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
9
+ from openlit.__helpers import (
10
+ handle_exception,
11
+ get_chat_model_cost,
12
+ )
13
+ from openlit.semcov import SemanticConvetion
14
+
15
+ # Initialize logger for logging potential issues and operations
16
+ logger = logging.getLogger(__name__)
17
+
18
def chat(gen_ai_endpoint, version, environment, application_name,
         tracer, pricing_info, trace_content, metrics, disable_metrics):
    """
    Generates a telemetry wrapper for the sync Reka 'chat' call.

    Args:
        gen_ai_endpoint: Endpoint identifier for logging and tracing.
        version: Version of the monitoring package.
        environment: Deployment environment (e.g., production, staging).
        application_name: Name of the application using the Reka API.
        tracer: OpenTelemetry tracer for creating spans.
        pricing_info: Information used for calculating the cost of Reka usage.
        trace_content: Flag indicating whether to trace the actual content.
        metrics: Dict of OpenTelemetry metric instruments keyed by name.
        disable_metrics: When True, metric recording is skipped.

    Returns:
        A function that wraps the chat method to add telemetry.
    """

    def wrapper(wrapped, instance, args, kwargs):
        """
        Wraps the 'chat' API call to add telemetry.

        Collects cost and token usage, records them on the span and (optionally)
        as metrics. Any failure while building telemetry is caught, logged, and
        attached to the span via handle_exception; the original response is
        still returned so instrumentation never breaks the caller.

        Args:
            wrapped: The original 'chat' method to be wrapped.
            instance: The instance of the class where the original method is defined.
            args: Positional arguments for the 'chat' method.
            kwargs: Keyword arguments for the 'chat' method.

        Returns:
            The response from the original 'chat' method.
        """

        with tracer.start_as_current_span(gen_ai_endpoint, kind=SpanKind.CLIENT) as span:
            response = wrapped(*args, **kwargs)

            try:
                # Format 'messages' into a single string.
                # NOTE: default to an empty list (not "") — a non-empty string
                # default would be iterated character-by-character and crash on
                # message["role"] below.
                message_prompt = kwargs.get("messages", [])
                formatted_messages = []
                for message in message_prompt:
                    role = message["role"]
                    content = message["content"]

                    if isinstance(content, list):
                        content_str = ", ".join(
                            # pylint: disable=line-too-long
                            f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
                            if "type" in item else f'text: {item["text"]}'
                            for item in content
                        )
                        formatted_messages.append(f"{role}: {content_str}")
                    else:
                        formatted_messages.append(f"{role}: {content}")
                prompt = "\n".join(formatted_messages)

                # Set base span attributes
                span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
                span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                   SemanticConvetion.GEN_AI_SYSTEM_REKAAI)
                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
                                   SemanticConvetion.GEN_AI_TYPE_CHAT)
                span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                   gen_ai_endpoint)
                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
                                   environment)
                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
                                   application_name)
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL,
                                   kwargs.get("model", "reka-core"))
                span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_IS_STREAM,
                                   False)
                if trace_content:
                    span.add_event(
                        name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                        attributes={
                            SemanticConvetion.GEN_AI_CONTENT_PROMPT: prompt,
                        },
                    )
                    span.add_event(
                        name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                        attributes={
                            # pylint: disable=line-too-long
                            SemanticConvetion.GEN_AI_CONTENT_COMPLETION: response.responses[0].message.content,
                        },
                    )

                prompt_tokens = response.usage.input_tokens
                completion_tokens = response.usage.output_tokens
                total_tokens = prompt_tokens + completion_tokens
                # Calculate cost of the operation
                cost = get_chat_model_cost(kwargs.get("model", "reka-core"),
                                           pricing_info, prompt_tokens, completion_tokens)

                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_PROMPT_TOKENS,
                                   prompt_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COMPLETION_TOKENS,
                                   completion_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_TOTAL_TOKENS,
                                   total_tokens)
                span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_FINISH_REASON,
                                   [response.responses[0].finish_reason])
                span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST,
                                   cost)

                span.set_status(Status(StatusCode.OK))

                if disable_metrics is False:
                    attributes = {
                        TELEMETRY_SDK_NAME:
                            "openlit",
                        SemanticConvetion.GEN_AI_APPLICATION_NAME:
                            application_name,
                        SemanticConvetion.GEN_AI_SYSTEM:
                            SemanticConvetion.GEN_AI_SYSTEM_REKAAI,
                        SemanticConvetion.GEN_AI_ENVIRONMENT:
                            environment,
                        SemanticConvetion.GEN_AI_TYPE:
                            SemanticConvetion.GEN_AI_TYPE_CHAT,
                        SemanticConvetion.GEN_AI_REQUEST_MODEL:
                            kwargs.get("model", "reka-core")
                    }

                    metrics["genai_requests"].add(1, attributes)
                    metrics["genai_total_tokens"].add(total_tokens, attributes)
                    metrics["genai_completion_tokens"].add(completion_tokens, attributes)
                    metrics["genai_prompt_tokens"].add(prompt_tokens, attributes)
                    metrics["genai_cost"].record(cost, attributes)

                # Return original response
                return response

            except Exception as e:
                handle_exception(span, e)
                logger.error("Error in trace creation: %s", e)

                # Return original response
                return response

    return wrapper
@@ -104,14 +104,21 @@ class SemanticConvetion:
104
104
  GEN_AI_SYSTEM_ELEVENLABS = "elevenlabs"
105
105
  GEN_AI_SYSTEM_VLLM = "vLLM"
106
106
  GEN_AI_SYSTEM_GOOGLE_AI_STUDIO = "google-ai-studio"
107
+ GEN_AI_SYSTEM_REKAAI = "rekaai"
108
+ GEN_AI_SYSTEM_PREMAI = "premai"
107
109
  GEN_AI_SYSTEM_AZURE_AI_INFERENCE = "azure-ai-inference"
108
110
  GEN_AI_SYSTEM_LANGCHAIN = "langchain"
109
111
  GEN_AI_SYSTEM_LLAMAINDEX = "llama_index"
110
112
  GEN_AI_SYSTEM_HAYSTACK = "haystack"
111
113
  GEN_AI_SYSTEM_EMBEDCHAIN = "embedchain"
114
+ GEN_AI_SYSTEM_MEM0 = "mem0"
112
115
  GEN_AI_SYSTEM_LITELLM = "litellm"
113
116
  GEN_AI_SYSTEM_CREWAI = "crewai"
114
117
  GEN_AI_SYSTEM_AG2 = "ag2"
118
+ GEN_AI_SYSTEM_MULTION = "multion"
119
+ GEN_AI_SYSTEM_DYNAMIQ = "dynamiq"
120
+ GEN_AI_SYSTEM_PHIDATA = "phidata"
121
+ GEN_AI_SYSTEM_JULEP = "julep"
115
122
 
116
123
  # Vector DB
117
124
  DB_REQUESTS = "db.total.requests"
@@ -133,6 +140,7 @@ class SemanticConvetion:
133
140
  DB_OPERATION_PEEK = "peek"
134
141
  DB_ID_COUNT = "db.ids_count"
135
142
  DB_VECTOR_COUNT = "db.vector_count"
143
+ DB_METADATA = "db.metadata"
136
144
  DB_METADATA_COUNT = "db.metadatas_count"
137
145
  DB_DOCUMENTS_COUNT = "db.documents_count"
138
146
  DB_PAYLOAD_COUNT = "db.payload_count"
@@ -152,6 +160,7 @@ class SemanticConvetion:
152
160
  DB_UPDATE_METADATA = "db.update.metadata"
153
161
  DB_UPDATE_VALUES = "db.update.values"
154
162
  DB_UPDATE_ID = "db.update.id"
163
+ DB_DELETE_ID = "db.delete.id"
155
164
 
156
165
  DB_SYSTEM_CHROMA = "chroma"
157
166
  DB_SYSTEM_PINECONE = "pinecone"
@@ -160,21 +169,28 @@ class SemanticConvetion:
160
169
 
161
170
  # Agents
162
171
  GEN_AI_AGENT_ID = "gen_ai.agent.id"
172
+ GEN_AI_AGENT_TYPE = "gen_ai.agent.type"
163
173
  GEN_AI_AGENT_TASK_ID = "gen_ai.agent.task.id"
164
174
  GEN_AI_AGENT_ROLE = "gen_ai.agent.role"
165
175
  GEN_AI_AGENT_GOAL = "gen_ai.agent.goal"
166
176
  GEN_AI_AGENT_CONTEXT = "gen_ai.agent.context"
167
177
  GEN_AI_AGENT_ENABLE_CACHE = "gen_ai.agent.enable_cache"
178
+ GEN_AI_AGENT_ENABLE_HISTORY = "gen_ai.agent.enable_history"
168
179
  GEN_AI_AGENT_ALLOW_DELEGATION = "gen_ai.agent.allow_delegation"
169
180
  GEN_AI_AGENT_ALLOW_CODE_EXECUTION = "gen_ai.agent.allow_code_execution"
170
181
  GEN_AI_AGENT_MAX_RETRY_LIMIT = "gen_ai.agent.max_retry_limit"
171
182
  GEN_AI_AGENT_TOOLS = "gen_ai.agent.tools"
172
183
  GEN_AI_AGENT_TOOL_RESULTS = "gen_ai.agent.tool_results"
173
184
  GEN_AI_AGENT_TASK = "gen_ai.agent.task"
185
+ GEN_AI_AGENT_INSTRUCTIONS = "gen_ai.agent.instructions"
186
+ GEN_AI_AGENT_STORAGE = "gen_ai.agent.storage"
174
187
  GEN_AI_AGENT_EXPECTED_OUTPUT = "gen_ai.agent.expected_output"
175
188
  GEN_AI_AGENT_ACTUAL_OUTPUT = "gen_ai.agent.actual_output"
176
189
  GEN_AI_AGENT_HUMAN_INPUT = "gen_ai.agent.human_input"
177
190
  GEN_AI_AGENT_TASK_ASSOCIATION = "gen_ai.agent.task_associations"
191
+ GEN_AI_AGENT_BROWSE_URL = "gen_ai.agent.browse_url"
192
+ GEN_AI_AGENT_STEP_COUNT = "gen_ai.agent.step_count"
193
+ GEN_AI_AGENT_RESPONSE_TIME = "gen_ai.agent.response_time"
178
194
 
179
195
  # GPU
180
196
  GPU_INDEX = "gpu.index"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: openlit
3
- Version: 1.31.0
3
+ Version: 1.32.2
4
4
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
5
5
  Home-page: https://github.com/openlit/openlit/tree/main/openlit/python
6
6
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -45,7 +45,7 @@ Description-Content-Type: text/markdown
45
45
  [![Slack](https://img.shields.io/badge/Slack-4A154B?logo=slack&logoColor=white)](https://join.slack.com/t/openlit/shared_invite/zt-2etnfttwg-TjP_7BZXfYg84oAukY8QRQ)
46
46
  [![X](https://img.shields.io/badge/follow-%40openlit__io-1DA1F2?logo=x&style=social)](https://twitter.com/openlit_io)
47
47
 
48
- ![OpenLIT Connections Banner](https://github.com/openlit/.github/blob/main/profile/assets/github-readme-connections-banner.png?raw=true)
48
+ ![OpenLIT Connections Banner](https://github.com/openlit/.github/blob/main/profile/assets/openlit-integrations-banner.png?raw=true)
49
49
 
50
50
 
51
51
  </div>
@@ -69,20 +69,26 @@ This project proudly follows and maintains the [Semantic Conventions](https://gi
69
69
  | [✅ Ollama](https://docs.openlit.io/latest/integrations/ollama) | [✅ Pinecone](https://docs.openlit.io/latest/integrations/pinecone) | [✅ LiteLLM](https://docs.openlit.io/latest/integrations/litellm) | [✅ AMD](https://docs.openlit.io/latest/integrations/amd-gpu) |
70
70
  | [✅ Anthropic](https://docs.openlit.io/latest/integrations/anthropic) | [✅ Qdrant](https://docs.openlit.io/latest/integrations/qdrant) | [✅ LlamaIndex](https://docs.openlit.io/latest/integrations/llama-index) | |
71
71
  | [✅ GPT4All](https://docs.openlit.io/latest/integrations/gpt4all) | [✅ Milvus](https://docs.openlit.io/latest/integrations/milvus) | [✅ Haystack](https://docs.openlit.io/latest/integrations/haystack) | |
72
- | [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
72
+ | [✅ Cohere](https://docs.openlit.io/latest/integrations/cohere) | | [✅ EmbedChain](https://docs.openlit.io/latest/integrations/embedchain) | |
73
73
  | [✅ Mistral](https://docs.openlit.io/latest/integrations/mistral) | | [✅ Guardrails](https://docs.openlit.io/latest/integrations/guardrails) | |
74
74
  | [✅ Azure OpenAI](https://docs.openlit.io/latest/integrations/azure-openai) | | [✅ CrewAI](https://docs.openlit.io/latest/integrations/crewai) | |
75
75
  | [✅ Azure AI Inference](https://docs.openlit.io/latest/integrations/azure-ai-inference) | | [✅ DSPy](https://docs.openlit.io/latest/integrations/dspy) | |
76
76
  | [✅ GitHub AI Models](https://docs.openlit.io/latest/integrations/github-models) | | [✅ AG2](https://docs.openlit.io/latest/integrations/ag2) | |
77
- | [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | | |
78
- | [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | | |
79
- | [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | | |
80
- | [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | | |
77
+ | [✅ HuggingFace Transformers](https://docs.openlit.io/latest/integrations/huggingface) | | [✅ Dynamiq](https://docs.openlit.io/latest/integrations/dynamiq) | |
78
+ | [✅ Amazon Bedrock](https://docs.openlit.io/latest/integrations/bedrock) | | [✅ Phidata](https://docs.openlit.io/latest/integrations/phidata) | |
79
+ | | | [✅ mem0](https://docs.openlit.io/latest/integrations/mem0) | |
80
+ | [✅ Vertex AI](https://docs.openlit.io/latest/integrations/vertexai) | | [✅ MultiOn](https://docs.openlit.io/latest/integrations/multion) | |
81
+ | [✅ Groq](https://docs.openlit.io/latest/integrations/groq) | | [✅ Julep AI](https://docs.openlit.io/latest/integrations/julep-ai) | |
81
82
  | [✅ ElevenLabs](https://docs.openlit.io/latest/integrations/elevenlabs) | | | |
82
83
  | [✅ vLLM](https://docs.openlit.io/latest/integrations/vllm) | | | |
83
84
  | [✅ OLA Krutrim](https://docs.openlit.io/latest/integrations/krutrim) | | | |
84
85
  | [✅ Google AI Studio](https://docs.openlit.io/latest/integrations/google-ai-studio) | | | |
85
86
  | [✅ NVIDIA NIM](https://docs.openlit.io/latest/integrations/nvidia-nim) | | | |
87
+ | [✅ Titan ML](https://docs.openlit.io/latest/integrations/titan-ml) | | | |
88
+ | [✅ Reka AI](https://docs.openlit.io/latest/integrations/reka) | | | |
89
+ | [✅ xAI](https://docs.openlit.io/latest/integrations/xai) | | | |
90
+ | [✅ Prem AI](https://docs.openlit.io/latest/integrations/premai) | | | |
91
+
86
92
 
87
93
  ## Supported Destinations
88
94
  - [✅ OpenTelemetry Collector](https://docs.openlit.io/latest/connections/otelcol)
@@ -1,5 +1,5 @@
1
1
  openlit/__helpers.py,sha256=2OkGKOdsd9Hc011WxR70OqDlO6c4mZcu6McGuW1uAdA,6316
2
- openlit/__init__.py,sha256=gLhGox66F9JdxwSBYyhCvXRmBluFJN-naNnP_5rG3jI,19940
2
+ openlit/__init__.py,sha256=F9WoSIEnxzZzZFGy1z---7gB7BlwMRcyph4lHdhgp-Q,20870
3
3
  openlit/evals/__init__.py,sha256=nJe99nuLo1b5rf7pt9U9BCdSDedzbVi2Fj96cgl7msM,380
4
4
  openlit/evals/all.py,sha256=oWrue3PotE-rB5WePG3MRYSA-ro6WivkclSHjYlAqGs,7154
5
5
  openlit/evals/bias_detection.py,sha256=mCdsfK7x1vX7S3psC3g641IMlZ-7df3h-V6eiICj5N8,8154
@@ -27,7 +27,9 @@ openlit/instrumentation/chroma/chroma.py,sha256=E80j_41UeZi8RzTsHbpvi1izOA_n-0-3
27
27
  openlit/instrumentation/cohere/__init__.py,sha256=PC5T1qIg9pwLNocBP_WjG5B_6p_z019s8quk_fNLAMs,1920
28
28
  openlit/instrumentation/cohere/cohere.py,sha256=62-P2K39v6pIJme6vTVViLJ9PP8q_UWkTv2l3Wa2gHA,21217
29
29
  openlit/instrumentation/crewai/__init__.py,sha256=cETkkwnKYEMAKlMrHbZ9-RvcRUPYaSNqNIhy2-vCDK8,1794
30
- openlit/instrumentation/crewai/crewai.py,sha256=V0ZAlNf6vPL6nZs_XvQYG2DqpgfbX_37yMnScAu3dsk,6917
30
+ openlit/instrumentation/crewai/crewai.py,sha256=mpEJql6aDs3wwBjLz686anOHkIA5gWfhFCCHAgJRY0w,7049
31
+ openlit/instrumentation/dynamiq/__init__.py,sha256=2uIHHxFWca0g2YLO2RBfi2Al6uWUYvVZBfDiPOHCdpQ,2331
32
+ openlit/instrumentation/dynamiq/dynamiq.py,sha256=ymEctNepwQ_9YGSoR_Sf1NwmSLwmGnFfWJZe3FZAE9M,5128
31
33
  openlit/instrumentation/elevenlabs/__init__.py,sha256=BZjAe-kzFJpKxT0tKksXVfZgirvgEp8qM3SfegWU5co,2631
32
34
  openlit/instrumentation/elevenlabs/async_elevenlabs.py,sha256=yMYACh95SFr5EYklKnXw2DrPFa3iIgM4qQMWjO1itMU,5690
33
35
  openlit/instrumentation/elevenlabs/elevenlabs.py,sha256=mFnD7sgT47OxaXJz0Vc1nrNjXEpcGQDj5run3gA48Lw,6089
@@ -44,6 +46,9 @@ openlit/instrumentation/groq/async_groq.py,sha256=myob-d9V66YiNmkFd9rtmMaXjlLiSM
44
46
  openlit/instrumentation/groq/groq.py,sha256=m4gFPbYzjUUIgjXZ0Alu2Zy1HcO5takCFA2XFnkcGVo,19975
45
47
  openlit/instrumentation/haystack/__init__.py,sha256=QK6XxxZUHX8vMv2Crk7rNBOc64iOOBLhJGL_lPlAZ8s,1758
46
48
  openlit/instrumentation/haystack/haystack.py,sha256=oQIZiDhdp3gnJnhYQ1OouJMc9YT0pQ-_31cmNuopa68,3891
49
+ openlit/instrumentation/julep/__init__.py,sha256=oonEVK41P5g4SYRm0E_H4zCVH9NM4aJ-UswXzF3Oiv0,3136
50
+ openlit/instrumentation/julep/async_julep.py,sha256=OO8lIm9uUV1lhPo_klKBVyaDwgHhFJlohTeZItd5qwU,5273
51
+ openlit/instrumentation/julep/julep.py,sha256=lDUmkEP4hXk7vgUUbNRD-mnfdfrZifXSFVVILs8Ttkw,5276
47
52
  openlit/instrumentation/langchain/__init__.py,sha256=0AI2Dnqw81IcJw3jM--gGkv_HRh2GtosOGJjvOpw7Zk,3431
48
53
  openlit/instrumentation/langchain/langchain.py,sha256=jZgWBBWYHYSNnkf5wKyNFF_z9M9YxaZKGI_uyfvtMBU,36909
49
54
  openlit/instrumentation/litellm/__init__.py,sha256=Z-LsVHKJdPganHfJA_rWg7xAfQYkvLfpLdF-eckU4qY,2401
@@ -51,11 +56,16 @@ openlit/instrumentation/litellm/async_litellm.py,sha256=1MKNZbvKaf1lFWbXi1MQy3qF
51
56
  openlit/instrumentation/litellm/litellm.py,sha256=4YqCQ4CEQ4sfDu7pTlnflL_AfUqYEQdJDTO7nHJ6noY,27450
52
57
  openlit/instrumentation/llamaindex/__init__.py,sha256=vPtK65G6b-TwJERowVRUVl7f_nBSlFdwPBtpg8dOGos,1977
53
58
  openlit/instrumentation/llamaindex/llamaindex.py,sha256=uiIigbwhonSbJWA7LpgOVI1R4kxxPODS1K5wyHIQ4hM,4048
59
+ openlit/instrumentation/mem0/__init__.py,sha256=guOkLoSKvHSVSmEWhCHMVRMUGEa5JzqI8CIluHtwirQ,2417
60
+ openlit/instrumentation/mem0/mem0.py,sha256=lX80WCvsgroBLgQS7TSS64yGuXv7-5GshFq02jDlNfY,5305
54
61
  openlit/instrumentation/milvus/__init__.py,sha256=qi1yfmMrvkDtnrN_6toW8qC9BRL78bq7ayWpObJ8Bq4,2961
55
62
  openlit/instrumentation/milvus/milvus.py,sha256=qhKIoggBAJhRctRrBYz69AcvXH-eh7oBn_l9WfxpAjI,9121
56
63
  openlit/instrumentation/mistral/__init__.py,sha256=niWn0gYNOTPS5zoTjtCciDqQVj-iJehnpdh7ElB-H9w,3088
57
64
  openlit/instrumentation/mistral/async_mistral.py,sha256=l-kcaGPrX3sqPH-RXWo6ope0Ui3nUvExNJ4KX9QgDMY,22246
58
65
  openlit/instrumentation/mistral/mistral.py,sha256=Q7MMRvVFsM8o0_ebZ0EfnhGjs16SJSnmu-oE798gYMQ,22087
66
+ openlit/instrumentation/multion/__init__.py,sha256=DUt70uINLYi4xTxZ6D3bxKUBHYi1FpKbliQ6E7D_SeQ,3069
67
+ openlit/instrumentation/multion/async_multion.py,sha256=6jeYiEu94_jUQF4yMA_Ua-OnsMX7th3__USf56T3PPU,5889
68
+ openlit/instrumentation/multion/multion.py,sha256=X9HGCQ7KFu6aOOh9xKBBKXT73Xc1w3Y5s-1lELmrk_g,5871
59
69
  openlit/instrumentation/ollama/__init__.py,sha256=cOax8PiypDuo_FC4WvDCYBRo7lH5nV9xU92h7k-eZbg,3812
60
70
  openlit/instrumentation/ollama/async_ollama.py,sha256=7lbikD-I9k8VL63idqj3VMEfiEKJmFNUPR8Xb6g2phQ,31366
61
71
  openlit/instrumentation/ollama/ollama.py,sha256=lBt1d3rFnF1tFbfdOccwjEafHnmTAUGsiOKSHku6Fkw,31277
@@ -64,11 +74,18 @@ openlit/instrumentation/openai/async_azure_openai.py,sha256=XbST1UE_zXzNL6RX2XwC
64
74
  openlit/instrumentation/openai/async_openai.py,sha256=XFsfN81mbmdgRON2dwmt8pypqoTnlrNWer1eit7wZbQ,50176
65
75
  openlit/instrumentation/openai/azure_openai.py,sha256=dZUc5MtCwg_sZJWiruG6exYGhPAm-339sqs3sKZNRPU,48761
66
76
  openlit/instrumentation/openai/openai.py,sha256=qP3ahUyMGjmq2ZB8apqnERal7kz49uW5DaxDU9FBQdk,50005
77
+ openlit/instrumentation/phidata/__init__.py,sha256=rfPCXYOIsJbxChee2p269UzkJ1Z-pvQbii7Fgrw1v2g,1527
78
+ openlit/instrumentation/phidata/phidata.py,sha256=9Aza2bLgeq688Ahyy7ekbxpSh4RTD7FFKtLmv4TNbrw,4667
67
79
  openlit/instrumentation/pinecone/__init__.py,sha256=Mv9bElqNs07_JQkYyNnO0wOM3hdbprmw7sttdMeKC7g,2526
68
80
  openlit/instrumentation/pinecone/pinecone.py,sha256=0EhLmtOuvwWVvAKh3e56wyd8wzQq1oaLOmF15SVHxVE,8765
81
+ openlit/instrumentation/premai/__init__.py,sha256=g7kBjxEsldQIiZpxH4LgXFmU-WSmqywW4aFxqwH-ptA,1844
82
+ openlit/instrumentation/premai/premai.py,sha256=DIAAXXrEmxXW6vZjGqRgtRGScucob1RusCdciXfYhME,26574
69
83
  openlit/instrumentation/qdrant/__init__.py,sha256=GMlZgRBKoQMgrL4cFbAKwytfdTHLzJEIuTQMxp0uZO0,8940
70
84
  openlit/instrumentation/qdrant/async_qdrant.py,sha256=Xuyw2N75mRIjltrmY8wJes5DHal0Ku3A8VcUqfbsOl0,15071
71
85
  openlit/instrumentation/qdrant/qdrant.py,sha256=K0cvEUbNx0hnk8AbEheYPSHcCgjFC482IZyHF9-P_b8,15488
86
+ openlit/instrumentation/reka/__init__.py,sha256=X0zZ8Q18Z_6pIpksa7pdWldK4SKZM7U24zNc2UeRXC8,1870
87
+ openlit/instrumentation/reka/async_reka.py,sha256=PDodlH_XycevE3k8u0drP7bokKtPDUcDfzfWRz6Fzt4,7439
88
+ openlit/instrumentation/reka/reka.py,sha256=CL9uNX_tYjw2eetTxLKRNRQJ-OgI_e5YRz9iu9f_gP4,7421
72
89
  openlit/instrumentation/transformers/__init__.py,sha256=4GBtjzcJU4XiPexIUYEqF3pNZMeQw4Gm5B-cyumaFjs,1468
73
90
  openlit/instrumentation/transformers/transformers.py,sha256=MWEVkxHRWTHrpD85I1leksDIVtBiTtR5fQCO3Z62qb4,7875
74
91
  openlit/instrumentation/vertexai/__init__.py,sha256=N3E9HtzefD-zC0fvmfGYiDmSqssoavp_i59wfuYLyMw,6079
@@ -78,8 +95,8 @@ openlit/instrumentation/vllm/__init__.py,sha256=OVWalQ1dXvip1DUsjUGaHX4J-2FrSp-T
78
95
  openlit/instrumentation/vllm/vllm.py,sha256=lDzM7F5pgxvh8nKL0dcKB4TD0Mc9wXOWeXOsOGN7Wd8,6527
79
96
  openlit/otel/metrics.py,sha256=y7SQDTyfLakMrz0V4DThN-WAeap7YZzyndeYGSP6nVg,4516
80
97
  openlit/otel/tracing.py,sha256=fG3vl-flSZ30whCi7rrG25PlkIhhr8PhnfJYCkZzCD0,3895
81
- openlit/semcov/__init__.py,sha256=mXDJNyz6dFAaNPtu90iWYBclP8tz0Ia22QVjHq1Mxz8,9167
82
- openlit-1.31.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
83
- openlit-1.31.0.dist-info/METADATA,sha256=Jn8RmER5JZRfC0PJ1Kpvm-pozhh6pHDPhWM4N9ro5ns,21046
84
- openlit-1.31.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
85
- openlit-1.31.0.dist-info/RECORD,,
98
+ openlit/semcov/__init__.py,sha256=asA0rhBek-BcovxS2EVz-pTsLpt0FE6aaR-7RaIqPaQ,9877
99
+ openlit-1.32.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
100
+ openlit-1.32.2.dist-info/METADATA,sha256=96ZqO5vl3oosge89lLRgOQqHUUM5-I-bVdJWcItGDcM,22396
101
+ openlit-1.32.2.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
102
+ openlit-1.32.2.dist-info/RECORD,,