openlit 1.33.8__py3-none-any.whl → 1.33.10__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (92)
  1. openlit/__helpers.py +88 -0
  2. openlit/__init__.py +4 -3
  3. openlit/instrumentation/ag2/ag2.py +5 -5
  4. openlit/instrumentation/ai21/__init__.py +4 -4
  5. openlit/instrumentation/ai21/ai21.py +370 -319
  6. openlit/instrumentation/ai21/async_ai21.py +371 -319
  7. openlit/instrumentation/anthropic/__init__.py +4 -4
  8. openlit/instrumentation/anthropic/anthropic.py +321 -189
  9. openlit/instrumentation/anthropic/async_anthropic.py +323 -190
  10. openlit/instrumentation/assemblyai/__init__.py +1 -1
  11. openlit/instrumentation/assemblyai/assemblyai.py +59 -43
  12. openlit/instrumentation/astra/astra.py +9 -9
  13. openlit/instrumentation/astra/async_astra.py +9 -9
  14. openlit/instrumentation/azure_ai_inference/__init__.py +4 -4
  15. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +406 -252
  16. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +406 -252
  17. openlit/instrumentation/bedrock/__init__.py +1 -1
  18. openlit/instrumentation/bedrock/bedrock.py +115 -58
  19. openlit/instrumentation/chroma/chroma.py +9 -9
  20. openlit/instrumentation/cohere/__init__.py +33 -10
  21. openlit/instrumentation/cohere/async_cohere.py +610 -0
  22. openlit/instrumentation/cohere/cohere.py +410 -219
  23. openlit/instrumentation/controlflow/controlflow.py +5 -5
  24. openlit/instrumentation/crawl4ai/async_crawl4ai.py +5 -5
  25. openlit/instrumentation/crawl4ai/crawl4ai.py +5 -5
  26. openlit/instrumentation/crewai/crewai.py +6 -4
  27. openlit/instrumentation/dynamiq/dynamiq.py +5 -5
  28. openlit/instrumentation/elevenlabs/async_elevenlabs.py +71 -46
  29. openlit/instrumentation/elevenlabs/elevenlabs.py +71 -51
  30. openlit/instrumentation/embedchain/embedchain.py +9 -9
  31. openlit/instrumentation/firecrawl/firecrawl.py +5 -5
  32. openlit/instrumentation/google_ai_studio/__init__.py +9 -9
  33. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +183 -219
  34. openlit/instrumentation/google_ai_studio/google_ai_studio.py +183 -220
  35. openlit/instrumentation/gpt4all/__init__.py +2 -2
  36. openlit/instrumentation/gpt4all/gpt4all.py +345 -220
  37. openlit/instrumentation/gpu/__init__.py +5 -5
  38. openlit/instrumentation/groq/__init__.py +2 -2
  39. openlit/instrumentation/groq/async_groq.py +356 -240
  40. openlit/instrumentation/groq/groq.py +356 -240
  41. openlit/instrumentation/haystack/haystack.py +5 -5
  42. openlit/instrumentation/julep/async_julep.py +5 -5
  43. openlit/instrumentation/julep/julep.py +5 -5
  44. openlit/instrumentation/langchain/__init__.py +13 -7
  45. openlit/instrumentation/langchain/async_langchain.py +384 -0
  46. openlit/instrumentation/langchain/langchain.py +105 -492
  47. openlit/instrumentation/letta/letta.py +11 -9
  48. openlit/instrumentation/litellm/__init__.py +4 -5
  49. openlit/instrumentation/litellm/async_litellm.py +318 -247
  50. openlit/instrumentation/litellm/litellm.py +314 -243
  51. openlit/instrumentation/llamaindex/llamaindex.py +5 -5
  52. openlit/instrumentation/mem0/mem0.py +5 -5
  53. openlit/instrumentation/milvus/milvus.py +9 -9
  54. openlit/instrumentation/mistral/__init__.py +6 -6
  55. openlit/instrumentation/mistral/async_mistral.py +423 -250
  56. openlit/instrumentation/mistral/mistral.py +420 -246
  57. openlit/instrumentation/multion/async_multion.py +6 -4
  58. openlit/instrumentation/multion/multion.py +6 -4
  59. openlit/instrumentation/ollama/__init__.py +8 -30
  60. openlit/instrumentation/ollama/async_ollama.py +385 -417
  61. openlit/instrumentation/ollama/ollama.py +384 -417
  62. openlit/instrumentation/openai/__init__.py +11 -230
  63. openlit/instrumentation/openai/async_openai.py +433 -410
  64. openlit/instrumentation/openai/openai.py +414 -394
  65. openlit/instrumentation/phidata/phidata.py +6 -4
  66. openlit/instrumentation/pinecone/pinecone.py +9 -9
  67. openlit/instrumentation/premai/__init__.py +2 -2
  68. openlit/instrumentation/premai/premai.py +262 -213
  69. openlit/instrumentation/qdrant/async_qdrant.py +9 -9
  70. openlit/instrumentation/qdrant/qdrant.py +9 -9
  71. openlit/instrumentation/reka/__init__.py +2 -2
  72. openlit/instrumentation/reka/async_reka.py +90 -52
  73. openlit/instrumentation/reka/reka.py +90 -52
  74. openlit/instrumentation/together/__init__.py +4 -4
  75. openlit/instrumentation/together/async_together.py +278 -236
  76. openlit/instrumentation/together/together.py +278 -236
  77. openlit/instrumentation/transformers/__init__.py +1 -1
  78. openlit/instrumentation/transformers/transformers.py +76 -45
  79. openlit/instrumentation/vertexai/__init__.py +14 -64
  80. openlit/instrumentation/vertexai/async_vertexai.py +330 -987
  81. openlit/instrumentation/vertexai/vertexai.py +330 -987
  82. openlit/instrumentation/vllm/__init__.py +1 -1
  83. openlit/instrumentation/vllm/vllm.py +66 -36
  84. openlit/otel/metrics.py +98 -7
  85. openlit/semcov/__init__.py +113 -80
  86. {openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/METADATA +1 -1
  87. openlit-1.33.10.dist-info/RECORD +122 -0
  88. {openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/WHEEL +1 -1
  89. openlit/instrumentation/openai/async_azure_openai.py +0 -900
  90. openlit/instrumentation/openai/azure_openai.py +0 -898
  91. openlit-1.33.8.dist-info/RECORD +0 -122
  92. {openlit-1.33.8.dist-info → openlit-1.33.10.dist-info}/LICENSE +0 -0
openlit/__helpers.py CHANGED
@@ -6,9 +6,12 @@ import os
 import json
 import logging
 from urllib.parse import urlparse
+from typing import Any, Dict, List, Tuple
 import requests
 import tiktoken
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from opentelemetry.trace import Status, StatusCode
+from openlit.semcov import SemanticConvetion
 
 # Set up logging
 logger = logging.getLogger(__name__)
@@ -197,3 +200,88 @@ def handle_exception(span,e):
     # Record the exception details within the span
     span.record_exception(e)
     span.set_status(Status(StatusCode.ERROR))
+
+def calculate_ttft(timestamps: List[float], start_time: float) -> float:
+    """
+    Calculate the time to the first tokens.
+
+    :param timestamps: List of timestamps for received tokens
+    :param start_time: The start time of the streaming process
+    :return: Time to the first tokens
+    """
+    if timestamps:
+        return timestamps[0] - start_time
+    return 0.0
+
+def calculate_tbt(timestamps: List[float]) -> float:
+    """
+    Calculate the average time between tokens.
+
+    :param timestamps: List of timestamps for received tokens
+    :return: Average time between tokens
+    """
+    if len(timestamps) > 1:
+        time_diffs = [timestamps[i] - timestamps[i - 1] for i in range(1, len(timestamps))]
+        return sum(time_diffs) / len(time_diffs)
+    return 0.0
+
+def create_metrics_attributes(
+    service_name: str,
+    deployment_environment: str,
+    operation: str,
+    system: str,
+    request_model: str,
+    server_address: str,
+    server_port: int,
+    response_model: str,
+) -> Dict[Any, Any]:
+    """
+    Returns OTel metrics attributes
+    """
+    return {
+        TELEMETRY_SDK_NAME: "openlit",
+        SERVICE_NAME: service_name,
+        DEPLOYMENT_ENVIRONMENT: deployment_environment,
+        SemanticConvetion.GEN_AI_OPERATION: operation,
+        SemanticConvetion.GEN_AI_SYSTEM: system,
+        SemanticConvetion.GEN_AI_REQUEST_MODEL: request_model,
+        SemanticConvetion.SERVER_ADDRESS: server_address,
+        SemanticConvetion.SERVER_PORT: server_port,
+        SemanticConvetion.GEN_AI_RESPONSE_MODEL: response_model
+    }
+
+def set_server_address_and_port(client_instance: Any,
+    default_server_address: str, default_server_port: int) -> Tuple[str, int]:
+    """
+    Determines and returns the server address and port based on the provided client's `base_url`,
+    using defaults if none found or values are None.
+    """
+
+    # Try getting base_url from multiple potential attributes
+    base_client = getattr(client_instance, "_client", None)
+    base_url = getattr(base_client, "base_url", None)
+
+    if not base_url:
+        # Attempt to get endpoint from instance._config.endpoint if base_url is not set
+        config = getattr(client_instance, "_config", None)
+        base_url = getattr(config, "endpoint", None)
+
+    if not base_url:
+        # Attempt to get server_url from instance.sdk_configuration.server_url
+        config = getattr(client_instance, "sdk_configuration", None)
+        base_url = getattr(config, "server_url", None)
+
+    if base_url:
+        if isinstance(base_url, str):
+            url = urlparse(base_url)
+            server_address = url.hostname or default_server_address
+            server_port = url.port if url.port is not None else default_server_port
+        else:  # base_url might not be a str; handle as an object.
+            server_address = getattr(base_url, "host", None) or default_server_address
+            port_attr = getattr(base_url, "port", None)
+            server_port = port_attr if port_attr is not None else default_server_port
+    else:  # no base_url or endpoint provided; use defaults.
+        server_address = default_server_address
+        server_port = default_server_port
+
+    return server_address, server_port
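For orientation, the new helpers above are plain utility functions. Below is a minimal usage sketch for the two streaming-latency helpers, using hypothetical chunk timestamps; it assumes openlit 1.33.10 is installed so calculate_ttft and calculate_tbt can be imported from openlit.__helpers (the same module path ag2.py imports handle_exception from).

import time
from openlit.__helpers import calculate_ttft, calculate_tbt

start_time = time.monotonic()
# Hypothetical arrival times of streamed response chunks, recorded as received.
timestamps = [start_time + 0.42, start_time + 0.47, start_time + 0.53]

ttft = calculate_ttft(timestamps, start_time)  # 0.42 -> time to first token
tbt = calculate_tbt(timestamps)                # 0.055 -> average gap between tokens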
openlit/__init__.py CHANGED
@@ -17,6 +17,7 @@ import requests
 # Import internal modules for setting up tracing and fetching pricing info.
 from opentelemetry import trace as t
 from opentelemetry.trace import SpanKind, Status, StatusCode, Span
+from opentelemetry.sdk.resources import SERVICE_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.semcov import SemanticConvetion
 from openlit.otel.tracing import setup_tracing
 from openlit.otel.metrics import setup_meter
@@ -251,7 +252,7 @@ def init(
         "gpt4all": "gpt4all",
         "elevenlabs": "elevenlabs",
         "vllm": "vllm",
-        "google-ai-studio": "google.generativeai",
+        "google-ai-studio": "google.genai",
         "azure-ai-inference": "azure.ai.inference",
         "langchain": "langchain",
         "llama_index": "llama_index",
@@ -555,11 +556,11 @@ def trace(wrapped):
                 span.set_attribute("function.args", str(args))
                 span.set_attribute("function.kwargs", str(kwargs))
                 span.set_attribute(
-                    SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                    SERVICE_NAME,
                     OpenlitConfig.application_name,
                 )
                 span.set_attribute(
-                    SemanticConvetion.GEN_AI_ENVIRONMENT, OpenlitConfig.environment
+                    DEPLOYMENT_ENVIRONMENT, OpenlitConfig.environment
                 )
             except Exception as meta_exception:
                 logging.error(
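The attribute-key change in the trace decorator above is effectively a rename: the custom SemanticConvetion keys are replaced by the standard OpenTelemetry resource attribute names. A quick sketch (assuming opentelemetry-sdk is installed) of the string values these constants resolve to:

from opentelemetry.sdk.resources import SERVICE_NAME, DEPLOYMENT_ENVIRONMENT

# The SDK defines these as plain strings, so decorated spans are now tagged
# with the standard keys below rather than the former SemanticConvetion ones.
print(SERVICE_NAME)            # "service.name"
print(DEPLOYMENT_ENVIRONMENT)  # "deployment.environment"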
openlit/instrumentation/ag2/ag2.py CHANGED
@@ -5,7 +5,7 @@ Module for monitoring AG2.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import handle_exception
 from openlit.semcov import SemanticConvetion
 
@@ -70,12 +70,12 @@ def wrap_ag2(gen_ai_endpoint, version, environment, application_name,
                                gen_ai_endpoint)
                 span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
                                    SemanticConvetion.GEN_AI_SYSTEM_AG2)
-                span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
                                    environment)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                span.set_attribute(SERVICE_NAME,
                                    application_name)
-                span.set_attribute(SemanticConvetion.GEN_AI_TYPE,
-                                   SemanticConvetion.GEN_AI_TYPE_AGENT)
+                span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
+                                   SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
                 span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ROLE,
                                    instance.name)
                 if llm_model:
openlit/instrumentation/ai21/__init__.py CHANGED
@@ -37,13 +37,13 @@ class AI21Instrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "ai21.clients.studio.resources.chat.chat_completions",
             "ChatCompletions.create",
-            chat("ai21.chat.completions", version, environment, application_name,
+            chat(version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ai21.clients.studio.resources.studio_conversational_rag",
             "StudioConversationalRag.create",
-            chat_rag("ai21.conversational_rag", version, environment, application_name,
+            chat_rag(version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
@@ -51,13 +51,13 @@ class AI21Instrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "ai21.clients.studio.resources.chat.async_chat_completions",
             "AsyncChatCompletions.create",
-            async_chat("ai21.chat.completions", version, environment, application_name,
+            async_chat(version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ai21.clients.studio.resources.studio_conversational_rag",
             "AsyncStudioConversationalRag.create",
-            async_chat_rag("ai21.conversational_rag", version, environment, application_name,
+            async_chat_rag(version, environment, application_name,
                  tracer, pricing_info, trace_content, metrics, disable_metrics),
         )
 
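The chat, chat_rag, async_chat, and async_chat_rag factories above are handed to wrapt's wrap_function_wrapper and now take version as their first argument instead of a hard-coded endpoint string. An illustrative sketch of that shape follows (not the package's actual implementation; assumes wrapt and the ai21 SDK are importable):

from wrapt import wrap_function_wrapper

def chat(version, environment, application_name,
         tracer, pricing_info, trace_content, metrics, disable_metrics):
    # Factory returning a wrapt-style wrapper: (wrapped, instance, args, kwargs).
    def wrapper(wrapped, instance, args, kwargs):
        # Real instrumentation opens a span, calls the wrapped method, and then
        # records attributes/metrics; this sketch only delegates the call.
        return wrapped(*args, **kwargs)
    return wrapper

# Patch the target method the same way the instrumentor does
# (hypothetical arguments; wrapt imports the named module at patch time).
wrap_function_wrapper(
    "ai21.clients.studio.resources.chat.chat_completions",
    "ChatCompletions.create",
    chat("0.0.0", "default", "demo-app", None, {}, True, None, True),
)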