openlit 1.33.9__py3-none-any.whl → 1.33.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. openlit/__helpers.py +5 -0
  2. openlit/__init__.py +3 -2
  3. openlit/instrumentation/ag2/ag2.py +3 -3
  4. openlit/instrumentation/ai21/ai21.py +1 -1
  5. openlit/instrumentation/ai21/async_ai21.py +1 -1
  6. openlit/instrumentation/anthropic/anthropic.py +1 -1
  7. openlit/instrumentation/anthropic/async_anthropic.py +1 -1
  8. openlit/instrumentation/astra/astra.py +5 -5
  9. openlit/instrumentation/astra/async_astra.py +5 -5
  10. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +3 -3
  11. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +3 -3
  12. openlit/instrumentation/chroma/chroma.py +5 -5
  13. openlit/instrumentation/cohere/async_cohere.py +1 -1
  14. openlit/instrumentation/cohere/cohere.py +2 -2
  15. openlit/instrumentation/controlflow/controlflow.py +3 -3
  16. openlit/instrumentation/crawl4ai/async_crawl4ai.py +3 -3
  17. openlit/instrumentation/crawl4ai/crawl4ai.py +3 -3
  18. openlit/instrumentation/crewai/crewai.py +4 -2
  19. openlit/instrumentation/dynamiq/dynamiq.py +3 -3
  20. openlit/instrumentation/elevenlabs/async_elevenlabs.py +1 -2
  21. openlit/instrumentation/elevenlabs/elevenlabs.py +1 -2
  22. openlit/instrumentation/embedchain/embedchain.py +5 -5
  23. openlit/instrumentation/firecrawl/firecrawl.py +3 -3
  24. openlit/instrumentation/gpt4all/__init__.py +2 -2
  25. openlit/instrumentation/gpt4all/gpt4all.py +345 -220
  26. openlit/instrumentation/gpu/__init__.py +5 -5
  27. openlit/instrumentation/groq/__init__.py +2 -2
  28. openlit/instrumentation/groq/async_groq.py +356 -240
  29. openlit/instrumentation/groq/groq.py +356 -240
  30. openlit/instrumentation/haystack/haystack.py +3 -3
  31. openlit/instrumentation/julep/async_julep.py +3 -3
  32. openlit/instrumentation/julep/julep.py +3 -3
  33. openlit/instrumentation/langchain/__init__.py +13 -7
  34. openlit/instrumentation/langchain/async_langchain.py +384 -0
  35. openlit/instrumentation/langchain/langchain.py +98 -490
  36. openlit/instrumentation/letta/letta.py +5 -3
  37. openlit/instrumentation/litellm/__init__.py +4 -5
  38. openlit/instrumentation/litellm/async_litellm.py +316 -245
  39. openlit/instrumentation/litellm/litellm.py +312 -241
  40. openlit/instrumentation/llamaindex/llamaindex.py +3 -3
  41. openlit/instrumentation/mem0/mem0.py +3 -3
  42. openlit/instrumentation/milvus/milvus.py +5 -5
  43. openlit/instrumentation/mistral/__init__.py +6 -6
  44. openlit/instrumentation/mistral/async_mistral.py +421 -248
  45. openlit/instrumentation/mistral/mistral.py +418 -244
  46. openlit/instrumentation/multion/async_multion.py +4 -2
  47. openlit/instrumentation/multion/multion.py +4 -2
  48. openlit/instrumentation/ollama/__init__.py +8 -30
  49. openlit/instrumentation/ollama/async_ollama.py +385 -417
  50. openlit/instrumentation/ollama/ollama.py +384 -417
  51. openlit/instrumentation/openai/async_openai.py +7 -9
  52. openlit/instrumentation/openai/openai.py +7 -9
  53. openlit/instrumentation/phidata/phidata.py +4 -2
  54. openlit/instrumentation/pinecone/pinecone.py +5 -5
  55. openlit/instrumentation/premai/__init__.py +2 -2
  56. openlit/instrumentation/premai/premai.py +262 -213
  57. openlit/instrumentation/qdrant/async_qdrant.py +5 -5
  58. openlit/instrumentation/qdrant/qdrant.py +5 -5
  59. openlit/instrumentation/reka/__init__.py +2 -2
  60. openlit/instrumentation/reka/async_reka.py +90 -52
  61. openlit/instrumentation/reka/reka.py +90 -52
  62. openlit/instrumentation/together/__init__.py +4 -4
  63. openlit/instrumentation/together/async_together.py +278 -236
  64. openlit/instrumentation/together/together.py +278 -236
  65. openlit/instrumentation/transformers/__init__.py +1 -1
  66. openlit/instrumentation/transformers/transformers.py +75 -44
  67. openlit/instrumentation/vertexai/__init__.py +14 -64
  68. openlit/instrumentation/vertexai/async_vertexai.py +329 -986
  69. openlit/instrumentation/vertexai/vertexai.py +329 -986
  70. openlit/instrumentation/vllm/__init__.py +1 -1
  71. openlit/instrumentation/vllm/vllm.py +62 -32
  72. openlit/semcov/__init__.py +3 -3
  73. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/METADATA +1 -1
  74. openlit-1.33.10.dist-info/RECORD +122 -0
  75. openlit-1.33.9.dist-info/RECORD +0 -121
  76. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/LICENSE +0 -0
  77. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/WHEEL +0 -0
@@ -44,7 +44,7 @@ def async_chat_completions(version, environment, application_name,
  class TracedAsyncStream:
      """
      Wrapper for streaming responses to collect metrics and trace data.
-     Wraps the 'openai.AsyncStream' response to collect message IDs and aggregated response.
+     Wraps the response to collect message IDs and aggregated response.
 
      This class implements the '__aiter__' and '__anext__' methods that
      handle asynchronous streaming responses.
@@ -197,11 +197,11 @@ def async_chat_completions(version, environment, application_name,
      output_tokens)
  self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
      self._server_address)
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
+ self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SERVICE_TIER,
      self._kwargs.get("service_tier", "auto"))
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+ self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SERVICE_TIER,
      self._openai_response_service_tier)
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
+ self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
      self._openai_system_fingerprint)
  if isinstance(self._llmresponse, str):
      self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
@@ -382,11 +382,11 @@ def async_chat_completions(version, environment, application_name,
      output_tokens)
  span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
      server_address)
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
+ span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SERVICE_TIER,
      kwargs.get("service_tier", "auto"))
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+ span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SERVICE_TIER,
      response_dict.get('service_tier'))
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
+ span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
      response_dict.get('system_fingerprint'))
 
  # Set base span attribues (Extras)
@@ -657,8 +657,6 @@ def async_image_generate(version, environment, application_name,
  else:
      image = "url"
 
- request_model = kwargs.get("model", "dall-e-2")
-
  # Calculate cost of the operation
  cost = get_image_model_cost(request_model,
      pricing_info, kwargs.get("size", "1024x1024"),
@@ -44,7 +44,7 @@ def chat_completions(version, environment, application_name,
  class TracedSyncStream:
      """
      Wrapper for streaming responses to collect metrics and trace data.
-     Wraps the 'openai.AsyncStream' response to collect message IDs and aggregated response.
+     Wraps the response to collect message IDs and aggregated response.
 
      This class implements the '__aiter__' and '__anext__' methods that
      handle asynchronous streaming responses.
@@ -197,11 +197,11 @@ def chat_completions(version, environment, application_name,
      output_tokens)
  self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
      self._server_address)
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
+ self._span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SERVICE_TIER,
      self._kwargs.get("service_tier", "auto"))
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+ self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SERVICE_TIER,
      self._openai_response_service_tier)
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
+ self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
      self._openai_system_fingerprint)
  if isinstance(self._llmresponse, str):
      self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
@@ -382,11 +382,11 @@ def chat_completions(version, environment, application_name,
      output_tokens)
  span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
      server_address)
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_REQUEST_SERVICE_TIER,
+ span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_SERVICE_TIER,
      kwargs.get("service_tier", "auto"))
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SERVICE_TIER,
+ span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SERVICE_TIER,
      response_dict.get('service_tier'))
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
+ span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
      response_dict.get('system_fingerprint'))
 
  # Set base span attribues (Extras)
@@ -657,8 +657,6 @@ def image_generate(version, environment, application_name,
  else:
      image = "url"
 
- request_model = kwargs.get("model", "dall-e-2")
-
  # Calculate cost of the operation
  cost = get_image_model_cost(request_model,
      pricing_info, kwargs.get("size", "1024x1024"),
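The hunks above rename the OpenAI-prefixed service-tier and system-fingerprint constants (GEN_AI_OPENAI_*) to provider-neutral ones (GEN_AI_*) in both the sync and async chat-completion paths. A minimal sketch, not part of this diff, of how code that inspects OpenLIT spans could read the renamed constant instead of hard-coding an attribute key; the getattr fallback to the old name is only a defensive assumption for older OpenLIT versions.

# Sketch only: resolve the attribute key from OpenLIT's own constants class.
from openlit.semcov import SemanticConvetion

SERVICE_TIER_KEY = getattr(
    SemanticConvetion, "GEN_AI_REQUEST_SERVICE_TIER",
    getattr(SemanticConvetion, "GEN_AI_OPENAI_REQUEST_SERVICE_TIER", None),
)

def service_tier_of(span):
    # Works for ReadableSpan objects handed to a SpanProcessor or exporter.
    return span.attributes.get(SERVICE_TIER_KEY) if SERVICE_TIER_KEY else None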
@@ -5,7 +5,7 @@ Module for monitoring Phidata calls.
 
  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import (
      handle_exception,
  )
@@ -62,8 +62,10 @@ def phidata_wrap(gen_ai_endpoint, version, environment, application_name,
      SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
      gen_ai_endpoint)
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+ span.set_attribute(SERVICE_NAME,
      application_name)
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+     environment)
  span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ID,
      getattr(instance, 'agent_id', '') or '')
  span.set_attribute(SemanticConvetion.GEN_AI_AGENT_ROLE,
@@ -5,7 +5,7 @@ Module for monitoring Pinecone.
 
  import logging
  from opentelemetry.trace import SpanKind, Status, StatusCode
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
  from openlit.__helpers import handle_exception
  from openlit.semcov import SemanticConvetion
 
@@ -72,9 +72,9 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
      gen_ai_endpoint)
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
      environment)
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+ span.set_attribute(SERVICE_NAME,
      application_name)
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
      SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
@@ -146,11 +146,11 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
  attributes = {
      TELEMETRY_SDK_NAME:
          "openlit",
-     SemanticConvetion.GEN_AI_APPLICATION_NAME:
+     SERVICE_NAME:
          application_name,
      SemanticConvetion.DB_SYSTEM:
          SemanticConvetion.DB_SYSTEM_PINECONE,
-     SemanticConvetion.GEN_AI_ENVIRONMENT:
+     DEPLOYMENT_ENVIRONMENT:
          environment,
      SemanticConvetion.GEN_AI_OPERATION:
          SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
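The Phidata and Pinecone hunks above replace OpenLIT-specific keys (GEN_AI_APPLICATION_NAME, GEN_AI_ENVIRONMENT) with the standard OpenTelemetry resource-attribute constants SERVICE_NAME and DEPLOYMENT_ENVIRONMENT. A small sketch, assuming only that opentelemetry-sdk is installed, showing that these constants are plain strings, so the spans now carry the standard keys; the "my-app" and "staging" values are hypothetical.

# Sketch only: the constants the hunks above switch to are defined by the OpenTelemetry SDK.
from opentelemetry.sdk.resources import (
    SERVICE_NAME,            # "service.name"
    DEPLOYMENT_ENVIRONMENT,  # "deployment.environment"
    TELEMETRY_SDK_NAME,      # "telemetry.sdk.name"
)

attributes = {
    TELEMETRY_SDK_NAME: "openlit",
    SERVICE_NAME: "my-app",             # hypothetical application_name
    DEPLOYMENT_ENVIRONMENT: "staging",  # hypothetical environment
}
print(attributes)  # keys come out as the standard dotted attribute names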
@@ -34,7 +34,7 @@ class PremAIInstrumentor(BaseInstrumentor):
  wrap_function_wrapper(
      "premai.api",
      "ChatCompletionsModule.create",
-     chat("premai.chat.completions", version, environment, application_name,
+     chat(version, environment, application_name,
          tracer, pricing_info, trace_content, metrics, disable_metrics),
  )
 
@@ -42,7 +42,7 @@ class PremAIInstrumentor(BaseInstrumentor):
  wrap_function_wrapper(
      "premai.api",
      "EmbeddingsModule.create",
-     embedding("premai.embeddings", version, environment, application_name,
+     embedding(version, environment, application_name,
          tracer, pricing_info, trace_content, metrics, disable_metrics),
  )
 
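The PremAI hunks drop the hard-coded gen_ai_endpoint argument from the chat and embedding wrapper factories, so the factories now take only the runtime configuration. A minimal sketch of the wrapt wrapper-factory pattern these instrumentors use; the factory name and parameter list mirror the new call in the hunk above, while the body and argument values are illustrative assumptions, not the actual OpenLIT implementation. The patch itself only succeeds if the premai package is importable.

# Sketch only: a factory returning a wrapt-compatible wrapper.
from wrapt import wrap_function_wrapper

def chat(version, environment, application_name,
         tracer, pricing_info, trace_content, metrics, disable_metrics):
    def wrapper(wrapped, instance, args, kwargs):
        # A real wrapper would start a span on `tracer`, record request and
        # response details, then delegate to the wrapped method.
        return wrapped(*args, **kwargs)
    return wrapper

try:
    wrap_function_wrapper(
        "premai.api",
        "ChatCompletionsModule.create",
        chat("1.33.10", "default", "default",
             None, {}, True, None, True),  # illustrative argument values
    )
except ImportError:
    pass  # premai is not installed in this environment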