openlit 1.33.9__py3-none-any.whl → 1.33.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113)
  1. openlit/__helpers.py +78 -0
  2. openlit/__init__.py +41 -13
  3. openlit/instrumentation/ag2/__init__.py +9 -10
  4. openlit/instrumentation/ag2/ag2.py +134 -69
  5. openlit/instrumentation/ai21/__init__.py +6 -5
  6. openlit/instrumentation/ai21/ai21.py +71 -534
  7. openlit/instrumentation/ai21/async_ai21.py +71 -534
  8. openlit/instrumentation/ai21/utils.py +407 -0
  9. openlit/instrumentation/anthropic/__init__.py +3 -3
  10. openlit/instrumentation/anthropic/anthropic.py +5 -5
  11. openlit/instrumentation/anthropic/async_anthropic.py +5 -5
  12. openlit/instrumentation/assemblyai/__init__.py +2 -2
  13. openlit/instrumentation/assemblyai/assemblyai.py +3 -3
  14. openlit/instrumentation/astra/__init__.py +25 -25
  15. openlit/instrumentation/astra/astra.py +7 -7
  16. openlit/instrumentation/astra/async_astra.py +7 -7
  17. openlit/instrumentation/azure_ai_inference/__init__.py +5 -5
  18. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +11 -11
  19. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +11 -11
  20. openlit/instrumentation/bedrock/__init__.py +2 -2
  21. openlit/instrumentation/bedrock/bedrock.py +3 -3
  22. openlit/instrumentation/chroma/__init__.py +9 -9
  23. openlit/instrumentation/chroma/chroma.py +7 -7
  24. openlit/instrumentation/cohere/__init__.py +7 -7
  25. openlit/instrumentation/cohere/async_cohere.py +10 -10
  26. openlit/instrumentation/cohere/cohere.py +11 -11
  27. openlit/instrumentation/controlflow/__init__.py +4 -4
  28. openlit/instrumentation/controlflow/controlflow.py +5 -5
  29. openlit/instrumentation/crawl4ai/__init__.py +3 -3
  30. openlit/instrumentation/crawl4ai/async_crawl4ai.py +5 -5
  31. openlit/instrumentation/crawl4ai/crawl4ai.py +5 -5
  32. openlit/instrumentation/crewai/__init__.py +3 -3
  33. openlit/instrumentation/crewai/crewai.py +6 -4
  34. openlit/instrumentation/dynamiq/__init__.py +5 -5
  35. openlit/instrumentation/dynamiq/dynamiq.py +5 -5
  36. openlit/instrumentation/elevenlabs/__init__.py +5 -5
  37. openlit/instrumentation/elevenlabs/async_elevenlabs.py +4 -5
  38. openlit/instrumentation/elevenlabs/elevenlabs.py +4 -5
  39. openlit/instrumentation/embedchain/__init__.py +2 -2
  40. openlit/instrumentation/embedchain/embedchain.py +9 -9
  41. openlit/instrumentation/firecrawl/__init__.py +3 -3
  42. openlit/instrumentation/firecrawl/firecrawl.py +5 -5
  43. openlit/instrumentation/google_ai_studio/__init__.py +3 -3
  44. openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +3 -3
  45. openlit/instrumentation/google_ai_studio/google_ai_studio.py +3 -3
  46. openlit/instrumentation/gpt4all/__init__.py +5 -5
  47. openlit/instrumentation/gpt4all/gpt4all.py +350 -225
  48. openlit/instrumentation/gpu/__init__.py +5 -5
  49. openlit/instrumentation/groq/__init__.py +5 -5
  50. openlit/instrumentation/groq/async_groq.py +359 -243
  51. openlit/instrumentation/groq/groq.py +359 -243
  52. openlit/instrumentation/haystack/__init__.py +2 -2
  53. openlit/instrumentation/haystack/haystack.py +5 -5
  54. openlit/instrumentation/julep/__init__.py +7 -7
  55. openlit/instrumentation/julep/async_julep.py +6 -6
  56. openlit/instrumentation/julep/julep.py +6 -6
  57. openlit/instrumentation/langchain/__init__.py +15 -9
  58. openlit/instrumentation/langchain/async_langchain.py +388 -0
  59. openlit/instrumentation/langchain/langchain.py +110 -497
  60. openlit/instrumentation/letta/__init__.py +7 -7
  61. openlit/instrumentation/letta/letta.py +10 -8
  62. openlit/instrumentation/litellm/__init__.py +9 -10
  63. openlit/instrumentation/litellm/async_litellm.py +321 -250
  64. openlit/instrumentation/litellm/litellm.py +319 -248
  65. openlit/instrumentation/llamaindex/__init__.py +2 -2
  66. openlit/instrumentation/llamaindex/llamaindex.py +5 -5
  67. openlit/instrumentation/mem0/__init__.py +2 -2
  68. openlit/instrumentation/mem0/mem0.py +5 -5
  69. openlit/instrumentation/milvus/__init__.py +2 -2
  70. openlit/instrumentation/milvus/milvus.py +7 -7
  71. openlit/instrumentation/mistral/__init__.py +13 -13
  72. openlit/instrumentation/mistral/async_mistral.py +426 -253
  73. openlit/instrumentation/mistral/mistral.py +424 -250
  74. openlit/instrumentation/multion/__init__.py +7 -7
  75. openlit/instrumentation/multion/async_multion.py +9 -7
  76. openlit/instrumentation/multion/multion.py +9 -7
  77. openlit/instrumentation/ollama/__init__.py +19 -39
  78. openlit/instrumentation/ollama/async_ollama.py +137 -563
  79. openlit/instrumentation/ollama/ollama.py +136 -563
  80. openlit/instrumentation/ollama/utils.py +333 -0
  81. openlit/instrumentation/openai/__init__.py +11 -11
  82. openlit/instrumentation/openai/async_openai.py +25 -27
  83. openlit/instrumentation/openai/openai.py +25 -27
  84. openlit/instrumentation/phidata/__init__.py +2 -2
  85. openlit/instrumentation/phidata/phidata.py +6 -4
  86. openlit/instrumentation/pinecone/__init__.py +6 -6
  87. openlit/instrumentation/pinecone/pinecone.py +7 -7
  88. openlit/instrumentation/premai/__init__.py +5 -5
  89. openlit/instrumentation/premai/premai.py +268 -219
  90. openlit/instrumentation/qdrant/__init__.py +2 -2
  91. openlit/instrumentation/qdrant/async_qdrant.py +7 -7
  92. openlit/instrumentation/qdrant/qdrant.py +7 -7
  93. openlit/instrumentation/reka/__init__.py +5 -5
  94. openlit/instrumentation/reka/async_reka.py +93 -55
  95. openlit/instrumentation/reka/reka.py +93 -55
  96. openlit/instrumentation/together/__init__.py +9 -9
  97. openlit/instrumentation/together/async_together.py +284 -242
  98. openlit/instrumentation/together/together.py +284 -242
  99. openlit/instrumentation/transformers/__init__.py +3 -3
  100. openlit/instrumentation/transformers/transformers.py +79 -48
  101. openlit/instrumentation/vertexai/__init__.py +19 -69
  102. openlit/instrumentation/vertexai/async_vertexai.py +333 -990
  103. openlit/instrumentation/vertexai/vertexai.py +333 -990
  104. openlit/instrumentation/vllm/__init__.py +3 -3
  105. openlit/instrumentation/vllm/vllm.py +65 -35
  106. openlit/otel/events.py +85 -0
  107. openlit/otel/tracing.py +3 -13
  108. openlit/semcov/__init__.py +16 -4
  109. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/METADATA +2 -2
  110. openlit-1.33.11.dist-info/RECORD +125 -0
  111. openlit-1.33.9.dist-info/RECORD +0 -121
  112. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/LICENSE +0 -0
  113. {openlit-1.33.9.dist-info → openlit-1.33.11.dist-info}/WHEEL +0 -0
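The dominant change across these files is a rename of the content-capture flag: every instrumentor now reads `capture_message_content` instead of `trace_content`, and several instrumentations (Ollama, LangChain, AI21) additionally gain an `event_provider` plus new `utils.py` modules. Below is a hedged sketch of how an application might set the renamed flag at initialization; it assumes `openlit.init()` exposes a `capture_message_content` keyword that mirrors the per-instrumentor kwarg visible in the hunks that follow, and the application/environment names are placeholders.

import openlit

# Assumed usage sketch, not taken from this diff: the keyword name
# capture_message_content mirrors the instrumentor kwarg introduced in 1.33.11
# (formerly trace_content); application_name and environment are placeholders.
openlit.init(
    application_name="demo-app",
    environment="staging",
    capture_message_content=False,  # do not record prompt/completion payloads on spans
)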
openlit/instrumentation/multion/__init__.py

@@ -30,7 +30,7 @@ class MultiOnInstrumentor(BaseInstrumentor):
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-        trace_content = kwargs.get("trace_content", False)
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("multion")
 
@@ -39,19 +39,19 @@ class MultiOnInstrumentor(BaseInstrumentor):
             "multion.client",
             "MultiOn.browse",
             multion_wrap("multion.browse", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.client",
             "MultiOn.retrieve",
             multion_wrap("multion.retrieve", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.sessions.client",
             "SessionsClient.create",
             multion_wrap("multion.sessions.create", version, environment, application_name,
-                         tracer, pricing_info, trace_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # Asynchronus
@@ -59,19 +59,19 @@ class MultiOnInstrumentor(BaseInstrumentor):
             "multion.client",
             "AsyncMultiOn.browse",
             async_multion_wrap("multion.browse", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.client",
             "AsyncMultiOn.retrieve",
             async_multion_wrap("multion.retrieve", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "multion.sessions.client",
             "AsyncSessionsClient.create",
             async_multion_wrap("multion.sessions.create", version, environment, application_name,
-                               tracer, pricing_info, trace_content, metrics, disable_metrics),
+                               tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
 
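For context on the hunks above: openlit registers these instrumentations with wrapt, where `multion_wrap(...)` returns a closure that `wrap_function_wrapper` installs around the target method, and the renamed flag simply gates whether request/response payloads are attached to the span. A minimal sketch of that factory pattern follows; the names `demo_wrap`, `instrument_multion`, and the event/attribute keys are illustrative, not openlit's implementation.

from opentelemetry import trace
from wrapt import wrap_function_wrapper

def demo_wrap(endpoint, tracer, capture_message_content):
    # Factory: returns the wrapt-compatible wrapper that replaces the target method.
    def wrapper(wrapped, instance, args, kwargs):
        with tracer.start_as_current_span(endpoint) as span:
            response = wrapped(*args, **kwargs)
            if capture_message_content:
                # Payloads are recorded only when the flag is enabled.
                span.add_event("demo.content.prompt", {"demo.prompt": str(kwargs)})
            return response
    return wrapper

def instrument_multion(capture_message_content=False):
    # Same shape as MultiOnInstrumentor._instrument above; requires multion installed.
    wrap_function_wrapper(
        "multion.client",
        "MultiOn.browse",
        demo_wrap("multion.browse", trace.get_tracer("demo"), capture_message_content),
    )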
openlit/instrumentation/multion/async_multion.py

@@ -5,7 +5,7 @@ Module for monitoring multion calls.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import (
     handle_exception,
 )
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics):
+                       tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -26,7 +26,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the multion Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of multion usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -62,8 +62,10 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                                    SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
                 span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                    gen_ai_endpoint)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                span.set_attribute(SERVICE_NAME,
                                    application_name)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                                   environment)
 
                 if gen_ai_endpoint == "multion.browse":
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
@@ -73,7 +75,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_RESPONSE_TIME,
                                        response.metadata.processing_time)
 
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                             attributes={
@@ -90,7 +92,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                        kwargs.get("url", ""))
 
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                             attributes={
@@ -108,7 +110,7 @@ def async_multion_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                        kwargs.get("url", ""))
 
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                             attributes={
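One substantive change in this hunk (and in the identical `multion.py` hunk that follows) is that the span no longer uses openlit's own `GEN_AI_APPLICATION_NAME` key; it now records the standard OpenTelemetry `SERVICE_NAME` key and adds `DEPLOYMENT_ENVIRONMENT`. The snippet below only shows what those imported constants resolve to, so it is clear which attribute keys the spans now carry; the values come from the OpenTelemetry semantic conventions re-exported by `opentelemetry.sdk.resources`.

from opentelemetry.sdk.resources import SERVICE_NAME, DEPLOYMENT_ENVIRONMENT

print(SERVICE_NAME)            # service.name
print(DEPLOYMENT_ENVIRONMENT)  # deployment.environment

# So a span produced by 1.33.11 carries:
#   service.name           = <application_name>
#   deployment.environment = <environment>
# in place of the SemanticConvetion.GEN_AI_APPLICATION_NAME attribute used in 1.33.9.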
openlit/instrumentation/multion/multion.py

@@ -5,7 +5,7 @@ Module for monitoring multion calls.
 
 import logging
 from opentelemetry.trace import SpanKind, Status, StatusCode
-from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
+from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
 from openlit.__helpers import (
     handle_exception,
 )
@@ -15,7 +15,7 @@ from openlit.semcov import SemanticConvetion
 logger = logging.getLogger(__name__)
 
 def multion_wrap(gen_ai_endpoint, version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics):
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics):
     """
     Generates a telemetry wrapper for chat completions to collect metrics.
 
@@ -26,7 +26,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
         application_name: Name of the application using the multion Agent.
         tracer: OpenTelemetry tracer for creating spans.
         pricing_info: Information used for calculating the cost of multion usage.
-        trace_content: Flag indicating whether to trace the actual content.
+        capture_message_content: Flag indicating whether to trace the actual content.
 
     Returns:
         A function that wraps the chat completions method to add telemetry.
@@ -62,8 +62,10 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                                    SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
                 span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
                                    gen_ai_endpoint)
-                span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
+                span.set_attribute(SERVICE_NAME,
                                    application_name)
+                span.set_attribute(DEPLOYMENT_ENVIRONMENT,
+                                   environment)
 
                 if gen_ai_endpoint == "multion.browse":
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
@@ -73,7 +75,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_RESPONSE_TIME,
                                        response.metadata.processing_time)
 
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                             attributes={
@@ -90,7 +92,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                        kwargs.get("url", ""))
 
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_PROMPT_EVENT,
                             attributes={
@@ -108,7 +110,7 @@ def multion_wrap(gen_ai_endpoint, version, environment, application_name,
                     span.set_attribute(SemanticConvetion.GEN_AI_AGENT_BROWSE_URL,
                                        kwargs.get("url", ""))
 
-                    if trace_content:
+                    if capture_message_content:
                         span.add_event(
                             name=SemanticConvetion.GEN_AI_CONTENT_COMPLETION_EVENT,
                             attributes={
openlit/instrumentation/ollama/__init__.py

@@ -1,5 +1,6 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
-"""Initializer of Auto Instrumentation of Ollama Functions"""
+"""
+Initializer of Auto Instrumentation of Ollama Functions
+"""
 
 from typing import Collection
 import importlib.metadata
@@ -7,10 +8,10 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
 from wrapt import wrap_function_wrapper
 
 from openlit.instrumentation.ollama.ollama import (
-    chat, embeddings, generate
+    chat, embeddings
 )
 from openlit.instrumentation.ollama.async_ollama import (
-    async_chat, async_embeddings, async_generate
+    async_chat, async_embeddings
 )
 
 _instruments = ("ollama >= 0.2.0",)
@@ -27,9 +28,10 @@ class OllamaInstrumentor(BaseInstrumentor):
         application_name = kwargs.get("application_name", "default_application")
         environment = kwargs.get("environment", "default_environment")
         tracer = kwargs.get("tracer")
+        event_provider = kwargs.get("event_provider")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
-        trace_content = kwargs.get("trace_content", False)
+        capture_message_content = kwargs.get("capture_message_content", False)
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("ollama")
 
@@ -37,66 +39,44 @@ class OllamaInstrumentor(BaseInstrumentor):
         wrap_function_wrapper(
             "ollama",
             "chat",
-            chat("ollama.chat", version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+            chat(version, environment, application_name,
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
         wrap_function_wrapper(
             "ollama",
             "Client.chat",
-            chat("ollama.chat", version, environment, application_name,
-                 tracer, pricing_info, trace_content, metrics, disable_metrics),
+            chat(version, environment, application_name,
+                 tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # sync embeddings
         wrap_function_wrapper(
             "ollama",
             "embeddings",
-            embeddings("ollama.embeddings", version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+            embeddings(version, environment, application_name,
+                       tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
        )
         wrap_function_wrapper(
             "ollama",
             "Client.embeddings",
-            embeddings("ollama.embeddings", version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        # sync generate
-        wrap_function_wrapper(
-            "ollama",
-            "generate",
-            generate("ollama.generate", version, environment, application_name,
-                     tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-        wrap_function_wrapper(
-            "ollama",
-            "Client.generate",
-            generate("ollama.generate", version, environment, application_name,
-                     tracer, pricing_info, trace_content, metrics, disable_metrics),
+            embeddings(version, environment, application_name,
+                       tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # async chat
         wrap_function_wrapper(
             "ollama",
             "AsyncClient.chat",
-            async_chat("ollama.chat", version, environment, application_name,
-                       tracer, pricing_info, trace_content, metrics, disable_metrics),
+            async_chat(version, environment, application_name,
+                       tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
         # async embeddings
         wrap_function_wrapper(
             "ollama",
             "AsyncClient.embeddings",
-            async_embeddings("ollama.embeddings", version, environment, application_name,
-                             tracer, pricing_info, trace_content, metrics, disable_metrics),
-        )
-
-        # aync generate
-        wrap_function_wrapper(
-            "ollama",
-            "AsyncClient.generate",
-            async_generate("ollama.generate", version, environment, application_name,
-                           tracer, pricing_info, trace_content, metrics, disable_metrics),
+            async_embeddings(version, environment, application_name,
+                             tracer, event_provider, pricing_info, capture_message_content, metrics, disable_metrics),
         )
 
     def _uninstrument(self, **kwargs):
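Beyond the flag rename, the Ollama instrumentor above stops wrapping `generate`, `Client.generate`, and `AsyncClient.generate` entirely, drops the `gen_ai_endpoint` argument from the wrapper factories, and threads the new `event_provider` through to them (the shared logic appears to move into the new `openlit/instrumentation/ollama/utils.py` listed above). Applications that relied on automatic spans around `ollama.generate()` therefore lose them on upgrade; a hedged workaround is a small manual span, sketched below with illustrative span and attribute names rather than openlit's conventions.

import ollama
from opentelemetry import trace

tracer = trace.get_tracer("demo-app")

def traced_generate(model, prompt):
    # Manual replacement for the generate() auto-instrumentation removed in 1.33.11.
    with tracer.start_as_current_span("ollama.generate") as span:
        span.set_attribute("gen_ai.request.model", model)
        response = ollama.generate(model=model, prompt=prompt)
        return response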