openlit 1.34.22__tar.gz → 1.34.24__tar.gz

This diff compares the contents of two package versions as published to a supported public registry. It is provided for informational purposes only.
Files changed (164)
  1. {openlit-1.34.22 → openlit-1.34.24}/PKG-INFO +1 -1
  2. {openlit-1.34.22 → openlit-1.34.24}/pyproject.toml +1 -1
  3. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/__helpers.py +48 -3
  4. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/__init__.py +3 -0
  5. openlit-1.34.24/src/openlit/instrumentation/ag2/__init__.py +53 -0
  6. openlit-1.34.24/src/openlit/instrumentation/ag2/ag2.py +114 -0
  7. openlit-1.34.24/src/openlit/instrumentation/ag2/async_ag2.py +114 -0
  8. openlit-1.34.24/src/openlit/instrumentation/ag2/utils.py +175 -0
  9. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/langchain/__init__.py +11 -35
  10. openlit-1.34.24/src/openlit/instrumentation/langchain/async_langchain.py +102 -0
  11. openlit-1.34.24/src/openlit/instrumentation/langchain/langchain.py +102 -0
  12. openlit-1.34.24/src/openlit/instrumentation/langchain/utils.py +252 -0
  13. openlit-1.34.24/src/openlit/instrumentation/langchain_community/__init__.py +74 -0
  14. openlit-1.34.24/src/openlit/instrumentation/langchain_community/async_langchain_community.py +49 -0
  15. openlit-1.34.24/src/openlit/instrumentation/langchain_community/langchain_community.py +49 -0
  16. openlit-1.34.24/src/openlit/instrumentation/langchain_community/utils.py +69 -0
  17. openlit-1.34.24/src/openlit/instrumentation/pinecone/__init__.py +174 -0
  18. openlit-1.34.24/src/openlit/instrumentation/pinecone/async_pinecone.py +59 -0
  19. openlit-1.34.24/src/openlit/instrumentation/pinecone/pinecone.py +59 -0
  20. openlit-1.34.24/src/openlit/instrumentation/pinecone/utils.py +182 -0
  21. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/semcov/__init__.py +13 -1
  22. openlit-1.34.22/src/openlit/instrumentation/ag2/__init__.py +0 -49
  23. openlit-1.34.22/src/openlit/instrumentation/ag2/ag2.py +0 -163
  24. openlit-1.34.22/src/openlit/instrumentation/langchain/async_langchain.py +0 -388
  25. openlit-1.34.22/src/openlit/instrumentation/langchain/langchain.py +0 -362
  26. openlit-1.34.22/src/openlit/instrumentation/pinecone/__init__.py +0 -66
  27. openlit-1.34.22/src/openlit/instrumentation/pinecone/pinecone.py +0 -173
  28. {openlit-1.34.22 → openlit-1.34.24}/LICENSE +0 -0
  29. {openlit-1.34.22 → openlit-1.34.24}/README.md +0 -0
  30. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/evals/__init__.py +0 -0
  31. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/evals/all.py +0 -0
  32. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/evals/bias_detection.py +0 -0
  33. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/evals/hallucination.py +0 -0
  34. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/evals/toxicity.py +0 -0
  35. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/evals/utils.py +0 -0
  36. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/guard/__init__.py +0 -0
  37. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/guard/all.py +0 -0
  38. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/guard/prompt_injection.py +0 -0
  39. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/guard/restrict_topic.py +0 -0
  40. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/guard/sensitive_topic.py +0 -0
  41. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/guard/utils.py +0 -0
  42. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ai21/__init__.py +0 -0
  43. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ai21/ai21.py +0 -0
  44. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
  45. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ai21/utils.py +0 -0
  46. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  47. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
  48. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
  49. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/anthropic/utils.py +0 -0
  50. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
  51. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
  52. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/assemblyai/utils.py +0 -0
  53. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/astra/__init__.py +0 -0
  54. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/astra/astra.py +0 -0
  55. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/astra/async_astra.py +0 -0
  56. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/astra/utils.py +0 -0
  57. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  58. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
  59. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
  60. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
  61. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
  62. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
  63. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/bedrock/utils.py +0 -0
  64. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  65. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/chroma/chroma.py +0 -0
  66. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  67. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
  68. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/cohere/cohere.py +0 -0
  69. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/cohere/utils.py +0 -0
  70. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
  71. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
  72. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
  73. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
  74. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
  75. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/crewai/__init__.py +0 -0
  76. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/crewai/crewai.py +0 -0
  77. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
  78. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
  79. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  80. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
  81. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
  82. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/elevenlabs/utils.py +0 -0
  83. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  84. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
  85. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
  86. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
  87. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  88. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
  89. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
  90. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/google_ai_studio/utils.py +0 -0
  91. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  92. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
  93. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/gpt4all/utils.py +0 -0
  94. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/gpu/__init__.py +0 -0
  95. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/groq/__init__.py +0 -0
  96. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/groq/async_groq.py +0 -0
  97. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/groq/groq.py +0 -0
  98. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/groq/utils.py +0 -0
  99. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  100. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/haystack/haystack.py +0 -0
  101. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/julep/__init__.py +0 -0
  102. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/julep/async_julep.py +0 -0
  103. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/julep/julep.py +0 -0
  104. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/letta/__init__.py +0 -0
  105. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/letta/letta.py +0 -0
  106. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/litellm/__init__.py +0 -0
  107. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/litellm/async_litellm.py +0 -0
  108. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/litellm/litellm.py +0 -0
  109. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/litellm/utils.py +0 -0
  110. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  111. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
  112. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/mem0/__init__.py +0 -0
  113. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/mem0/mem0.py +0 -0
  114. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  115. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/milvus/milvus.py +0 -0
  116. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  117. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
  118. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/mistral/mistral.py +0 -0
  119. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/mistral/utils.py +0 -0
  120. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/multion/__init__.py +0 -0
  121. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/multion/async_multion.py +0 -0
  122. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/multion/multion.py +0 -0
  123. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  124. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ollama/async_ollama.py +0 -0
  125. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ollama/ollama.py +0 -0
  126. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/ollama/utils.py +0 -0
  127. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/openai/__init__.py +0 -0
  128. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/openai/async_openai.py +0 -0
  129. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/openai/openai.py +0 -0
  130. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/openai/utils.py +0 -0
  131. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
  132. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
  133. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/phidata/__init__.py +0 -0
  134. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/phidata/phidata.py +0 -0
  135. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/premai/__init__.py +0 -0
  136. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/premai/premai.py +0 -0
  137. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/premai/utils.py +0 -0
  138. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/pydantic_ai/__init__.py +0 -0
  139. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/pydantic_ai/pydantic_ai.py +0 -0
  140. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/pydantic_ai/utils.py +0 -0
  141. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  142. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
  143. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
  144. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/reka/__init__.py +0 -0
  145. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/reka/async_reka.py +0 -0
  146. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/reka/reka.py +0 -0
  147. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/reka/utils.py +0 -0
  148. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/together/__init__.py +0 -0
  149. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/together/async_together.py +0 -0
  150. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/together/together.py +0 -0
  151. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/together/utils.py +0 -0
  152. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  153. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/transformers/transformers.py +0 -0
  154. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/transformers/utils.py +0 -0
  155. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  156. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
  157. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vertexai/utils.py +0 -0
  158. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
  159. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  160. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vllm/utils.py +0 -0
  161. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/instrumentation/vllm/vllm.py +0 -0
  162. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/otel/events.py +0 -0
  163. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/otel/metrics.py +0 -0
  164. {openlit-1.34.22 → openlit-1.34.24}/src/openlit/otel/tracing.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: openlit
- Version: 1.34.22
+ Version: 1.34.24
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
  License: Apache-2.0
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "openlit"
- version = "1.34.22"
+ version = "1.34.24"
  description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
  authors = ["OpenLIT"]
  license = "Apache-2.0"
@@ -205,11 +205,22 @@ def set_server_address_and_port(client_instance: Any,
      config = getattr(client_instance, 'sdk_configuration', None)
      base_url = getattr(config, 'server_url', None)
 
+     if not base_url:
+         # Attempt to get host from instance.config.host (used by Pinecone and other vector DBs)
+         config = getattr(client_instance, 'config', None)
+         base_url = getattr(config, 'host', None)
+
      if base_url:
          if isinstance(base_url, str):
-             url = urlparse(base_url)
-             server_address = url.hostname or default_server_address
-             server_port = url.port if url.port is not None else default_server_port
+             # Check if it's a full URL or just a hostname
+             if base_url.startswith(('http://', 'https://')):
+                 url = urlparse(base_url)
+                 server_address = url.hostname or default_server_address
+                 server_port = url.port if url.port is not None else default_server_port
+             else:
+                 # If it's just a hostname (like Pinecone's case), use it directly
+                 server_address = base_url
+                 server_port = default_server_port
          else:  # base_url might not be a str; handle as an object.
              server_address = getattr(base_url, 'host', None) or default_server_address
              port_attr = getattr(base_url, 'port', None)
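
The practical effect of this change: a bare hostname (as returned by Pinecone's instance.config.host) no longer gets mangled, since urlparse returns hostname=None for scheme-less strings. A minimal standalone sketch of the branching; the resolve_endpoint name and defaults are illustrative, not part of openlit:

    from urllib.parse import urlparse

    def resolve_endpoint(base_url, default_address="localhost", default_port=443):
        """Illustrative re-statement of the URL-vs-hostname branch above."""
        if base_url.startswith(("http://", "https://")):
            url = urlparse(base_url)
            return (url.hostname or default_address,
                    url.port if url.port is not None else default_port)
        # Bare hostname, e.g. a Pinecone index host
        return base_url, default_port

    print(resolve_endpoint("https://api.openai.com:8443/v1"))
    # ('api.openai.com', 8443)
    print(resolve_endpoint("my-index-abc123.svc.pinecone.io"))
    # ('my-index-abc123.svc.pinecone.io', 443)
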
@@ -442,3 +453,37 @@ def record_image_metrics(metrics, gen_ai_operation, gen_ai_system, server_addres
      metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
      metrics["genai_requests"].add(1, attributes)
      metrics["genai_cost"].record(cost, attributes)
+
+ def common_db_span_attributes(scope, db_system, server_address, server_port,
+                               environment, application_name, version):
+     """
+     Set common span attributes for database operations.
+     """
+
+     scope._span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
+     scope._span.set_attribute(SemanticConvention.GEN_AI_OPERATION, SemanticConvention.GEN_AI_OPERATION_TYPE_VECTORDB)
+     scope._span.set_attribute(SemanticConvention.DB_SYSTEM_NAME, db_system)
+     scope._span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
+     scope._span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
+     scope._span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
+     scope._span.set_attribute(SERVICE_NAME, application_name)
+     scope._span.set_attribute(SemanticConvention.DB_SDK_VERSION, version)
+
+ def record_db_metrics(metrics, db_system, server_address, server_port,
+                       environment, application_name, start_time, end_time):
+     """
+     Record database-specific metrics for the operation.
+     """
+
+     attributes = create_metrics_attributes(
+         operation=SemanticConvention.GEN_AI_OPERATION_TYPE_VECTORDB,
+         system=db_system,
+         request_model=db_system,
+         server_address=server_address,
+         server_port=server_port,
+         response_model=db_system,
+         service_name=application_name,
+         deployment_environment=environment,
+     )
+     metrics["db_requests"].add(1, attributes)
+     metrics["db_client_operation_duration"].record(end_time - start_time, attributes)
@@ -42,6 +42,7 @@ from openlit.instrumentation.premai import PremAIInstrumentor
  from openlit.instrumentation.assemblyai import AssemblyAIInstrumentor
  from openlit.instrumentation.azure_ai_inference import AzureAIInferenceInstrumentor
  from openlit.instrumentation.langchain import LangChainInstrumentor
+ from openlit.instrumentation.langchain_community import LangChainCommunityInstrumentor
  from openlit.instrumentation.llamaindex import LlamaIndexInstrumentor
  from openlit.instrumentation.haystack import HaystackInstrumentor
  from openlit.instrumentation.embedchain import EmbedChainInstrumentor
@@ -267,6 +268,7 @@ def init(
          "google-ai-studio": "google.genai",
          "azure-ai-inference": "azure.ai.inference",
          "langchain": "langchain",
+         "langchain_community": "langchain_community",
          "llama_index": "llama_index",
          "haystack": "haystack",
          "embedchain": "embedchain",
@@ -387,6 +389,7 @@ def init(
          "google-ai-studio": GoogleAIStudioInstrumentor(),
          "azure-ai-inference": AzureAIInferenceInstrumentor(),
          "langchain": LangChainInstrumentor(),
+         "langchain_community": LangChainCommunityInstrumentor(),
          "llama_index": LlamaIndexInstrumentor(),
          "haystack": HaystackInstrumentor(),
          "embedchain": EmbedChainInstrumentor(),
@@ -0,0 +1,53 @@
+ """Initializer of Auto Instrumentation of AG2 Functions"""
+
+ from typing import Collection
+ import importlib.metadata
+ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+ from wrapt import wrap_function_wrapper
+
+ from openlit.instrumentation.ag2.ag2 import (
+     conversable_agent, agent_run
+ )
+ from openlit.instrumentation.ag2.async_ag2 import (
+     async_conversable_agent, async_agent_run
+ )
+
+ _instruments = ("ag2 >= 0.3.2",)
+
+ class AG2Instrumentor(BaseInstrumentor):
+     """
+     An instrumentor for AG2 client library.
+     """
+
+     def instrumentation_dependencies(self) -> Collection[str]:
+         return _instruments
+
+     def _instrument(self, **kwargs):
+         version = importlib.metadata.version("ag2")
+         environment = kwargs.get("environment", "default")
+         application_name = kwargs.get("application_name", "default")
+         tracer = kwargs.get("tracer")
+         pricing_info = kwargs.get("pricing_info", {})
+         capture_message_content = kwargs.get("capture_message_content", False)
+         metrics = kwargs.get("metrics_dict")
+         disable_metrics = kwargs.get("disable_metrics")
+
+         # sync conversable agent
+         wrap_function_wrapper(
+             "autogen.agentchat.conversable_agent",
+             "ConversableAgent.__init__",
+             conversable_agent(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+         # sync agent run
+         wrap_function_wrapper(
+             "autogen.agentchat.conversable_agent",
+             "ConversableAgent.run",
+             agent_run(version, environment, application_name,
+                 tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+         )
+
+     def _uninstrument(self, **kwargs):
+         # Proper uninstrumentation logic to revert patched methods
+         pass
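
Given the wrapping above, constructing and running a ConversableAgent is enough to emit the create-agent and execute-agent-task spans. A hedged sketch, assuming ag2 is installed and an LLM backend (e.g. an OpenAI key) is configured; the agent name and prompt are placeholders:

    import openlit
    from autogen import ConversableAgent

    openlit.init(application_name="ag2-demo", environment="dev")

    # ConversableAgent.__init__ is wrapped: emits the create-agent span for "assistant"
    agent = ConversableAgent(
        name="assistant",
        system_message="You are a helpful assistant.",
        llm_config={"model": "gpt-4o"},
    )

    # ConversableAgent.run is wrapped: emits the execute-agent-task span for "assistant"
    result = agent.run("Summarize what tracing buys us in one sentence.")
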
@@ -0,0 +1,114 @@
+ """
+ Module for monitoring AG2 API calls.
+ """
+
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port
+ )
+ from openlit.instrumentation.ag2.utils import (
+     process_agent_creation,
+     process_agent_run,
+ )
+ from openlit.semcov import SemanticConvention
+
+ def conversable_agent(version, environment, application_name, tracer, pricing_info,
+                       capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for AG2 conversable agent creation.
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the AG2 conversable agent creation call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 80)
+         agent_name = kwargs.get("name", "NOT_FOUND")
+         llm_config = kwargs.get("llm_config", {})
+         system_message = kwargs.get("system_message", "")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {agent_name}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = wrapped(*args, **kwargs)
+
+             try:
+                 process_agent_creation(
+                     agent_name=agent_name,
+                     llm_config=llm_config,
+                     system_message=system_message,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version
+                 )
+
+             except Exception as e:
+                 handle_exception(span, e)
+
+             return response
+
+     return wrapper
+
+ def agent_run(version, environment, application_name, tracer, pricing_info,
+               capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for AG2 agent run execution.
+     """
+
+     def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the AG2 agent run execution call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 80)
+
+         # Extract agent name from instance
+         agent_name = getattr(instance, "name", "NOT_FOUND")
+
+         # Extract model from instance llm_config
+         request_model = "gpt-4o"
+         if hasattr(instance, "llm_config") and isinstance(instance.llm_config, dict):
+             request_model = instance.llm_config.get("model", "gpt-4o")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {agent_name}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = wrapped(*args, **kwargs)
+
+             try:
+                 response = process_agent_run(
+                     response=response,
+                     agent_name=agent_name,
+                     request_model=request_model,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version
+                 )
+
+             except Exception as e:
+                 handle_exception(span, e)
+
+             return response
+
+     return wrapper
@@ -0,0 +1,114 @@
+ """
+ Module for monitoring AG2 API calls (async version).
+ """
+
+ import time
+ from opentelemetry.trace import SpanKind
+ from openlit.__helpers import (
+     handle_exception,
+     set_server_address_and_port
+ )
+ from openlit.instrumentation.ag2.utils import (
+     process_agent_creation,
+     process_agent_run,
+ )
+ from openlit.semcov import SemanticConvention
+
+ def async_conversable_agent(version, environment, application_name, tracer, pricing_info,
+                             capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for AG2 async conversable agent creation.
+     """
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the AG2 async conversable agent creation call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 80)
+         agent_name = kwargs.get("name", "NOT_FOUND")
+         llm_config = kwargs.get("llm_config", {})
+         system_message = kwargs.get("system_message", "")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {agent_name}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = await wrapped(*args, **kwargs)
+
+             try:
+                 process_agent_creation(
+                     agent_name=agent_name,
+                     llm_config=llm_config,
+                     system_message=system_message,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version
+                 )
+
+             except Exception as e:
+                 handle_exception(span, e)
+
+             return response
+
+     return wrapper
+
+ def async_agent_run(version, environment, application_name, tracer, pricing_info,
+                     capture_message_content, metrics, disable_metrics):
+     """
+     Generates a telemetry wrapper for AG2 async agent run execution.
+     """
+
+     async def wrapper(wrapped, instance, args, kwargs):
+         """
+         Wraps the AG2 async agent run execution call.
+         """
+
+         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 80)
+
+         # Extract agent name from instance
+         agent_name = getattr(instance, "name", "NOT_FOUND")
+
+         # Extract model from instance llm_config
+         request_model = "gpt-4o"
+         if hasattr(instance, "llm_config") and isinstance(instance.llm_config, dict):
+             request_model = instance.llm_config.get("model", "gpt-4o")
+
+         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {agent_name}"
+
+         with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+             start_time = time.time()
+             response = await wrapped(*args, **kwargs)
+
+             try:
+                 response = process_agent_run(
+                     response=response,
+                     agent_name=agent_name,
+                     request_model=request_model,
+                     pricing_info=pricing_info,
+                     server_port=server_port,
+                     server_address=server_address,
+                     environment=environment,
+                     application_name=application_name,
+                     metrics=metrics,
+                     start_time=start_time,
+                     span=span,
+                     capture_message_content=capture_message_content,
+                     disable_metrics=disable_metrics,
+                     version=version
+                 )
+
+             except Exception as e:
+                 handle_exception(span, e)
+
+             return response
+
+     return wrapper
@@ -0,0 +1,175 @@
+ """
+ AG2 OpenTelemetry instrumentation utility functions
+ """
+ import time
+
+ from opentelemetry.trace import Status, StatusCode
+
+ from openlit.__helpers import (
+     get_chat_model_cost,
+     common_span_attributes,
+     record_completion_metrics,
+ )
+ from openlit.semcov import SemanticConvention
+
+ def calculate_tokens_and_cost(response, request_model, pricing_info):
+     """
+     Calculate the input, output tokens, and their respective costs from AG2 response.
+     """
+     input_tokens = 0
+     output_tokens = 0
+
+     # Early return if response doesn't have cost data
+     if not hasattr(response, "cost") or response.cost is None:
+         cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+         return input_tokens, output_tokens, cost
+
+     try:
+         input_tokens, output_tokens = _extract_tokens_from_cost(response.cost)
+     except (AttributeError, TypeError):
+         # If there's any issue accessing cost data, default to 0 tokens
+         input_tokens = 0
+         output_tokens = 0
+
+     cost = get_chat_model_cost(request_model, pricing_info, input_tokens, output_tokens)
+     return input_tokens, output_tokens, cost
+
+ def _extract_tokens_from_cost(cost_data):
+     """
+     Extract input and output tokens from AG2 cost data structure.
+     """
+     input_tokens = 0
+     output_tokens = 0
+
+     for usage_data in cost_data.values():
+         if not isinstance(usage_data, dict):
+             continue
+
+         for model_data in usage_data.values():
+             if isinstance(model_data, dict):
+                 input_tokens += model_data.get("prompt_tokens", 0)
+                 output_tokens += model_data.get("completion_tokens", 0)
+
+     return input_tokens, output_tokens
+
+ def format_content(chat_history):
+     """
+     Format the chat history into a string for span events.
+     """
+     if not chat_history:
+         return ""
+
+     formatted_messages = []
+     for chat in chat_history:
+         role = chat.get("role", "user")
+         content = chat.get("content", "")
+         formatted_messages.append(f"{role}: {content}")
+
+     return "\n".join(formatted_messages)
+
+ def common_agent_logic(scope, pricing_info, environment, application_name, metrics,
+                        capture_message_content, disable_metrics, version, operation_type):
+     """
+     Process agent request and generate Telemetry
+     """
+
+     # Common Span Attributes
+     common_span_attributes(scope,
+         operation_type, SemanticConvention.GEN_AI_SYSTEM_AG2,
+         scope._server_address, scope._server_port, scope._request_model, scope._response_model,
+         environment, application_name, False, 0, scope._end_time - scope._start_time, version)
+
+     # Span Attributes for Agent-specific parameters
+     scope._span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, scope._agent_name)
+
+     # Span Attributes for Response parameters
+     if hasattr(scope, "_input_tokens"):
+         scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+         scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+         scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+         scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, scope._cost)
+
+     # Span Attributes for Content
+     if capture_message_content and hasattr(scope, "_chat_history"):
+         chat_content = format_content(scope._chat_history)
+         scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, chat_content)
+
+         # To be removed once the change to span_attributes (from span events) is complete
+         scope._span.add_event(
+             name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+             attributes={
+                 SemanticConvention.GEN_AI_CONTENT_COMPLETION: chat_content,
+             },
+         )
+
+     # Set agent description for create agent operation
+     if hasattr(scope, "_system_message"):
+         scope._span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, scope._system_message)
+
+     scope._span.set_status(Status(StatusCode.OK))
+
+     # Metrics
+     if not disable_metrics and hasattr(scope, "_input_tokens"):
+         record_completion_metrics(metrics, operation_type, SemanticConvention.GEN_AI_SYSTEM_AG2,
+             scope._server_address, scope._server_port, scope._request_model, scope._response_model, environment,
+             application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
+             scope._cost, 0, scope._end_time - scope._start_time)
+
+ def process_agent_creation(agent_name, llm_config, system_message, pricing_info, server_port, server_address,
+                            environment, application_name, metrics, start_time, span, capture_message_content=False,
+                            disable_metrics=False, version="1.0.0", **kwargs):
+     """
+     Process agent creation and generate Telemetry
+     """
+
+     # Create scope object
+     scope = type("GenericScope", (), {})()
+
+     scope._start_time = start_time
+     scope._end_time = time.time()
+     scope._span = span
+     scope._agent_name = agent_name
+     scope._request_model = llm_config.get("model", "gpt-4o")
+     scope._response_model = scope._request_model
+     scope._system_message = system_message
+     scope._server_address, scope._server_port = server_address, server_port
+
+     common_agent_logic(scope, pricing_info, environment, application_name, metrics,
+         capture_message_content, disable_metrics, version, SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT)
+
+ def process_agent_run(response, agent_name, request_model, pricing_info, server_port, server_address,
+                       environment, application_name, metrics, start_time, span, capture_message_content=False,
+                       disable_metrics=False, version="1.0.0", **kwargs):
+     """
+     Process agent run and generate Telemetry
+     """
+
+     # Create scope object
+     scope = type("GenericScope", (), {})()
+
+     scope._start_time = start_time
+     scope._end_time = time.time()
+     scope._span = span
+     scope._agent_name = agent_name
+     scope._request_model = request_model
+     scope._chat_history = getattr(response, "chat_history", [])
+     scope._server_address, scope._server_port = server_address, server_port
+
+     # Calculate tokens and cost
+     scope._input_tokens, scope._output_tokens, scope._cost = calculate_tokens_and_cost(
+         response, request_model, pricing_info)
+
+     # Extract response model from cost data
+     try:
+         if hasattr(response, "cost") and response.cost is not None:
+             cost_data = response.cost.get("usage_including_cached_inference", {})
+             scope._response_model = list(cost_data.keys())[1] if len(cost_data) > 1 else request_model
+         else:
+             scope._response_model = request_model
+     except (AttributeError, IndexError, KeyError, TypeError):
+         scope._response_model = request_model
+
+     common_agent_logic(scope, pricing_info, environment, application_name, metrics,
+         capture_message_content, disable_metrics, version, SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK)
+
+     return response
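
For reference, the nested structure that _extract_tokens_from_cost walks looks roughly like the sketch below; the bucket and model names are illustrative of AG2's ChatResult.cost payload, not guaranteed:

    from openlit.instrumentation.ag2.utils import _extract_tokens_from_cost

    cost = {
        "usage_including_cached_inference": {
            "total_cost": 0.0042,  # non-dict values are skipped by the walker
            "gpt-4o": {"prompt_tokens": 120, "completion_tokens": 45, "cost": 0.0042},
        },
        "usage_excluding_cached_inference": {
            "gpt-4o": {"prompt_tokens": 120, "completion_tokens": 45, "cost": 0.0042},
        },
    }

    print(_extract_tokens_from_cost(cost))  # (240, 90) -- both buckets are summed

This shape is also why process_agent_run reads list(cost_data.keys())[1] to recover the response model: index 0 is typically the total_cost entry, and the first model name follows it.
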
@@ -1,4 +1,3 @@
- # pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
  """Initializer of Auto Instrumentation of LangChain Functions"""
  from typing import Collection
  import importlib.metadata
@@ -6,41 +5,17 @@ from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
  from wrapt import wrap_function_wrapper
 
  from openlit.instrumentation.langchain.langchain import (
-     general_wrap,
      hub,
      chat
  )
  from openlit.instrumentation.langchain.async_langchain import (
+     async_hub,
      async_chat
  )
 
  _instruments = ("langchain >= 0.1.20",)
 
  WRAPPED_METHODS = [
-     {
-         "package": "langchain_community.document_loaders.base",
-         "object": "BaseLoader.load",
-         "endpoint": "langchain.retrieve.load",
-         "wrapper": general_wrap,
-     },
-     {
-         "package": "langchain_community.document_loaders.base",
-         "object": "BaseLoader.aload",
-         "endpoint": "langchain.retrieve.load",
-         "wrapper": general_wrap,
-     },
-     {
-         "package": "langchain_text_splitters.base",
-         "object": "TextSplitter.split_documents",
-         "endpoint": "langchain.retrieve.split_documents",
-         "wrapper": general_wrap,
-     },
-     {
-         "package": "langchain_text_splitters.base",
-         "object": "TextSplitter.create_documents",
-         "endpoint": "langchain.retrieve.create_documents",
-         "wrapper": general_wrap,
-     },
      {
          "package": "langchain.hub",
          "object": "pull",
@@ -79,27 +54,29 @@ WRAPPED_METHODS = [
      },
      {
          "package": "langchain.chains.base",
-         "object": "Chain.invoke",
+         "object": "Chain.ainvoke",
          "endpoint": "langchain.chain.invoke",
          "wrapper": async_chat,
      }
  ]
 
  class LangChainInstrumentor(BaseInstrumentor):
-     """An instrumentor for Cohere's client library."""
+     """
+     An instrumentor for LangChain client library.
+     """
 
      def instrumentation_dependencies(self) -> Collection[str]:
          return _instruments
 
      def _instrument(self, **kwargs):
-         application_name = kwargs.get("application_name")
-         environment = kwargs.get("environment")
+         version = importlib.metadata.version("langchain")
+         environment = kwargs.get("environment", "default")
+         application_name = kwargs.get("application_name", "default")
          tracer = kwargs.get("tracer")
-         pricing_info = kwargs.get("pricing_info")
-         capture_message_content = kwargs.get("capture_message_content")
+         pricing_info = kwargs.get("pricing_info", {})
+         capture_message_content = kwargs.get("capture_message_content", False)
          metrics = kwargs.get("metrics_dict")
          disable_metrics = kwargs.get("disable_metrics")
-         version = importlib.metadata.version("langchain")
 
          for wrapped_method in WRAPPED_METHODS:
              wrap_package = wrapped_method.get("package")
@@ -110,9 +87,8 @@ class LangChainInstrumentor(BaseInstrumentor):
                  wrap_package,
                  wrap_object,
                  wrapper(gen_ai_endpoint, version, environment, application_name,
-                     tracer, pricing_info, capture_message_content, metrics, disable_metrics),
+                         tracer, pricing_info, capture_message_content, metrics, disable_metrics),
              )
 
-     @staticmethod
      def _uninstrument(self, **kwargs):
          pass