openlit 1.33.18__tar.gz → 1.33.20__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (133)
  1. {openlit-1.33.18 → openlit-1.33.20}/PKG-INFO +8 -8
  2. {openlit-1.33.18 → openlit-1.33.20}/pyproject.toml +8 -8
  3. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/__helpers.py +11 -41
  4. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/__init__.py +3 -3
  5. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/evals/utils.py +7 -7
  6. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/guard/utils.py +7 -7
  7. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ag2/ag2.py +24 -24
  8. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ai21/ai21.py +3 -3
  9. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ai21/async_ai21.py +3 -3
  10. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ai21/utils.py +59 -59
  11. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/anthropic/anthropic.py +2 -2
  12. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/anthropic/async_anthropic.py +2 -2
  13. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/anthropic/utils.py +34 -34
  14. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/assemblyai/assemblyai.py +24 -24
  15. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/astra/astra.py +3 -3
  16. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/astra/async_astra.py +3 -3
  17. openlit-1.33.20/src/openlit/instrumentation/astra/utils.py +102 -0
  18. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +10 -10
  19. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +10 -10
  20. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/azure_ai_inference/utils.py +38 -38
  21. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/bedrock/__init__.py +2 -1
  22. openlit-1.33.20/src/openlit/instrumentation/bedrock/bedrock.py +77 -0
  23. openlit-1.33.20/src/openlit/instrumentation/bedrock/utils.py +252 -0
  24. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/chroma/chroma.py +57 -57
  25. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/cohere/async_cohere.py +88 -88
  26. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/cohere/cohere.py +88 -88
  27. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/controlflow/controlflow.py +15 -15
  28. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +14 -14
  29. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +14 -14
  30. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/crewai/crewai.py +22 -22
  31. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/dynamiq/dynamiq.py +19 -19
  32. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +24 -25
  33. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +23 -25
  34. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/embedchain/embedchain.py +15 -15
  35. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/firecrawl/firecrawl.py +10 -10
  36. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +33 -33
  37. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +33 -33
  38. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/gpt4all/gpt4all.py +78 -78
  39. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/gpu/__init__.py +8 -8
  40. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/groq/async_groq.py +74 -74
  41. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/groq/groq.py +74 -74
  42. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/haystack/haystack.py +6 -6
  43. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/julep/async_julep.py +14 -14
  44. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/julep/julep.py +14 -14
  45. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/langchain/async_langchain.py +39 -39
  46. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/langchain/langchain.py +39 -39
  47. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/letta/letta.py +26 -26
  48. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/litellm/async_litellm.py +94 -94
  49. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/litellm/litellm.py +94 -94
  50. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/llamaindex/llamaindex.py +7 -7
  51. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/mem0/mem0.py +13 -13
  52. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/milvus/milvus.py +47 -47
  53. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/mistral/async_mistral.py +88 -88
  54. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/mistral/mistral.py +88 -88
  55. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/multion/async_multion.py +21 -21
  56. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/multion/multion.py +21 -21
  57. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ollama/async_ollama.py +3 -3
  58. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ollama/ollama.py +3 -3
  59. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ollama/utils.py +50 -50
  60. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/openai/async_openai.py +225 -225
  61. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/openai/openai.py +225 -225
  62. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/openai_agents/openai_agents.py +11 -11
  63. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/phidata/phidata.py +15 -15
  64. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/pinecone/pinecone.py +43 -43
  65. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/premai/premai.py +86 -86
  66. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/qdrant/async_qdrant.py +95 -95
  67. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/qdrant/qdrant.py +99 -99
  68. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/reka/async_reka.py +33 -33
  69. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/reka/reka.py +33 -33
  70. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/together/async_together.py +90 -90
  71. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/together/together.py +90 -90
  72. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/transformers/transformers.py +26 -26
  73. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/vertexai/async_vertexai.py +64 -64
  74. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/vertexai/vertexai.py +64 -64
  75. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/vllm/vllm.py +24 -24
  76. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/otel/metrics.py +11 -11
  77. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/semcov/__init__.py +3 -3
  78. openlit-1.33.18/src/openlit/instrumentation/astra/utils.py +0 -102
  79. openlit-1.33.18/src/openlit/instrumentation/bedrock/bedrock.py +0 -259
  80. {openlit-1.33.18 → openlit-1.33.20}/LICENSE +0 -0
  81. {openlit-1.33.18 → openlit-1.33.20}/README.md +0 -0
  82. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/evals/__init__.py +0 -0
  83. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/evals/all.py +0 -0
  84. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/evals/bias_detection.py +0 -0
  85. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/evals/hallucination.py +0 -0
  86. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/evals/toxicity.py +0 -0
  87. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/guard/__init__.py +0 -0
  88. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/guard/all.py +0 -0
  89. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/guard/prompt_injection.py +0 -0
  90. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/guard/restrict_topic.py +0 -0
  91. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/guard/sensitive_topic.py +0 -0
  92. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ag2/__init__.py +0 -0
  93. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ai21/__init__.py +0 -0
  94. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
  95. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
  96. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/astra/__init__.py +0 -0
  97. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
  98. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/chroma/__init__.py +0 -0
  99. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/cohere/__init__.py +0 -0
  100. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
  101. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
  102. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/crewai/__init__.py +0 -0
  103. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
  104. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
  105. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
  106. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
  107. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
  108. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
  109. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/groq/__init__.py +0 -0
  110. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/haystack/__init__.py +0 -0
  111. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/julep/__init__.py +0 -0
  112. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/langchain/__init__.py +0 -0
  113. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/letta/__init__.py +0 -0
  114. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/litellm/__init__.py +0 -0
  115. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
  116. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/mem0/__init__.py +0 -0
  117. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/milvus/__init__.py +0 -0
  118. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/mistral/__init__.py +0 -0
  119. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/multion/__init__.py +0 -0
  120. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/ollama/__init__.py +0 -0
  121. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/openai/__init__.py +0 -0
  122. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
  123. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/phidata/__init__.py +0 -0
  124. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
  125. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/premai/__init__.py +0 -0
  126. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
  127. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/reka/__init__.py +0 -0
  128. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/together/__init__.py +0 -0
  129. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/transformers/__init__.py +0 -0
  130. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
  131. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/instrumentation/vllm/__init__.py +0 -0
  132. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/otel/events.py +0 -0
  133. {openlit-1.33.18 → openlit-1.33.20}/src/openlit/otel/tracing.py +0 -0
@@ -1,11 +1,11 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: openlit
3
- Version: 1.33.18
3
+ Version: 1.33.20
4
4
  Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
5
5
  License: Apache-2.0
6
6
  Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
7
7
  Author: OpenLIT
8
- Requires-Python: >=3.7.1,<4.0.0
8
+ Requires-Python: >=3.8.0,<4.0.0
9
9
  Classifier: License :: OSI Approved :: Apache Software License
10
10
  Classifier: Programming Language :: Python :: 3
11
11
  Classifier: Programming Language :: Python :: 3.8
@@ -14,18 +14,18 @@ Classifier: Programming Language :: Python :: 3.10
14
14
  Classifier: Programming Language :: Python :: 3.11
15
15
  Classifier: Programming Language :: Python :: 3.12
16
16
  Classifier: Programming Language :: Python :: 3.13
17
- Requires-Dist: anthropic (>=0.42.0,<0.43.0)
17
+ Requires-Dist: anthropic (>=0.42.0,<1.0.0)
18
18
  Requires-Dist: boto3 (>=1.34.0,<2.0.0)
19
19
  Requires-Dist: botocore (>=1.34.0,<2.0.0)
20
20
  Requires-Dist: openai (>=1.1.1,<2.0.0)
21
- Requires-Dist: opentelemetry-api (>=1.27.0,<2.0.0)
22
- Requires-Dist: opentelemetry-exporter-otlp (>=1.27.0,<2.0.0)
23
- Requires-Dist: opentelemetry-instrumentation (>=0.48b0,<0.49)
24
- Requires-Dist: opentelemetry-sdk (>=1.27.0,<2.0.0)
21
+ Requires-Dist: opentelemetry-api (>=1.30.0,<2.0.0)
22
+ Requires-Dist: opentelemetry-exporter-otlp (>=1.30.0,<2.0.0)
23
+ Requires-Dist: opentelemetry-instrumentation (>=0.52b0,<1.0.0)
24
+ Requires-Dist: opentelemetry-sdk (>=1.30.0,<2.0.0)
25
25
  Requires-Dist: pydantic (>=2.0.0,<3.0.0)
26
26
  Requires-Dist: requests (>=2.26.0,<3.0.0)
27
27
  Requires-Dist: schedule (>=1.2.2,<2.0.0)
28
- Requires-Dist: xmltodict (>=0.13.0,<0.14.0)
28
+ Requires-Dist: xmltodict (>=0.13.0,<1.0.0)
29
29
  Project-URL: Homepage, https://github.com/openlit/openlit/tree/main/openlit/python
30
30
  Project-URL: Repository, https://github.com/openlit/openlit/tree/main/openlit/python
31
31
  Description-Content-Type: text/markdown
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "openlit"
3
- version = "1.33.18"
3
+ version = "1.33.20"
4
4
  description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
5
5
  authors = ["OpenLIT"]
6
6
  license = "Apache-2.0"
@@ -10,19 +10,19 @@ homepage = "https://github.com/openlit/openlit/tree/main/openlit/python"
10
10
  keywords = ["OpenTelemetry", "otel", "otlp","llm", "tracing", "openai", "anthropic", "claude", "cohere", "llm monitoring", "observability", "monitoring", "gpt", "Generative AI", "chatGPT", "gpu"]
11
11
 
12
12
  [tool.poetry.dependencies]
13
- python = "^3.7.1"
13
+ python = "^3.8.0"
14
14
  requests = "^2.26.0"
15
15
  schedule = "^1.2.2"
16
16
  pydantic = "^2.0.0"
17
- xmltodict = "^0.13.0"
17
+ xmltodict = ">=0.13.0,<1.0.0"
18
18
  boto3 = "^1.34.0"
19
19
  botocore = "^1.34.0"
20
- opentelemetry-api = "^1.27.0"
21
- opentelemetry-sdk = "^1.27.0"
22
- opentelemetry-exporter-otlp = "^1.27.0"
23
- opentelemetry-instrumentation = "^0.48b0"
20
+ opentelemetry-api = "^1.30.0"
21
+ opentelemetry-sdk = "^1.30.0"
22
+ opentelemetry-exporter-otlp = "^1.30.0"
23
+ opentelemetry-instrumentation = ">=0.52b0,<1.0.0"
24
24
  openai = "^1.1.1"
25
- anthropic = "^0.42.0"
25
+ anthropic = ">=0.42.0,<1.0.0"
26
26
 
27
27
  [build-system]
28
28
  requires = ["poetry-core>=1.1.0"]
@@ -12,7 +12,7 @@ import requests
12
12
  from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
13
13
  from opentelemetry.trace import Status, StatusCode
14
14
  from opentelemetry._events import Event
15
- from openlit.semcov import SemanticConvetion
15
+ from openlit.semcov import SemanticConvention
16
16
 
17
17
  # Set up logging
18
18
  logger = logging.getLogger(__name__)
@@ -176,12 +176,12 @@ def create_metrics_attributes(
176
176
  TELEMETRY_SDK_NAME: 'openlit',
177
177
  SERVICE_NAME: service_name,
178
178
  DEPLOYMENT_ENVIRONMENT: deployment_environment,
179
- SemanticConvetion.GEN_AI_OPERATION: operation,
180
- SemanticConvetion.GEN_AI_SYSTEM: system,
181
- SemanticConvetion.GEN_AI_REQUEST_MODEL: request_model,
182
- SemanticConvetion.SERVER_ADDRESS: server_address,
183
- SemanticConvetion.SERVER_PORT: server_port,
184
- SemanticConvetion.GEN_AI_RESPONSE_MODEL: response_model
179
+ SemanticConvention.GEN_AI_OPERATION: operation,
180
+ SemanticConvention.GEN_AI_SYSTEM: system,
181
+ SemanticConvention.GEN_AI_REQUEST_MODEL: request_model,
182
+ SemanticConvention.SERVER_ADDRESS: server_address,
183
+ SemanticConvention.SERVER_PORT: server_port,
184
+ SemanticConvention.GEN_AI_RESPONSE_MODEL: response_model
185
185
  }
186
186
 
187
187
  def set_server_address_and_port(client_instance: Any,
@@ -237,27 +237,21 @@ def extract_and_format_input(messages):
237
237
  them into fixed roles like 'user', 'assistant', 'system', 'tool'.
238
238
  """
239
239
 
240
- fixed_roles = ['user', 'assistant', 'system', 'tool'] # Ensure these are your fixed keys
241
- # Initialize the dictionary with fixed keys and empty structures
240
+ fixed_roles = ['user', 'assistant', 'system', 'tool', 'developer']
242
241
  formatted_messages = {role_key: {'role': '', 'content': ''} for role_key in fixed_roles}
243
242
 
244
243
  for message in messages:
245
- # Normalize the message structure
246
244
  message = response_as_dict(message)
247
245
 
248
- # Extract role and content
249
246
  role = message.get('role')
250
247
  if role not in fixed_roles:
251
- continue # Skip any role not in our predefined roles
248
+ continue
252
249
 
253
250
  content = message.get('content', '')
254
251
 
255
- # Prepare content as a string
252
+ # Prepare content as a string, handling both list and str
256
253
  if isinstance(content, list):
257
- content_str = ", ".join(
258
- f'{item.get("type", "text")}: {extract_text_from_item(item)}'
259
- for item in content
260
- )
254
+ content_str = ", ".join(str(item) for item in content)
261
255
  else:
262
256
  content_str = content
263
257
 
@@ -272,30 +266,6 @@ def extract_and_format_input(messages):
272
266
 
273
267
  return formatted_messages
274
268
 
275
- def extract_text_from_item(item):
276
- """
277
- Extract text from inpit message
278
- """
279
-
280
- #pylint: disable=no-else-return
281
- if item.get('type') == 'text':
282
- return item.get('text', '')
283
- elif item.get('type') == 'image':
284
- # Handle image content specifically checking for 'url' or 'base64'
285
- source = item.get('source', {})
286
- if isinstance(source, dict):
287
- if source.get('type') == 'base64':
288
- # Return the actual base64 data if present
289
- return source.get('data', '[Missing base64 data]')
290
- elif source.get('type') == 'url':
291
- return source.get('url', '[Missing URL]')
292
- elif item.get('type') == 'image_url':
293
- # New format: Handle the 'image_url' type
294
- image_url = item.get('image_url', {})
295
- if isinstance(image_url, dict):
296
- return image_url.get('url', '[Missing image URL]')
297
- return ''
298
-
299
269
  # To be removed one the change to log events (from span events) is complete
300
270
  def concatenate_all_contents(formatted_messages):
301
271
  """
@@ -18,7 +18,7 @@ import requests
18
18
  from opentelemetry import trace as t
19
19
  from opentelemetry.trace import SpanKind, Status, StatusCode, Span
20
20
  from opentelemetry.sdk.resources import SERVICE_NAME, DEPLOYMENT_ENVIRONMENT
21
- from openlit.semcov import SemanticConvetion
21
+ from openlit.semcov import SemanticConvention
22
22
  from openlit.otel.tracing import setup_tracing
23
23
  from openlit.otel.metrics import setup_meter
24
24
  from openlit.otel.events import setup_events
@@ -573,7 +573,7 @@ def trace(wrapped):
573
573
  try:
574
574
  response = wrapped(*args, **kwargs)
575
575
  span.set_attribute(
576
- SemanticConvetion.GEN_AI_CONTENT_COMPLETION, response or ""
576
+ SemanticConvention.GEN_AI_CONTENT_COMPLETION, response or ""
577
577
  )
578
578
  span.set_status(Status(StatusCode.OK))
579
579
  except Exception as e:
@@ -632,7 +632,7 @@ class TracedSpan:
632
632
  result: The result to be set as an attribute on the span.
633
633
  """
634
634
 
635
- self._span.set_attribute(SemanticConvetion.GEN_AI_CONTENT_COMPLETION, result)
635
+ self._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, result)
636
636
 
637
637
  def set_metadata(self, metadata: Dict):
638
638
  """
@@ -10,7 +10,7 @@ from opentelemetry.metrics import get_meter
10
10
  from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
11
11
  from anthropic import Anthropic
12
12
  from openai import OpenAI
13
- from openlit.semcov import SemanticConvetion
13
+ from openlit.semcov import SemanticConvention
14
14
 
15
15
  # Initialize logger for logging potential issues and operations
16
16
  logger = logging.getLogger(__name__)
@@ -238,7 +238,7 @@ def eval_metrics():
238
238
  )
239
239
 
240
240
  guard_requests = meter.create_counter(
241
- name=SemanticConvetion.EVAL_REQUESTS,
241
+ name=SemanticConvention.EVAL_REQUESTS,
242
242
  description="Counter for evaluation requests",
243
243
  unit="1"
244
244
  )
@@ -262,14 +262,14 @@ def eval_metric_attributes(verdict, score, validator, classification, explanatio
262
262
  return {
263
263
  TELEMETRY_SDK_NAME:
264
264
  "openlit",
265
- SemanticConvetion.EVAL_VERDICT:
265
+ SemanticConvention.EVAL_VERDICT:
266
266
  verdict,
267
- SemanticConvetion.EVAL_SCORE:
267
+ SemanticConvention.EVAL_SCORE:
268
268
  score,
269
- SemanticConvetion.EVAL_VALIDATOR:
269
+ SemanticConvention.EVAL_VALIDATOR:
270
270
  validator,
271
- SemanticConvetion.EVAL_CLASSIFICATION:
271
+ SemanticConvention.EVAL_CLASSIFICATION:
272
272
  classification,
273
- SemanticConvetion.EVAL_EXPLANATION:
273
+ SemanticConvention.EVAL_EXPLANATION:
274
274
  explanation,
275
275
  }
@@ -11,7 +11,7 @@ from opentelemetry.metrics import get_meter
11
11
  from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
12
12
  from anthropic import Anthropic
13
13
  from openai import OpenAI
14
- from openlit.semcov import SemanticConvetion
14
+ from openlit.semcov import SemanticConvention
15
15
 
16
16
  # Initialize logger for logging potential issues and operations
17
17
  logger = logging.getLogger(__name__)
@@ -202,7 +202,7 @@ def guard_metrics():
202
202
  )
203
203
 
204
204
  guard_requests = meter.create_counter(
205
- name=SemanticConvetion.GUARD_REQUESTS,
205
+ name=SemanticConvention.GUARD_REQUESTS,
206
206
  description="Counter for Guard requests",
207
207
  unit="1"
208
208
  )
@@ -224,9 +224,9 @@ def guard_metric_attributes(verdict, score, validator, classification, explanati
224
224
  """
225
225
  return {
226
226
  TELEMETRY_SDK_NAME: "openlit",
227
- SemanticConvetion.GUARD_VERDICT: verdict,
228
- SemanticConvetion.GUARD_SCORE: score,
229
- SemanticConvetion.GUARD_VALIDATOR: validator,
230
- SemanticConvetion.GUARD_CLASSIFICATION: classification,
231
- SemanticConvetion.GUARD_EXPLANATION: explanation,
227
+ SemanticConvention.GUARD_VERDICT: verdict,
228
+ SemanticConvention.GUARD_SCORE: score,
229
+ SemanticConvention.GUARD_VALIDATOR: validator,
230
+ SemanticConvention.GUARD_CLASSIFICATION: classification,
231
+ SemanticConvention.GUARD_EXPLANATION: explanation,
232
232
  }
@@ -11,7 +11,7 @@ from openlit.__helpers import (
11
11
  get_chat_model_cost,
12
12
  otel_event,
13
13
  )
14
- from openlit.semcov import SemanticConvetion
14
+ from openlit.semcov import SemanticConvention
15
15
 
16
16
  # Initialize logger for logging potential issues and operations
17
17
  logger = logging.getLogger(__name__)
@@ -29,17 +29,17 @@ def set_span_attributes(span, version, operation_name, environment,
29
29
 
30
30
  # Set Span attributes (OTel Semconv)
31
31
  span.set_attribute(TELEMETRY_SDK_NAME, 'openlit')
32
- span.set_attribute(SemanticConvetion.GEN_AI_OPERATION, operation_name)
33
- span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM, SemanticConvetion.GEN_AI_SYSTEM_AG2)
34
- span.set_attribute(SemanticConvetion.GEN_AI_AGENT_NAME, AGENT_NAME)
35
- span.set_attribute(SemanticConvetion.SERVER_ADDRESS, server_address)
36
- span.set_attribute(SemanticConvetion.SERVER_PORT, server_port)
37
- span.set_attribute(SemanticConvetion.GEN_AI_REQUEST_MODEL, request_model)
32
+ span.set_attribute(SemanticConvention.GEN_AI_OPERATION, operation_name)
33
+ span.set_attribute(SemanticConvention.GEN_AI_SYSTEM, SemanticConvention.GEN_AI_SYSTEM_AG2)
34
+ span.set_attribute(SemanticConvention.GEN_AI_AGENT_NAME, AGENT_NAME)
35
+ span.set_attribute(SemanticConvention.SERVER_ADDRESS, server_address)
36
+ span.set_attribute(SemanticConvention.SERVER_PORT, server_port)
37
+ span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MODEL, request_model)
38
38
 
39
39
  # Set Span attributes (Extras)
40
40
  span.set_attribute(DEPLOYMENT_ENVIRONMENT, environment)
41
41
  span.set_attribute(SERVICE_NAME, application_name)
42
- span.set_attribute(SemanticConvetion.GEN_AI_SDK_VERSION, version)
42
+ span.set_attribute(SemanticConvention.GEN_AI_SDK_VERSION, version)
43
43
 
44
44
  def calculate_tokens_and_cost(response, request_model, pricing_info):
45
45
  """
@@ -64,13 +64,13 @@ def emit_events(response, event_provider, capture_message_content):
64
64
  """
65
65
  for chat in response.chat_history:
66
66
  event_type = (
67
- SemanticConvetion.GEN_AI_CHOICE if chat['role'] == 'user'
68
- else SemanticConvetion.GEN_AI_USER_MESSAGE
67
+ SemanticConvention.GEN_AI_CHOICE if chat['role'] == 'user'
68
+ else SemanticConvention.GEN_AI_USER_MESSAGE
69
69
  )
70
70
  choice_event = otel_event(
71
71
  name=event_type,
72
72
  attributes={
73
- SemanticConvetion.GEN_AI_SYSTEM: SemanticConvetion.GEN_AI_SYSTEM_AG2
73
+ SemanticConvention.GEN_AI_SYSTEM: SemanticConvention.GEN_AI_SYSTEM_AG2
74
74
  },
75
75
  body={
76
76
  'index': response.chat_history.index(chat),
@@ -97,7 +97,7 @@ def conversable_agent(version, environment, application_name,
97
97
  SYSTEM_MESSAGE = kwargs.get('system_message', '')
98
98
  MODEL_AND_NAME_SET = True
99
99
 
100
- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {AGENT_NAME}'
100
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT} {AGENT_NAME}'
101
101
 
102
102
  with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
103
103
  try:
@@ -105,11 +105,11 @@ def conversable_agent(version, environment, application_name,
105
105
  response = wrapped(*args, **kwargs)
106
106
  end_time = time.time()
107
107
 
108
- set_span_attributes(span, version, SemanticConvetion.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
108
+ set_span_attributes(span, version, SemanticConvention.GEN_AI_OPERATION_TYPE_CREATE_AGENT,
109
109
  environment, application_name, server_address, server_port, REQUEST_MODEL)
110
- span.set_attribute(SemanticConvetion.GEN_AI_AGENT_DESCRIPTION, SYSTEM_MESSAGE)
111
- span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, REQUEST_MODEL)
112
- span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, end_time - start_time)
110
+ span.set_attribute(SemanticConvention.GEN_AI_AGENT_DESCRIPTION, SYSTEM_MESSAGE)
111
+ span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, REQUEST_MODEL)
112
+ span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, end_time - start_time)
113
113
 
114
114
  span.set_status(Status(StatusCode.OK))
115
115
 
@@ -130,7 +130,7 @@ def agent_run(version, environment, application_name,
130
130
  def wrapper(wrapped, instance, args, kwargs):
131
131
  server_address, server_port = '127.0.0.1', 80
132
132
 
133
- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {AGENT_NAME}'
133
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK} {AGENT_NAME}'
134
134
 
135
135
  with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
136
136
  try:
@@ -141,14 +141,14 @@ def agent_run(version, environment, application_name,
141
141
  input_tokens, output_tokens, cost = calculate_tokens_and_cost(response, REQUEST_MODEL, pricing_info)
142
142
  response_model = list(response.cost.get('usage_including_cached_inference', {}).keys())[1]
143
143
 
144
- set_span_attributes(span, version, SemanticConvetion.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
144
+ set_span_attributes(span, version, SemanticConvention.GEN_AI_OPERATION_TYPE_EXECUTE_AGENT_TASK,
145
145
  environment, application_name, server_address, server_port, REQUEST_MODEL)
146
- span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_MODEL, response_model)
147
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
148
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
149
- span.set_attribute(SemanticConvetion.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
150
- span.set_attribute(SemanticConvetion.GEN_AI_USAGE_COST, cost)
151
- span.set_attribute(SemanticConvetion.GEN_AI_SERVER_TTFT, end_time - start_time)
146
+ span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_MODEL, response_model)
147
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
148
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
149
+ span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, input_tokens + output_tokens)
150
+ span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
151
+ span.set_attribute(SemanticConvention.GEN_AI_SERVER_TTFT, end_time - start_time)
152
152
 
153
153
  emit_events(response, event_provider, capture_message_content)
154
154
  span.set_status(Status(StatusCode.OK))
@@ -16,7 +16,7 @@ from openlit.instrumentation.ai21.utils import (
16
16
  process_chat_rag_response
17
17
  )
18
18
 
19
- from openlit.semcov import SemanticConvetion
19
+ from openlit.semcov import SemanticConvention
20
20
 
21
21
  # Initialize logger for logging potential issues and operations
22
22
  logger = logging.getLogger(__name__)
@@ -113,7 +113,7 @@ def chat(version, environment, application_name,
113
113
  server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
114
114
  request_model = kwargs.get('model', 'jamba-1.5-mini')
115
115
 
116
- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
116
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
117
117
 
118
118
  # pylint: disable=no-else-return
119
119
  if streaming:
@@ -163,7 +163,7 @@ def chat_rag(version, environment, application_name,
163
163
  server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
164
164
  request_model = kwargs.get('model', 'jamba-1.5-mini')
165
165
 
166
- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
166
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
167
167
 
168
168
  with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
169
169
  start_time = time.time()
@@ -16,7 +16,7 @@ from openlit.instrumentation.ai21.utils import (
16
16
  process_chat_rag_response
17
17
  )
18
18
 
19
- from openlit.semcov import SemanticConvetion
19
+ from openlit.semcov import SemanticConvention
20
20
 
21
21
  # Initialize logger for logging potential issues and operations
22
22
  logger = logging.getLogger(__name__)
@@ -113,7 +113,7 @@ def async_chat(version, environment, application_name,
113
113
  server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
114
114
  request_model = kwargs.get('model', 'jamba-1.5-mini')
115
115
 
116
- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
116
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
117
117
 
118
118
  # pylint: disable=no-else-return
119
119
  if streaming:
@@ -163,7 +163,7 @@ def async_chat_rag(version, environment, application_name,
163
163
  server_address, server_port = set_server_address_and_port(instance, 'api.ai21.com', 443)
164
164
  request_model = kwargs.get('model', 'jamba-1.5-mini')
165
165
 
166
- span_name = f'{SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
166
+ span_name = f'{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}'
167
167
 
168
168
  with tracer.start_as_current_span(span_name, kind= SpanKind.CLIENT) as span:
169
169
  start_time = time.time()